summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
authorYosuke Furukawa <yosuke.furukawa@gmail.com>2015-01-16 02:53:46 +0900
committerBen Noordhuis <info@bnoordhuis.nl>2015-01-15 21:21:18 +0100
commit946eabd18f623b438e17164b14c98066f7054168 (patch)
tree04275ee794310d9f3967064283f9ce02480e4a44 /tools
parent9e62ae4304a0bee3aec8c5fb743eb17d78b1cd35 (diff)
downloadnode-new-946eabd18f623b438e17164b14c98066f7054168.tar.gz
tools: update closure linter to 2.3.17
PR-URL: https://github.com/iojs/io.js/pull/449 Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Diffstat (limited to 'tools')
-rw-r--r--tools/closure_linter/AUTHORS6
-rw-r--r--tools/closure_linter/LICENSE176
-rw-r--r--tools/closure_linter/PKG-INFO10
-rw-r--r--tools/closure_linter/build/lib/closure_linter/__init__.py16
-rw-r--r--tools/closure_linter/build/lib/closure_linter/aliaspass.py248
-rw-r--r--tools/closure_linter/build/lib/closure_linter/aliaspass_test.py191
-rw-r--r--tools/closure_linter/build/lib/closure_linter/checker.py108
-rw-r--r--tools/closure_linter/build/lib/closure_linter/checkerbase.py192
-rw-r--r--tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py578
-rw-r--r--tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py873
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/__init__.py16
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/error.py65
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py46
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/errorhandler.py61
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/erroroutput.py52
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/filetestcase.py115
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/htmlutil.py170
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/lintrunner.py39
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/matcher.py60
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/position.py126
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py190
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/tokenizer.py185
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/tokens.py145
-rw-r--r--tools/closure_linter/build/lib/closure_linter/common/tokens_test.py113
-rw-r--r--tools/closure_linter/build/lib/closure_linter/ecmalintrules.py844
-rw-r--r--tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py574
-rw-r--r--tools/closure_linter/build/lib/closure_linter/error_check.py95
-rw-r--r--tools/closure_linter/build/lib/closure_linter/error_fixer.py618
-rw-r--r--tools/closure_linter/build/lib/closure_linter/error_fixer_test.py57
-rw-r--r--tools/closure_linter/build/lib/closure_linter/errorrecord.py66
-rw-r--r--tools/closure_linter/build/lib/closure_linter/errorrules.py72
-rw-r--r--tools/closure_linter/build/lib/closure_linter/errorrules_test.py117
-rw-r--r--tools/closure_linter/build/lib/closure_linter/errors.py154
-rw-r--r--tools/closure_linter/build/lib/closure_linter/fixjsstyle.py66
-rw-r--r--tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py615
-rw-r--r--tools/closure_linter/build/lib/closure_linter/full_test.py121
-rw-r--r--tools/closure_linter/build/lib/closure_linter/gjslint.py319
-rw-r--r--tools/closure_linter/build/lib/closure_linter/indentation.py617
-rw-r--r--tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py754
-rw-r--r--tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py150
-rw-r--r--tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py278
-rw-r--r--tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py463
-rw-r--r--tools/closure_linter/build/lib/closure_linter/javascripttokens.py153
-rw-r--r--tools/closure_linter/build/lib/closure_linter/not_strict_test.py74
-rw-r--r--tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py329
-rw-r--r--tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py155
-rw-r--r--tools/closure_linter/build/lib/closure_linter/runner.py198
-rw-r--r--tools/closure_linter/build/lib/closure_linter/runner_test.py101
-rw-r--r--tools/closure_linter/build/lib/closure_linter/scopeutil.py206
-rw-r--r--tools/closure_linter/build/lib/closure_linter/scopeutil_test.py222
-rw-r--r--tools/closure_linter/build/lib/closure_linter/statetracker.py1294
-rw-r--r--tools/closure_linter/build/lib/closure_linter/statetracker_test.py123
-rw-r--r--tools/closure_linter/build/lib/closure_linter/strict_test.py67
-rw-r--r--tools/closure_linter/build/lib/closure_linter/testutil.py94
-rw-r--r--tools/closure_linter/build/lib/closure_linter/tokenutil.py697
-rw-r--r--tools/closure_linter/build/lib/closure_linter/tokenutil_test.py297
-rw-r--r--tools/closure_linter/build/lib/closure_linter/typeannotation.py401
-rw-r--r--tools/closure_linter/build/lib/closure_linter/typeannotation_test.py232
-rw-r--r--tools/closure_linter/closure_linter.egg-info/PKG-INFO2
-rw-r--r--tools/closure_linter/closure_linter.egg-info/SOURCES.txt26
-rwxr-xr-xtools/closure_linter/closure_linter/__init__.py15
-rw-r--r--tools/closure_linter/closure_linter/aliaspass.py248
-rwxr-xr-xtools/closure_linter/closure_linter/aliaspass_test.py191
-rwxr-xr-xtools/closure_linter/closure_linter/checker.py98
-rwxr-xr-xtools/closure_linter/closure_linter/checkerbase.py197
-rwxr-xr-xtools/closure_linter/closure_linter/closurizednamespacesinfo.py578
-rwxr-xr-xtools/closure_linter/closure_linter/closurizednamespacesinfo_test.py873
-rwxr-xr-xtools/closure_linter/closure_linter/common/__init__.py15
-rwxr-xr-xtools/closure_linter/closure_linter/common/error.py2
-rwxr-xr-xtools/closure_linter/closure_linter/common/erroraccumulator.py2
-rw-r--r--tools/closure_linter/closure_linter/common/erroroutput.py52
-rwxr-xr-xtools/closure_linter/closure_linter/common/errorprinter.py203
-rwxr-xr-xtools/closure_linter/closure_linter/common/filetestcase.py32
-rwxr-xr-xtools/closure_linter/closure_linter/common/tokenizer.py3
-rwxr-xr-xtools/closure_linter/closure_linter/common/tokens.py22
-rw-r--r--tools/closure_linter/closure_linter/common/tokens_test.py113
-rwxr-xr-xtools/closure_linter/closure_linter/ecmalintrules.py660
-rwxr-xr-xtools/closure_linter/closure_linter/ecmametadatapass.py93
-rwxr-xr-xtools/closure_linter/closure_linter/error_check.py95
-rwxr-xr-xtools/closure_linter/closure_linter/error_fixer.py434
-rw-r--r--tools/closure_linter/closure_linter/error_fixer_test.py57
-rw-r--r--tools/closure_linter/closure_linter/errorrecord.py66
-rwxr-xr-xtools/closure_linter/closure_linter/errorrules.py40
-rw-r--r--tools/closure_linter/closure_linter/errorrules_test.py117
-rwxr-xr-xtools/closure_linter/closure_linter/errors.py33
-rwxr-xr-xtools/closure_linter/closure_linter/fixjsstyle.py29
-rwxr-xr-xtools/closure_linter/closure_linter/fixjsstyle_test.py588
-rwxr-xr-xtools/closure_linter/closure_linter/full_test.py38
-rwxr-xr-xtools/closure_linter/closure_linter/gjslint.py229
-rwxr-xr-xtools/closure_linter/closure_linter/indentation.py168
-rw-r--r--[-rwxr-xr-x]tools/closure_linter/closure_linter/javascriptlintrules.py721
-rwxr-xr-xtools/closure_linter/closure_linter/javascriptstatetracker.py176
-rw-r--r--[-rwxr-xr-x]tools/closure_linter/closure_linter/javascriptstatetracker_test.py289
-rwxr-xr-xtools/closure_linter/closure_linter/javascripttokenizer.py380
-rwxr-xr-xtools/closure_linter/closure_linter/javascripttokens.py12
-rwxr-xr-xtools/closure_linter/closure_linter/not_strict_test.py74
-rwxr-xr-xtools/closure_linter/closure_linter/requireprovidesorter.py329
-rw-r--r--tools/closure_linter/closure_linter/requireprovidesorter_test.py155
-rw-r--r--tools/closure_linter/closure_linter/runner.py198
-rw-r--r--tools/closure_linter/closure_linter/runner_test.py101
-rw-r--r--tools/closure_linter/closure_linter/scopeutil.py206
-rw-r--r--tools/closure_linter/closure_linter/scopeutil_test.py222
-rw-r--r--[-rwxr-xr-x]tools/closure_linter/closure_linter/statetracker.py488
-rwxr-xr-xtools/closure_linter/closure_linter/statetracker_test.py123
-rwxr-xr-xtools/closure_linter/closure_linter/strict_test.py67
-rw-r--r--tools/closure_linter/closure_linter/testdata/all_js_wrapped.js5
-rw-r--r--tools/closure_linter/closure_linter/testdata/blank_lines.js104
-rw-r--r--tools/closure_linter/closure_linter/testdata/bugs.js43
-rw-r--r--tools/closure_linter/closure_linter/testdata/empty_file.js0
-rw-r--r--tools/closure_linter/closure_linter/testdata/ends_with_block.js19
-rw-r--r--tools/closure_linter/closure_linter/testdata/externs.js34
-rw-r--r--tools/closure_linter/closure_linter/testdata/externs_jsdoc.js37
-rw-r--r--tools/closure_linter/closure_linter/testdata/file_level_comment.js13
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.html.in.html52
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.html.out.html51
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.in.js293
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.indentation.out.js465
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.in.js21
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.out.js21
-rw-r--r--tools/closure_linter/closure_linter/testdata/fixjsstyle.out.js310
-rw-r--r--tools/closure_linter/closure_linter/testdata/goog_scope.js63
-rw-r--r--tools/closure_linter/closure_linter/testdata/html_parse_error.html32
-rw-r--r--tools/closure_linter/closure_linter/testdata/indentation.js465
-rw-r--r--tools/closure_linter/closure_linter/testdata/interface.js89
-rw-r--r--tools/closure_linter/closure_linter/testdata/jsdoc.js1455
-rw-r--r--tools/closure_linter/closure_linter/testdata/limited_doc_checks.js29
-rw-r--r--tools/closure_linter/closure_linter/testdata/minimal.js1
-rw-r--r--tools/closure_linter/closure_linter/testdata/not_strict.js42
-rw-r--r--tools/closure_linter/closure_linter/testdata/other.js459
-rw-r--r--tools/closure_linter/closure_linter/testdata/provide_blank.js29
-rw-r--r--tools/closure_linter/closure_linter/testdata/provide_extra.js39
-rw-r--r--tools/closure_linter/closure_linter/testdata/provide_missing.js40
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_alias.js14
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_all_caps.js30
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_blank.js29
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_extra.js35
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_function.js22
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_function_missing.js24
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_function_through_both.js23
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_function_through_namespace.js22
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_interface.js31
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_interface_alias.js34
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_interface_base.js31
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_lower_case.js30
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_missing.js40
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_numeric.js30
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_provide_blank.js31
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_provide_missing.js76
-rw-r--r--tools/closure_linter/closure_linter/testdata/require_provide_ok.js214
-rw-r--r--tools/closure_linter/closure_linter/testdata/semicolon_missing.js18
-rw-r--r--tools/closure_linter/closure_linter/testdata/simple.html33
-rw-r--r--tools/closure_linter/closure_linter/testdata/spaces.js354
-rw-r--r--tools/closure_linter/closure_linter/testdata/tokenizer.js78
-rw-r--r--tools/closure_linter/closure_linter/testdata/unparseable.js44
-rw-r--r--tools/closure_linter/closure_linter/testdata/unused_local_variables.js88
-rw-r--r--tools/closure_linter/closure_linter/testdata/unused_private_members.js205
-rw-r--r--tools/closure_linter/closure_linter/testdata/utf8.html26
-rw-r--r--tools/closure_linter/closure_linter/testutil.py94
-rwxr-xr-xtools/closure_linter/closure_linter/tokenutil.py460
-rw-r--r--tools/closure_linter/closure_linter/tokenutil_test.py297
-rw-r--r--tools/closure_linter/closure_linter/typeannotation.py401
-rwxr-xr-xtools/closure_linter/closure_linter/typeannotation_test.py232
-rw-r--r--tools/closure_linter/dist/closure_linter-2.3.17-py2.7.eggbin0 -> 315656 bytes
-rw-r--r--tools/closure_linter/gflags.py2489
-rw-r--r--tools/closure_linter/setup.cfg5
-rwxr-xr-xtools/closure_linter/setup.py2
166 files changed, 29049 insertions, 3968 deletions
diff --git a/tools/closure_linter/AUTHORS b/tools/closure_linter/AUTHORS
new file mode 100644
index 0000000000..2f72bd6b2f
--- /dev/null
+++ b/tools/closure_linter/AUTHORS
@@ -0,0 +1,6 @@
+# This is a list of contributors to the Closure Linter.
+
+# Names should be added to this file like so:
+# Name or Organization <email address>
+
+Google Inc.
diff --git a/tools/closure_linter/LICENSE b/tools/closure_linter/LICENSE
new file mode 100644
index 0000000000..d9a10c0d8e
--- /dev/null
+++ b/tools/closure_linter/LICENSE
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/tools/closure_linter/PKG-INFO b/tools/closure_linter/PKG-INFO
deleted file mode 100644
index b6e71c8f11..0000000000
--- a/tools/closure_linter/PKG-INFO
+++ /dev/null
@@ -1,10 +0,0 @@
-Metadata-Version: 1.0
-Name: closure_linter
-Version: 2.2.6
-Summary: Closure Linter
-Home-page: http://code.google.com/p/closure-linter
-Author: The Closure Linter Authors
-Author-email: opensource@google.com
-License: Apache
-Description: UNKNOWN
-Platform: UNKNOWN
diff --git a/tools/closure_linter/build/lib/closure_linter/__init__.py b/tools/closure_linter/build/lib/closure_linter/__init__.py
new file mode 100644
index 0000000000..1798c8cfff
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/__init__.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Package indicator for gjslint."""
diff --git a/tools/closure_linter/build/lib/closure_linter/aliaspass.py b/tools/closure_linter/build/lib/closure_linter/aliaspass.py
new file mode 100644
index 0000000000..bb37bfa07b
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/aliaspass.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Pass that scans for goog.scope aliases and lint/usage errors."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+from closure_linter import ecmametadatapass
+from closure_linter import errors
+from closure_linter import javascripttokens
+from closure_linter import scopeutil
+from closure_linter import tokenutil
+from closure_linter.common import error
+
+
+# TODO(nnaze): Create a Pass interface and move this class, EcmaMetaDataPass,
+# and related classes onto it.
+
+
+def _GetAliasForIdentifier(identifier, alias_map):
+ """Returns the aliased_symbol name for an identifier.
+
+ Example usage:
+ >>> alias_map = {'MyClass': 'goog.foo.MyClass'}
+ >>> _GetAliasForIdentifier('MyClass.prototype.action', alias_map)
+ 'goog.foo.MyClass.prototype.action'
+
+ >>> _GetAliasForIdentifier('MyClass.prototype.action', {})
+ None
+
+ Args:
+ identifier: The identifier.
+ alias_map: A dictionary mapping a symbol to an alias.
+
+ Returns:
+ The aliased symbol name or None if not found.
+ """
+ ns = identifier.split('.', 1)[0]
+ aliased_symbol = alias_map.get(ns)
+ if aliased_symbol:
+ return aliased_symbol + identifier[len(ns):]
+
+
+def _SetTypeAlias(js_type, alias_map):
+ """Updates the alias for identifiers in a type.
+
+ Args:
+ js_type: A typeannotation.TypeAnnotation instance.
+ alias_map: A dictionary mapping a symbol to an alias.
+ """
+ aliased_symbol = _GetAliasForIdentifier(js_type.identifier, alias_map)
+ if aliased_symbol:
+ js_type.alias = aliased_symbol
+ for sub_type in js_type.IterTypes():
+ _SetTypeAlias(sub_type, alias_map)
+
+
+class AliasPass(object):
+ """Pass to identify goog.scope() usages.
+
+ Identifies goog.scope() usages and finds lint/usage errors. Notes any
+ aliases of symbols in Closurized namespaces (that is, reassignments
+ such as "var MyClass = goog.foo.MyClass;") and annotates identifiers
+ when they're using an alias (so they may be expanded to the full symbol
+ later -- that "MyClass.prototype.action" refers to
+ "goog.foo.MyClass.prototype.action" when expanded.).
+ """
+
+ def __init__(self, closurized_namespaces=None, error_handler=None):
+ """Creates a new pass.
+
+ Args:
+ closurized_namespaces: A set of Closurized namespaces (e.g. 'goog').
+ error_handler: An error handler to report lint errors to.
+ """
+
+ self._error_handler = error_handler
+
+ # If we have namespaces, freeze the set.
+ if closurized_namespaces:
+ closurized_namespaces = frozenset(closurized_namespaces)
+
+ self._closurized_namespaces = closurized_namespaces
+
+ def Process(self, start_token):
+ """Runs the pass on a token stream.
+
+ Args:
+ start_token: The first token in the stream.
+ """
+
+ if start_token is None:
+ return
+
+ # TODO(nnaze): Add more goog.scope usage checks.
+ self._CheckGoogScopeCalls(start_token)
+
+ # If we have closurized namespaces, identify aliased identifiers.
+ if self._closurized_namespaces:
+ context = start_token.metadata.context
+ root_context = context.GetRoot()
+ self._ProcessRootContext(root_context)
+
+ def _CheckGoogScopeCalls(self, start_token):
+ """Check goog.scope calls for lint/usage errors."""
+
+ def IsScopeToken(token):
+ return (token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and
+ token.string == 'goog.scope')
+
+ # Find all the goog.scope tokens in the file
+ scope_tokens = [t for t in start_token if IsScopeToken(t)]
+
+ for token in scope_tokens:
+ scope_context = token.metadata.context
+
+ if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and
+ scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT):
+ self._MaybeReportError(
+ error.Error(errors.INVALID_USE_OF_GOOG_SCOPE,
+ 'goog.scope call not in global scope', token))
+
+ # There should be only one goog.scope reference. Register errors for
+ # every instance after the first.
+ for token in scope_tokens[1:]:
+ self._MaybeReportError(
+ error.Error(errors.EXTRA_GOOG_SCOPE_USAGE,
+ 'More than one goog.scope call in file.', token))
+
+ def _MaybeReportError(self, err):
+ """Report an error to the handler (if registered)."""
+ if self._error_handler:
+ self._error_handler.HandleError(err)
+
+ @classmethod
+ def _YieldAllContexts(cls, context):
+ """Yields all contexts that are contained by the given context."""
+ yield context
+ for child_context in context.children:
+ for descendent_child in cls._YieldAllContexts(child_context):
+ yield descendent_child
+
+ @staticmethod
+ def _IsTokenInParentBlock(token, parent_block):
+ """Determines whether the given token is contained by the given block.
+
+ Args:
+ token: A token
+ parent_block: An EcmaContext.
+
+ Returns:
+ Whether the token is in a context that is or is a child of the given
+ parent_block context.
+ """
+ context = token.metadata.context
+
+ while context:
+ if context is parent_block:
+ return True
+ context = context.parent
+
+ return False
+
+ def _ProcessRootContext(self, root_context):
+ """Processes all goog.scope blocks under the root context."""
+
+ assert root_context.type is ecmametadatapass.EcmaContext.ROOT
+
+ # Process aliases in statements in the root scope for goog.module-style
+ # aliases.
+ global_alias_map = {}
+ for context in root_context.children:
+ if context.type == ecmametadatapass.EcmaContext.STATEMENT:
+ for statement_child in context.children:
+ if statement_child.type == ecmametadatapass.EcmaContext.VAR:
+ match = scopeutil.MatchModuleAlias(statement_child)
+ if match:
+ # goog.require aliases cannot use further aliases, the symbol is
+ # the second part of match, directly.
+ symbol = match[1]
+ if scopeutil.IsInClosurizedNamespace(symbol,
+ self._closurized_namespaces):
+ global_alias_map[match[0]] = symbol
+
+ # Process each block to find aliases.
+ for context in root_context.children:
+ self._ProcessBlock(context, global_alias_map)
+
+ def _ProcessBlock(self, context, global_alias_map):
+ """Scans a goog.scope block to find aliases and mark alias tokens."""
+ alias_map = global_alias_map.copy()
+
+ # Iterate over every token in the context. Each token points to one
+ # context, but multiple tokens may point to the same context. We only want
+ # to check each context once, so keep track of those we've seen.
+ seen_contexts = set()
+ token = context.start_token
+ while token and self._IsTokenInParentBlock(token, context):
+ token_context = token.metadata.context if token.metadata else None
+
+ # Check to see if this token is an alias.
+ if token_context and token_context not in seen_contexts:
+ seen_contexts.add(token_context)
+
+ # If this is a alias statement in the goog.scope block.
+ if (token_context.type == ecmametadatapass.EcmaContext.VAR and
+ scopeutil.IsGoogScopeBlock(token_context.parent.parent)):
+ match = scopeutil.MatchAlias(token_context)
+
+ # If this is an alias, remember it in the map.
+ if match:
+ alias, symbol = match
+ symbol = _GetAliasForIdentifier(symbol, alias_map) or symbol
+ if scopeutil.IsInClosurizedNamespace(symbol,
+ self._closurized_namespaces):
+ alias_map[alias] = symbol
+
+ # If this token is an identifier that matches an alias,
+ # mark the token as an alias to the original symbol.
+ if (token.type is javascripttokens.JavaScriptTokenType.SIMPLE_LVALUE or
+ token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER):
+ identifier = tokenutil.GetIdentifierForToken(token)
+ if identifier:
+ aliased_symbol = _GetAliasForIdentifier(identifier, alias_map)
+ if aliased_symbol:
+ token.metadata.aliased_symbol = aliased_symbol
+
+ elif token.type == javascripttokens.JavaScriptTokenType.DOC_FLAG:
+ flag = token.attached_object
+ if flag and flag.HasType() and flag.jstype:
+ _SetTypeAlias(flag.jstype, alias_map)
+
+ token = token.next # Get next token
diff --git a/tools/closure_linter/build/lib/closure_linter/aliaspass_test.py b/tools/closure_linter/build/lib/closure_linter/aliaspass_test.py
new file mode 100644
index 0000000000..7042e53487
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/aliaspass_test.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the aliaspass module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import unittest as googletest
+
+from closure_linter import aliaspass
+from closure_linter import errors
+from closure_linter import javascriptstatetracker
+from closure_linter import testutil
+from closure_linter.common import erroraccumulator
+
+
+def _GetTokenByLineAndString(start_token, string, line_number):
+ for token in start_token:
+ if token.line_number == line_number and token.string == string:
+ return token
+
+
class AliasPassTest(googletest.TestCase):
  """Tests for aliaspass.AliasPass.

  The assertions below reference line numbers and token strings inside the
  module-level fixture scripts (_TEST_ALIAS_SCRIPT etc.); keep the fixtures
  and these tests in sync.
  """

  def testInvalidGoogScopeCall(self):
    # goog.scope may only appear at the top level; calling it inside a
    # function must produce exactly one INVALID_USE_OF_GOOG_SCOPE error.
    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCOPE_SCRIPT)

    error_accumulator = erroraccumulator.ErrorAccumulator()
    alias_pass = aliaspass.AliasPass(
        error_handler=error_accumulator)
    alias_pass.Process(start_token)

    alias_errors = error_accumulator.GetErrors()
    self.assertEquals(1, len(alias_errors))

    alias_error = alias_errors[0]

    self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, alias_error.code)
    self.assertEquals('goog.scope', alias_error.token.string)

  def testAliasedIdentifiers(self):
    # Verifies alias tracking for simple, multiline, forward-referenced and
    # non-Closurized aliases defined in _TEST_ALIAS_SCRIPT.
    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
    alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
    alias_pass.Process(start_token)

    alias_token = _GetTokenByLineAndString(start_token, 'Event', 4)
    self.assertTrue(alias_token.metadata.is_alias_definition)

    # myClass appears before its alias is defined, so it must not resolve.
    my_class_token = _GetTokenByLineAndString(start_token, 'myClass', 9)
    self.assertIsNone(my_class_token.metadata.aliased_symbol)

    component_token = _GetTokenByLineAndString(start_token, 'Component', 17)
    self.assertEquals('goog.ui.Component',
                      component_token.metadata.aliased_symbol)

    event_token = _GetTokenByLineAndString(start_token, 'Event.Something', 17)
    self.assertEquals('goog.events.Event.Something',
                      event_token.metadata.aliased_symbol)

    # Aliases of namespaces outside the closurized set are not expanded.
    non_closurized_token = _GetTokenByLineAndString(
        start_token, 'NonClosurizedClass', 18)
    self.assertIsNone(non_closurized_token.metadata.aliased_symbol)

    long_start_token = _GetTokenByLineAndString(start_token, 'Event', 24)
    self.assertEquals('goog.events.Event.MultilineIdentifier.someMethod',
                      long_start_token.metadata.aliased_symbol)

  def testAliasedDoctypes(self):
    """Tests that aliases are correctly expanded within type annotations."""
    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
    tracker = javascriptstatetracker.JavaScriptStateTracker()
    # Doc flags must be attached before the alias pass can rewrite types.
    tracker.DocFlagPass(start_token, error_handler=None)

    alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
    alias_pass.Process(start_token)

    flag_token = _GetTokenByLineAndString(start_token, '@type', 22)
    self.assertEquals(
        'goog.events.Event.<goog.ui.Component,Array<myproject.foo.MyClass>>',
        repr(flag_token.attached_object.jstype))

  def testModuleAlias(self):
    # In a goog.module file, a goog.require assigned to a var is an alias.
    start_token = testutil.TokenizeSourceAndRunEcmaPass("""
goog.module('goog.test');
var Alias = goog.require('goog.Alias');
Alias.use();
""")
    alias_pass = aliaspass.AliasPass(set(['goog']))
    alias_pass.Process(start_token)
    alias_token = _GetTokenByLineAndString(start_token, 'Alias', 3)
    self.assertTrue(alias_token.metadata.is_alias_definition)

  def testMultipleGoogScopeCalls(self):
    # Only one top-level goog.scope is allowed per file; extra calls are
    # reported once each, and nested ones are also invalid.
    start_token = testutil.TokenizeSourceAndRunEcmaPass(
        _TEST_MULTIPLE_SCOPE_SCRIPT)

    error_accumulator = erroraccumulator.ErrorAccumulator()

    alias_pass = aliaspass.AliasPass(
        set(['goog', 'myproject']),
        error_handler=error_accumulator)
    alias_pass.Process(start_token)

    alias_errors = error_accumulator.GetErrors()

    self.assertEquals(3, len(alias_errors))

    error = alias_errors[0]
    self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, error.code)
    self.assertEquals(7, error.token.line_number)

    error = alias_errors[1]
    self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
    self.assertEquals(7, error.token.line_number)

    error = alias_errors[2]
    self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
    self.assertEquals(11, error.token.line_number)
+
+
+_TEST_ALIAS_SCRIPT = """
+goog.scope(function() {
+var events = goog.events; // scope alias
+var Event = events.
+ Event; // nested multiline scope alias
+
+// This should not be registered as an aliased identifier because
+// it appears before the alias.
+var myClass = new MyClass();
+
+var Component = goog.ui.Component; // scope alias
+var MyClass = myproject.foo.MyClass; // scope alias
+
+// Scope alias of non-Closurized namespace.
+var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
+
+var component = new Component(Event.Something);
+var nonClosurized = NonClosurizedClass();
+
+/**
+ * A created namespace with a really long identifier.
+ * @type {events.Event.<Component,Array<MyClass>}
+ */
+Event.
+ MultilineIdentifier.
+ someMethod = function() {};
+});
+"""
+
+_TEST_SCOPE_SCRIPT = """
+function foo () {
+ // This goog.scope call is invalid.
+ goog.scope(function() {
+
+ });
+}
+"""
+
+_TEST_MULTIPLE_SCOPE_SCRIPT = """
+goog.scope(function() {
+ // do nothing
+});
+
+function foo() {
+ var test = goog.scope; // We should not see goog.scope mentioned.
+}
+
+// This goog.scope invalid. There can be only one.
+goog.scope(function() {
+
+});
+"""
+
+
if __name__ == '__main__':
  # Allow running this module directly as a test binary.
  googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/checker.py b/tools/closure_linter/build/lib/closure_linter/checker.py
new file mode 100644
index 0000000000..1c984173b0
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/checker.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Core methods for checking JS files for common style guide violations."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+import gflags as flags
+
+from closure_linter import aliaspass
+from closure_linter import checkerbase
+from closure_linter import closurizednamespacesinfo
+from closure_linter import javascriptlintrules
+
+
# Command-line flags consumed by JavaScriptStyleChecker below. Adjacent
# string literals are concatenated, so each wrapped fragment must end with a
# space ('testing of' + 'goog.provide' previously rendered "ofgoog.provide").
flags.DEFINE_list('closurized_namespaces', '',
                  'Namespace prefixes, used for testing of '
                  'goog.provide/require')
flags.DEFINE_list('ignored_extra_namespaces', '',
                  'Fully qualified namespaces that should not be reported '
                  'as extra by the linter.')
+
+
class JavaScriptStyleChecker(checkerbase.CheckerBase):
  """Checker that applies JavaScriptLintRules."""

  def __init__(self, state_tracker, error_handler):
    """Initialize a JavaScriptStyleChecker object.

    Args:
      state_tracker: State tracker.
      error_handler: Error handler to pass all errors to.
    """
    self._namespaces_info = None
    self._alias_pass = None
    # Namespace tracking and the alias pass are only enabled when the
    # --closurized_namespaces flag was supplied on the command line.
    if flags.FLAGS.closurized_namespaces:
      self._namespaces_info = (
          closurizednamespacesinfo.ClosurizedNamespacesInfo(
              flags.FLAGS.closurized_namespaces,
              flags.FLAGS.ignored_extra_namespaces))

      self._alias_pass = aliaspass.AliasPass(
          flags.FLAGS.closurized_namespaces, error_handler)

    checkerbase.CheckerBase.__init__(
        self,
        error_handler=error_handler,
        lint_rules=javascriptlintrules.JavaScriptLintRules(
            self._namespaces_info),
        state_tracker=state_tracker)

  def Check(self, start_token, limited_doc_checks=False, is_html=False,
            stop_token=None):
    """Checks a token stream for lint warnings/errors.

    Adds a separate pass for computing dependency information based on
    goog.require and goog.provide statements prior to the main linting pass.

    Args:
      start_token: The first token in the token stream.
      limited_doc_checks: Whether to perform limited checks.
      is_html: Whether this token stream is HTML.
      stop_token: If given, checks should stop at this token.
    """
    self._lint_rules.Initialize(self, limited_doc_checks, is_html)

    # Doc flags must be attached before the alias pass can expand aliases
    # that appear inside @type annotations.
    self._state_tracker.DocFlagPass(start_token, self._error_handler)

    if self._alias_pass:
      self._alias_pass.Process(start_token)

    # The dependency pass only runs when closurized namespaces were
    # configured (see __init__); it feeds require/provide checks.
    if self._namespaces_info:
      self._namespaces_info.Reset()
      self._ExecutePass(start_token, self._DependencyPass, stop_token)

    self._ExecutePass(start_token, self._LintPass, stop_token)

    # If we have a stop_token, we didn't end up reading the whole file and,
    # thus, don't call Finalize to do end-of-file checks.
    if not stop_token:
      self._lint_rules.Finalize(self._state_tracker)

  def _DependencyPass(self, token):
    """Processes an individual token for dependency information.

    Used to encapsulate the logic needed to process an individual token so that
    it can be passed to _ExecutePass.

    Args:
      token: The token to process.
    """
    self._namespaces_info.ProcessToken(token, self._state_tracker)
diff --git a/tools/closure_linter/build/lib/closure_linter/checkerbase.py b/tools/closure_linter/build/lib/closure_linter/checkerbase.py
new file mode 100644
index 0000000000..6679ded05b
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/checkerbase.py
@@ -0,0 +1,192 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base classes for writing checkers that operate on tokens."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)',
+ 'jacobr@google.com (Jacob Richman)')
+
+from closure_linter import errorrules
+from closure_linter.common import error
+
+
class LintRulesBase(object):
  """Abstract base class for language-specific lint rule sets."""

  def __init__(self):
    self.__checker = None

  def Initialize(self, checker, limited_doc_checks, is_html):
    """Prepares this rule set for checking a new file.

    Args:
      checker: Class to report errors to.
      limited_doc_checks: Whether doc checking is relaxed for this file.
      is_html: Whether the file is an HTML file with extracted contents.
    """
    self.__checker = checker
    self._limited_doc_checks = limited_doc_checks
    self._is_html = is_html

  def _HandleError(self, code, message, token, position=None,
                   fix_data=None):
    """Forwards an error to the associated checker when it is reportable."""
    if not errorrules.ShouldReportError(code):
      return
    self.__checker.HandleError(code, message, token, position, fix_data)

  def _SetLimitedDocChecks(self, limited_doc_checks):
    """Updates whether doc checking is relaxed for this file.

    Args:
      limited_doc_checks: Whether doc checking is relaxed for this file.
    """
    self._limited_doc_checks = limited_doc_checks

  def CheckToken(self, token, parser_state):
    """Checks one token for warnings and errors; subclasses must override.

    Args:
      token: The current token under consideration.
      parser_state: Object that indicates the parser state in the page.

    Raises:
      TypeError: If not overridden.
    """
    raise TypeError('Abstract method CheckToken not implemented')

  def Finalize(self, parser_state):
    """Runs checks that need all lines processed; subclasses must override.

    Args:
      parser_state: State of the parser after parsing all tokens.

    Raises:
      TypeError: If not overridden.
    """
    raise TypeError('Abstract method Finalize not implemented')
+
+
class CheckerBase(object):
  """Drives a LintRules object over a stream of tokens."""

  def __init__(self, error_handler, lint_rules, state_tracker):
    """Creates a checker.

    Args:
      error_handler: Object that handles errors.
      lint_rules: LintRules object defining lint errors given a token
          and state_tracker object.
      state_tracker: Object that tracks the current state in the token stream.
    """
    self._error_handler = error_handler
    self._lint_rules = lint_rules
    self._state_tracker = state_tracker
    self._has_errors = False

  def HandleError(self, code, message, token, position=None,
                  fix_data=None):
    """Records that an error occurred and forwards it to the error handler.

    Args:
      code: The error code.
      message: The error to print.
      token: The token where the error occurred, or None if it was a file-wide
          issue.
      position: The position of the error, defaults to None.
      fix_data: Metadata used for fixing the error.
    """
    self._has_errors = True
    reported_error = error.Error(code, message, token, position, fix_data)
    self._error_handler.HandleError(reported_error)

  def HasErrors(self):
    """Returns True if any error has been reported so far."""
    return self._has_errors

  def Check(self, start_token, limited_doc_checks=False, is_html=False,
            stop_token=None):
    """Checks a token stream, reporting errors to the error reporter.

    Args:
      start_token: First token in token stream.
      limited_doc_checks: Whether doc checking is relaxed for this file.
      is_html: Whether the file being checked is an HTML file with extracted
          contents.
      stop_token: If given, check should stop at this token.
    """
    self._lint_rules.Initialize(self, limited_doc_checks, is_html)
    self._ExecutePass(start_token, self._LintPass, stop_token)
    self._lint_rules.Finalize(self._state_tracker)

  def _LintPass(self, token):
    """Applies the lint rules to a single token (adapter for _ExecutePass).

    Args:
      token: The token to check.
    """
    self._lint_rules.CheckToken(token, self._state_tracker)

  def _ExecutePass(self, token, pass_function, stop_token=None):
    """Walks the token stream, invoking pass_function on each live token.

    Parser state is kept up to date for every token visited. The walk ends
    at the end of the stream, or just before stop_token when one is given.
    Errors raised by pass_function propagate to the caller so developers can
    see the full stack trace.

    Args:
      token: The first token in the token stream.
      pass_function: The function to call for each token in the token stream.
      stop_token: The last token to check (if given).

    Raises:
      Exception: If any error occurred while calling the given function.
    """
    self._state_tracker.Reset()
    current = token
    while current:
      # Deleting a line does not wipe the prev/next pointers of its tokens,
      # so already-deleted tokens can still appear in the walk; step over
      # them without processing.
      if current.is_deleted:
        current = current.next
        continue
      if stop_token and current is stop_token:
        return
      self._state_tracker.HandleToken(
          current, self._state_tracker.GetLastNonSpaceToken())
      pass_function(current)
      self._state_tracker.HandleAfterToken(current)
      current = current.next
diff --git a/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py b/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py
new file mode 100644
index 0000000000..e7cbfd3318
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py
@@ -0,0 +1,578 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Logic for computing dependency information for closurized JavaScript files.
+
+Closurized JavaScript files express dependencies using goog.require and
+goog.provide statements. In order for the linter to detect when a statement is
+missing or unnecessary, all identifiers in the JavaScript file must first be
+processed to determine if they constitute the creation or usage of a dependency.
+"""
+
+
+
+import re
+
+from closure_linter import javascripttokens
+from closure_linter import tokenutil
+
# pylint: disable=g-bad-name
TokenType = javascripttokens.JavaScriptTokenType

# Namespaces exempt from "extra require" reporting by default; test files
# commonly goog.require these without referencing them in code.
DEFAULT_EXTRA_NAMESPACES = [
    'goog.testing.asserts',
    'goog.testing.jsunit',
]
+
+
class UsedNamespace(object):
  """A type for information about a used namespace."""

  def __init__(self, namespace, identifier, token, alias_definition):
    """Initializes the instance.

    Args:
      namespace: the namespace of an identifier used in the file
      identifier: the complete identifier
      token: the token that uses the namespace
      alias_definition: a boolean stating whether the namespace is only used
        for an alias definition and should not be required.
    """
    self.namespace = namespace
    self.identifier = identifier
    self.token = token
    self.alias_definition = alias_definition

  def GetLine(self):
    """Returns the line number of the token that used the namespace."""
    return self.token.line_number

  def __repr__(self):
    # Use items() rather than the Python-2-only iteritems() so __repr__
    # does not raise AttributeError under Python 3; behavior is unchanged
    # under Python 2 (items() yields the same key/value pairs).
    return 'UsedNamespace(%s)' % ', '.join(
        ['%s=%s' % (k, repr(v)) for k, v in self.__dict__.items()])
+
+
+class ClosurizedNamespacesInfo(object):
+ """Dependency information for closurized JavaScript files.
+
+ Processes token streams for dependency creation or usage and provides logic
+ for determining if a given require or provide statement is unnecessary or if
+ there are missing require or provide statements.
+ """
+
  def __init__(self, closurized_namespaces, ignored_extra_namespaces):
    """Initializes an instance of the ClosurizedNamespacesInfo class.

    Args:
      closurized_namespaces: A list of namespace prefixes that should be
          processed for dependency information. Non-matching namespaces are
          ignored.
      ignored_extra_namespaces: A list of namespaces that should not be
          reported as extra regardless of whether they are actually used.
    """
    self._closurized_namespaces = closurized_namespaces
    # The default test namespaces are always exempt, in addition to any the
    # caller supplies.
    self._ignored_extra_namespaces = (ignored_extra_namespaces +
                                      DEFAULT_EXTRA_NAMESPACES)
    self.Reset()
+
  def Reset(self):
    """Resets the internal state to prepare for processing a new file."""

    # A list of goog.provide tokens in the order they appeared in the file.
    self._provide_tokens = []

    # A list of goog.require tokens in the order they appeared in the file.
    self._require_tokens = []

    # Namespaces that are already goog.provided.
    self._provided_namespaces = []

    # Namespaces that are already goog.required.
    self._required_namespaces = []

    # Note that created_namespaces and used_namespaces contain both namespaces
    # and identifiers because there are many existing cases where a method or
    # constant is provided directly instead of its namespace. Ideally, these
    # two lists would only have to contain namespaces.

    # A list of tuples where the first element is the namespace of an
    # identifier created in the file, the second is the identifier itself and
    # the third is the line number where it's created.
    self._created_namespaces = []

    # A list of UsedNamespace instances.
    self._used_namespaces = []

    # A list of seemingly-unnecessary namespaces that are goog.required() and
    # annotated with @suppress {extraRequire}.
    self._suppressed_requires = []

    # A list of goog.provide tokens which are duplicates.
    self._duplicate_provide_tokens = []

    # A list of goog.require tokens which are duplicates.
    self._duplicate_require_tokens = []

    # Whether this file is in a goog.scope. Someday, we may add support
    # for checking scopified namespaces, but for now let's just fail
    # in a more reasonable way.
    self._scopified_file = False

    # TODO(user): Handle the case where there are 2 different requires
    # that can satisfy the same dependency, but only one is necessary.
+
  def GetProvidedNamespaces(self):
    """Returns the namespaces which are already provided by this file.

    Returns:
      A set of strings where each string is a 'namespace' corresponding to an
      existing goog.provide statement in the file being checked.
    """
    return set(self._provided_namespaces)
+
  def GetRequiredNamespaces(self):
    """Returns the namespaces which are already required by this file.

    Returns:
      A set of strings where each string is a 'namespace' corresponding to an
      existing goog.require statement in the file being checked.
    """
    return set(self._required_namespaces)
+
+ def IsExtraProvide(self, token):
+ """Returns whether the given goog.provide token is unnecessary.
+
+ Args:
+ token: A goog.provide token.
+
+ Returns:
+ True if the given token corresponds to an unnecessary goog.provide
+ statement, otherwise False.
+ """
+ namespace = tokenutil.GetStringAfterToken(token)
+
+ if self.GetClosurizedNamespace(namespace) is None:
+ return False
+
+ if token in self._duplicate_provide_tokens:
+ return True
+
+ # TODO(user): There's probably a faster way to compute this.
+ for created_namespace, created_identifier, _ in self._created_namespaces:
+ if namespace == created_namespace or namespace == created_identifier:
+ return False
+
+ return True
+
+ def IsExtraRequire(self, token):
+ """Returns whether the given goog.require token is unnecessary.
+
+ Args:
+ token: A goog.require token.
+
+ Returns:
+ True if the given token corresponds to an unnecessary goog.require
+ statement, otherwise False.
+ """
+ namespace = tokenutil.GetStringAfterToken(token)
+
+ if self.GetClosurizedNamespace(namespace) is None:
+ return False
+
+ if namespace in self._ignored_extra_namespaces:
+ return False
+
+ if token in self._duplicate_require_tokens:
+ return True
+
+ if namespace in self._suppressed_requires:
+ return False
+
+ # If the namespace contains a component that is initial caps, then that
+ # must be the last component of the namespace.
+ parts = namespace.split('.')
+ if len(parts) > 1 and parts[-2][0].isupper():
+ return True
+
+ # TODO(user): There's probably a faster way to compute this.
+ for ns in self._used_namespaces:
+ if (not ns.alias_definition and (
+ namespace == ns.namespace or namespace == ns.identifier)):
+ return False
+
+ return True
+
+ def GetMissingProvides(self):
+ """Returns the dict of missing provided namespaces for the current file.
+
+ Returns:
+ Returns a dictionary of key as string and value as integer where each
+ string(key) is a namespace that should be provided by this file, but is
+ not and integer(value) is first line number where it's defined.
+ """
+ missing_provides = dict()
+ for namespace, identifier, line_number in self._created_namespaces:
+ if (not self._IsPrivateIdentifier(identifier) and
+ namespace not in self._provided_namespaces and
+ identifier not in self._provided_namespaces and
+ namespace not in self._required_namespaces and
+ namespace not in missing_provides):
+ missing_provides[namespace] = line_number
+
+ return missing_provides
+
  def GetMissingRequires(self):
    """Returns the missing required namespaces for the current file.

    For each non-private identifier used in the file, find either a
    goog.require, goog.provide or a created identifier that satisfies it.
    goog.require statements can satisfy the identifier by requiring either the
    namespace of the identifier or the identifier itself. goog.provide
    statements can satisfy the identifier by providing the namespace of the
    identifier. A created identifier can only satisfy the used identifier if
    it matches it exactly (necessary since things can be defined on a
    namespace in more than one file). Note that provided namespaces should be
    a subset of created namespaces, but we check both because in some cases we
    can't always detect the creation of the namespace.

    Returns:
      A tuple (missing_requires, illegal_alias_statements).
      missing_requires maps each namespace (string) that should be required
      by this file, but is not, to the first line number (int) where it's
      used. illegal_alias_statements maps the parent namespace of each alias
      whose target is not covered by any require/provide to its token.
    """
    external_dependencies = set(self._required_namespaces)

    # Assume goog namespace is always available.
    external_dependencies.add('goog')
    # goog.module is treated as a builtin, too (for goog.module.get).
    external_dependencies.add('goog.module')

    created_identifiers = set()
    for unused_namespace, identifier, unused_line_number in (
        self._created_namespaces):
      created_identifiers.add(identifier)

    missing_requires = dict()
    illegal_alias_statements = dict()

    def ShouldRequireNamespace(namespace, identifier):
      """Checks if a namespace would normally be required."""
      return (
          not self._IsPrivateIdentifier(identifier) and
          namespace not in external_dependencies and
          namespace not in self._provided_namespaces and
          identifier not in external_dependencies and
          identifier not in created_identifiers and
          namespace not in missing_requires)

    # First check all the used identifiers where we know that their namespace
    # needs to be provided (unless they are optional).
    for ns in self._used_namespaces:
      namespace = ns.namespace
      identifier = ns.identifier
      if (not ns.alias_definition and
          ShouldRequireNamespace(namespace, identifier)):
        missing_requires[namespace] = ns.GetLine()

    # Now that all required namespaces are known, we can check if the alias
    # definitions (that are likely being used for typeannotations that don't
    # need explicit goog.require statements) are already covered. If not
    # the user shouldn't use the alias.
    for ns in self._used_namespaces:
      if (not ns.alias_definition or
          not ShouldRequireNamespace(ns.namespace, ns.identifier)):
        continue
      if self._FindNamespace(ns.identifier, self._provided_namespaces,
                             created_identifiers, external_dependencies,
                             missing_requires):
        continue
      namespace = ns.identifier.rsplit('.', 1)[0]
      illegal_alias_statements[namespace] = ns.token

    return missing_requires, illegal_alias_statements
+
+ def _FindNamespace(self, identifier, *namespaces_list):
+ """Finds the namespace of an identifier given a list of other namespaces.
+
+ Args:
+ identifier: An identifier whose parent needs to be defined.
+ e.g. for goog.bar.foo we search something that provides
+ goog.bar.
+ *namespaces_list: var args of iterables of namespace identifiers
+ Returns:
+ The namespace that the given identifier is part of or None.
+ """
+ identifier = identifier.rsplit('.', 1)[0]
+ identifier_prefix = identifier + '.'
+ for namespaces in namespaces_list:
+ for namespace in namespaces:
+ if namespace == identifier or namespace.startswith(identifier_prefix):
+ return namespace
+ return None
+
+ def _IsPrivateIdentifier(self, identifier):
+ """Returns whether the given identifier is private."""
+ pieces = identifier.split('.')
+ for piece in pieces:
+ if piece.endswith('_'):
+ return True
+ return False
+
+ def IsFirstProvide(self, token):
+ """Returns whether token is the first provide token."""
+ return self._provide_tokens and token == self._provide_tokens[0]
+
+ def IsFirstRequire(self, token):
+ """Returns whether token is the first require token."""
+ return self._require_tokens and token == self._require_tokens[0]
+
+ def IsLastProvide(self, token):
+ """Returns whether token is the last provide token."""
+ return self._provide_tokens and token == self._provide_tokens[-1]
+
+ def IsLastRequire(self, token):
+ """Returns whether token is the last require token."""
+ return self._require_tokens and token == self._require_tokens[-1]
+
+ def ProcessToken(self, token, state_tracker):
+ """Processes the given token for dependency information.
+
+ Args:
+ token: The token to process.
+ state_tracker: The JavaScript state tracker.
+ """
+
+ # Note that this method is in the critical path for the linter and has been
+ # optimized for performance in the following ways:
+ # - Tokens are checked by type first to minimize the number of function
+ # calls necessary to determine if action needs to be taken for the token.
+ # - The most common tokens types are checked for first.
+ # - The number of function calls has been minimized (thus the length of this
+ # function).
+
+ if token.type == TokenType.IDENTIFIER:
+ # TODO(user): Consider saving the whole identifier in metadata.
+ whole_identifier_string = tokenutil.GetIdentifierForToken(token)
+ if whole_identifier_string is None:
+ # We only want to process the identifier one time. If the whole string
+ # identifier is None, that means this token was part of a multi-token
+ # identifier, but it was not the first token of the identifier.
+ return
+
+ # In the odd case that a goog.require is encountered inside a function,
+ # just ignore it (e.g. dynamic loading in test runners).
+ if token.string == 'goog.require' and not state_tracker.InFunction():
+ self._require_tokens.append(token)
+ namespace = tokenutil.GetStringAfterToken(token)
+ if namespace in self._required_namespaces:
+ self._duplicate_require_tokens.append(token)
+ else:
+ self._required_namespaces.append(namespace)
+
+ # If there is a suppression for the require, add a usage for it so it
+ # gets treated as a regular goog.require (i.e. still gets sorted).
+ if self._HasSuppression(state_tracker, 'extraRequire'):
+ self._suppressed_requires.append(namespace)
+ self._AddUsedNamespace(state_tracker, namespace, token)
+
+ elif token.string == 'goog.provide':
+ self._provide_tokens.append(token)
+ namespace = tokenutil.GetStringAfterToken(token)
+ if namespace in self._provided_namespaces:
+ self._duplicate_provide_tokens.append(token)
+ else:
+ self._provided_namespaces.append(namespace)
+
+ # If there is a suppression for the provide, add a creation for it so it
+ # gets treated as a regular goog.provide (i.e. still gets sorted).
+ if self._HasSuppression(state_tracker, 'extraProvide'):
+ self._AddCreatedNamespace(state_tracker, namespace, token.line_number)
+
+ elif token.string == 'goog.scope':
+ self._scopified_file = True
+
+ elif token.string == 'goog.setTestOnly':
+
+ # Since the message is optional, we don't want to scan to later lines.
+ for t in tokenutil.GetAllTokensInSameLine(token):
+ if t.type == TokenType.STRING_TEXT:
+ message = t.string
+
+ if re.match(r'^\w+(\.\w+)+$', message):
+ # This looks like a namespace. If it's a Closurized namespace,
+ # consider it created.
+ base_namespace = message.split('.', 1)[0]
+ if base_namespace in self._closurized_namespaces:
+ self._AddCreatedNamespace(state_tracker, message,
+ token.line_number)
+
+ break
+ else:
+ jsdoc = state_tracker.GetDocComment()
+ if token.metadata and token.metadata.aliased_symbol:
+ whole_identifier_string = token.metadata.aliased_symbol
+ elif (token.string == 'goog.module.get' and
+ not self._HasSuppression(state_tracker, 'extraRequire')):
+ # Cannot use _AddUsedNamespace as this is not an identifier, but
+ # already the entire namespace that's required.
+ namespace = tokenutil.GetStringAfterToken(token)
+ namespace = UsedNamespace(namespace, namespace, token,
+ alias_definition=False)
+ self._used_namespaces.append(namespace)
+ if jsdoc and jsdoc.HasFlag('typedef'):
+ self._AddCreatedNamespace(state_tracker, whole_identifier_string,
+ token.line_number,
+ namespace=self.GetClosurizedNamespace(
+ whole_identifier_string))
+ else:
+ is_alias_definition = (token.metadata and
+ token.metadata.is_alias_definition)
+ self._AddUsedNamespace(state_tracker, whole_identifier_string,
+ token, is_alias_definition)
+
+ elif token.type == TokenType.SIMPLE_LVALUE:
+ identifier = token.values['identifier']
+ start_token = tokenutil.GetIdentifierStart(token)
+ if start_token and start_token != token:
+ # Multi-line identifier being assigned. Get the whole identifier.
+ identifier = tokenutil.GetIdentifierForToken(start_token)
+ else:
+ start_token = token
+ # If an alias is defined on the start_token, use it instead.
+ if (start_token and
+ start_token.metadata and
+ start_token.metadata.aliased_symbol and
+ not start_token.metadata.is_alias_definition):
+ identifier = start_token.metadata.aliased_symbol
+
+ if identifier:
+ namespace = self.GetClosurizedNamespace(identifier)
+ if state_tracker.InFunction():
+ self._AddUsedNamespace(state_tracker, identifier, token)
+ elif namespace and namespace != 'goog':
+ self._AddCreatedNamespace(state_tracker, identifier,
+ token.line_number, namespace=namespace)
+
+ elif token.type == TokenType.DOC_FLAG:
+ flag = token.attached_object
+ flag_type = flag.flag_type
+ if flag and flag.HasType() and flag.jstype:
+ is_interface = state_tracker.GetDocComment().HasFlag('interface')
+ if flag_type == 'implements' or (flag_type == 'extends'
+ and is_interface):
+ identifier = flag.jstype.alias or flag.jstype.identifier
+ self._AddUsedNamespace(state_tracker, identifier, token)
+ # Since we process doctypes only for implements and extends, the
+ # type is a simple one and we don't need any iteration for subtypes.
+
+ def _AddCreatedNamespace(self, state_tracker, identifier, line_number,
+ namespace=None):
+ """Adds the namespace of an identifier to the list of created namespaces.
+
+ If the identifier is annotated with a 'missingProvide' suppression, it is
+ not added.
+
+ Args:
+ state_tracker: The JavaScriptStateTracker instance.
+ identifier: The identifier to add.
+ line_number: Line number where namespace is created.
+ namespace: The namespace of the identifier or None if the identifier is
+ also the namespace.
+ """
+ if not namespace:  # The identifier is itself the namespace.
+ namespace = identifier
+
+ if self._HasSuppression(state_tracker, 'missingProvide'):
+ return
+
+ self._created_namespaces.append([namespace, identifier, line_number])
+
+ def _AddUsedNamespace(self, state_tracker, identifier, token,
+ is_alias_definition=False):
+ """Adds the namespace of an identifier to the list of used namespaces.
+
+ If the identifier is annotated with a 'missingRequire' suppression, it is
+ not added.
+
+ Args:
+ state_tracker: The JavaScriptStateTracker instance.
+ identifier: An identifier which has been used.
+ token: The token in which the namespace is used.
+ is_alias_definition: If the used namespace is part of an alias_definition.
+ Aliased symbols need their parent namespace to be available; if it is
+ not yet required through another symbol, an error will be thrown.
+ """
+ if self._HasSuppression(state_tracker, 'missingRequire'):
+ return
+
+ namespace = self.GetClosurizedNamespace(identifier)
+ # b/5362203 If it's a variable in scope then it's not a required namespace.
+ if namespace and not state_tracker.IsVariableInScope(namespace):
+ namespace = UsedNamespace(namespace, identifier, token,
+ is_alias_definition)
+ self._used_namespaces.append(namespace)
+
+ def _HasSuppression(self, state_tracker, suppression):  # True iff the current doc comment suppresses |suppression|.
+ jsdoc = state_tracker.GetDocComment()
+ return jsdoc and suppression in jsdoc.suppressions
+
+ def GetClosurizedNamespace(self, identifier):
+ """Given an identifier, returns the namespace that identifier is from.
+
+ Args:
+ identifier: The identifier to extract a namespace from.
+
+ Returns:
+ The namespace the given identifier resides in, or None if one could not
+ be found.
+ """
+ if identifier.startswith('goog.global'):
+ # Ignore goog.global, since it is, by definition, global.
+ return None
+
+ parts = identifier.split('.')
+ for namespace in self._closurized_namespaces:
+ if not identifier.startswith(namespace + '.'):
+ continue
+
+ # The namespace for a class is the shortest prefix ending in a class
+ # name, which starts with a capital letter but is not written in all
+ # caps (an all-caps part is treated as a constant or enum below).
+ # We ultimately do not want to allow requiring or providing of inner
+ # classes/enums. Instead, a file should provide only the top-level class
+ # and users should require only that.
+ namespace = []
+ for part in parts:
+ if part == 'prototype' or part.isupper():
+ return '.'.join(namespace)
+ namespace.append(part)
+ if part[0].isupper():
+ return '.'.join(namespace)
+
+ # At this point, we know there's no class or enum, so the namespace is
+ # just the identifier with the last part removed. With the exception of
+ # apply, inherits, and call, which should also be stripped.
+ if parts[-1] in ('apply', 'inherits', 'call'):
+ parts.pop()
+ parts.pop()
+
+ # If the last part ends with an underscore, it is a private variable,
+ # method, or enum. The namespace is whatever is before it.
+ if parts and parts[-1].endswith('_'):
+ parts.pop()
+
+ return '.'.join(parts)
+
+ return None
diff --git a/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py b/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py
new file mode 100644
index 0000000000..7aeae21956
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py
@@ -0,0 +1,873 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for ClosurizedNamespacesInfo."""
+
+
+
+import unittest as googletest
+from closure_linter import aliaspass
+from closure_linter import closurizednamespacesinfo
+from closure_linter import ecmametadatapass
+from closure_linter import javascriptstatetracker
+from closure_linter import javascripttokens
+from closure_linter import testutil
+from closure_linter import tokenutil
+
+# pylint: disable=g-bad-name
+TokenType = javascripttokens.JavaScriptTokenType
+
+
+def _ToLineDict(illegal_alias_stmts):
+ """Maps each key to its token's line number (Python 2 dict.iteritems)."""
+ return {k: v.line_number for k, v in illegal_alias_stmts.iteritems()}
+
+
+class ClosurizedNamespacesInfoTest(googletest.TestCase):
+ """Tests for ClosurizedNamespacesInfo."""
+
+ _test_cases = {
+ 'goog.global.anything': None,
+ 'package.CONSTANT': 'package',
+ 'package.methodName': 'package',
+ 'package.subpackage.methodName': 'package.subpackage',
+ 'package.subpackage.methodName.apply': 'package.subpackage',
+ 'package.ClassName.something': 'package.ClassName',
+ 'package.ClassName.Enum.VALUE.methodName': 'package.ClassName',
+ 'package.ClassName.CONSTANT': 'package.ClassName',
+ 'package.namespace.CONSTANT.methodName': 'package.namespace',
+ 'package.ClassName.inherits': 'package.ClassName',
+ 'package.ClassName.apply': 'package.ClassName',
+ 'package.ClassName.methodName.apply': 'package.ClassName',
+ 'package.ClassName.methodName.call': 'package.ClassName',
+ 'package.ClassName.prototype.methodName': 'package.ClassName',
+ 'package.ClassName.privateMethod_': 'package.ClassName',
+ 'package.className.privateProperty_': 'package.className',
+ 'package.className.privateProperty_.methodName': 'package.className',
+ 'package.ClassName.PrivateEnum_': 'package.ClassName',
+ 'package.ClassName.prototype.methodName.apply': 'package.ClassName',
+ 'package.ClassName.property.subProperty': 'package.ClassName',
+ 'package.className.prototype.something.somethingElse': 'package.className'
+ }
+
+ def testGetClosurizedNamespace(self):
+ """Tests that the correct namespace is returned for various identifiers."""
+ namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
+ closurized_namespaces=['package'], ignored_extra_namespaces=[])
+ for identifier, expected_namespace in self._test_cases.items():
+ actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)
+ self.assertEqual(
+ expected_namespace,
+ actual_namespace,
+ 'expected namespace "' + str(expected_namespace) +
+ '" for identifier "' + str(identifier) + '" but was "' +
+ str(actual_namespace) + '"')
+
+ def testIgnoredExtraNamespaces(self):
+ """Tests that ignored_extra_namespaces are ignored."""
+ token = self._GetRequireTokens('package.Something')
+ namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
+ closurized_namespaces=['package'],
+ ignored_extra_namespaces=['package.Something'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Should be valid since it is in ignored namespaces.')
+
+ namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
+ ['package'], [])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'Should be invalid since it is not in ignored namespaces.')
+
+ def testIsExtraProvide_created(self):
+ """Tests that provides for created namespaces are not extra."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo = function() {};'
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraProvide(token),
+ 'Should not be extra since it is created.')
+
+ def testIsExtraProvide_createdIdentifier(self):
+ """Tests that provides for created identifiers are not extra."""
+ input_lines = [
+ 'goog.provide(\'package.Foo.methodName\');',
+ 'package.Foo.methodName = function() {};'
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraProvide(token),
+ 'Should not be extra since it is created.')
+
+ def testIsExtraProvide_notCreated(self):
+ """Tests that provides for non-created namespaces are extra."""
+ input_lines = ['goog.provide(\'package.Foo\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertTrue(namespaces_info.IsExtraProvide(token),
+ 'Should be extra since it is not created.')
+
+ def testIsExtraProvide_notCreatedMultipartClosurizedNamespace(self):
+ """Tests that provides for non-created namespaces are extra."""
+ input_lines = ['goog.provide(\'multi.part.namespace.Foo\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['multi.part'])
+
+ self.assertTrue(namespaces_info.IsExtraProvide(token),
+ 'Should be extra since it is not created.')
+
+ def testIsExtraProvide_duplicate(self):
+ """Tests that providing a namespace twice makes the second one extra."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo = function() {};'
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ # Advance to the second goog.provide token.
+ token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
+
+ self.assertTrue(namespaces_info.IsExtraProvide(token),
+ 'Should be extra since it is already provided.')
+
+ def testIsExtraProvide_notClosurized(self):
+ """Tests that provides of non-closurized namespaces are not extra."""
+ input_lines = ['goog.provide(\'notclosurized.Foo\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraProvide(token),
+ 'Should not be extra since it is not closurized.')
+
+ def testIsExtraRequire_used(self):
+ """Tests that requires for used namespaces are not extra."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'var x = package.Foo.methodName();'
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Should not be extra since it is used.')
+
+ def testIsExtraRequire_usedIdentifier(self):
+ """Tests that requires for used methods on classes are extra."""
+ input_lines = [
+ 'goog.require(\'package.Foo.methodName\');',
+ 'var x = package.Foo.methodName();'
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'Should require the package, not the method specifically.')
+
+ def testIsExtraRequire_notUsed(self):
+ """Tests that requires for unused namespaces are extra."""
+ input_lines = ['goog.require(\'package.Foo\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'Should be extra since it is not used.')
+
+ def testIsExtraRequire_notUsedMultiPartClosurizedNamespace(self):
+ """Tests unused require with multi-part closurized namespaces."""
+
+ input_lines = ['goog.require(\'multi.part.namespace.Foo\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['multi.part'])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'Should be extra since it is not used.')
+
+ def testIsExtraRequire_notClosurized(self):
+ """Tests that requires of non-closurized namespaces are not extra."""
+ input_lines = ['goog.require(\'notclosurized.Foo\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Should not be extra since it is not closurized.')
+
+ def testIsExtraRequire_objectOnClass(self):
+ """Tests that requiring an object on a class is extra."""
+ input_lines = [
+ 'goog.require(\'package.Foo.Enum\');',
+ 'var x = package.Foo.Enum.VALUE1;',
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'The whole class, not the object, should be required.');
+
+ def testIsExtraRequire_constantOnClass(self):
+ """Tests that requiring a constant on a class is extra."""
+ input_lines = [
+ 'goog.require(\'package.Foo.CONSTANT\');',
+ 'var x = package.Foo.CONSTANT',
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'The class, not the constant, should be required.');
+
+ def testIsExtraRequire_constantNotOnClass(self):
+ """Tests that requiring a constant not on a class is OK."""
+ input_lines = [
+ 'goog.require(\'package.subpackage.CONSTANT\');',
+ 'var x = package.subpackage.CONSTANT',
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Constants can be required except on classes.');
+
+ def testIsExtraRequire_methodNotOnClass(self):
+ """Tests that requiring a method not on a class is OK."""
+ input_lines = [
+ 'goog.require(\'package.subpackage.method\');',
+ 'var x = package.subpackage.method()',
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Methods can be required except on classes.');
+
+ def testIsExtraRequire_defaults(self):
+ """Tests that there are no warnings about extra requires for test utils."""
+ input_lines = ['goog.require(\'goog.testing.jsunit\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['goog'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Should not be extra since it is for testing.')
+
+ def testGetMissingProvides_provided(self):
+ """Tests that provided functions don't cause a missing provide."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo = function() {};'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingProvides_providedIdentifier(self):
+ """Tests that provided identifiers don't cause a missing provide."""
+ input_lines = [
+ 'goog.provide(\'package.Foo.methodName\');',
+ 'package.Foo.methodName = function() {};'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingProvides_providedParentIdentifier(self):
+ """Tests that provided identifiers on a class don't cause a missing provide
+ on objects attached to that class."""
+ input_lines = [
+ 'goog.provide(\'package.foo.ClassName\');',
+ 'package.foo.ClassName.methodName = function() {};',
+ 'package.foo.ClassName.ObjectName = 1;',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingProvides_unprovided(self):
+ """Tests that unprovided functions cause a missing provide."""
+ input_lines = ['package.Foo = function() {};']
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+
+ missing_provides = namespaces_info.GetMissingProvides()
+ self.assertEquals(1, len(missing_provides))
+ missing_provide = missing_provides.popitem()
+ self.assertEquals('package.Foo', missing_provide[0])
+ self.assertEquals(1, missing_provide[1])
+
+ def testGetMissingProvides_privatefunction(self):
+ """Tests that unprovided private functions don't cause a missing provide."""
+ input_lines = ['package.Foo_ = function() {};']
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingProvides_required(self):
+ """Tests that required namespaces don't cause a missing provide."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'package.Foo.methodName = function() {};'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingRequires_required(self):
+ """Tests that required namespaces don't cause a missing require."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'package.Foo();'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(0, len(missing_requires))
+
+ def testGetMissingRequires_requiredIdentifier(self):
+ """Tests that required namespaces satisfy identifiers on that namespace."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'package.Foo.methodName();'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(0, len(missing_requires))
+
+ def testGetMissingRequires_requiredNamespace(self):
+ """Tests that required namespaces satisfy the namespace."""
+ input_lines = [
+ 'goog.require(\'package.soy.fooTemplate\');',
+ 'render(package.soy.fooTemplate);'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(0, len(missing_requires))
+
+ def testGetMissingRequires_requiredParentClass(self):
+ """Tests that requiring a parent class of an object is sufficient to prevent
+ a missing require on that object."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'package.Foo.methodName();',
+ 'package.Foo.methodName(package.Foo.ObjectName);'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(0, len(missing_requires))
+
+ def testGetMissingRequires_unrequired(self):
+ """Tests that unrequired namespaces cause a missing require."""
+ input_lines = ['package.Foo();']
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(1, len(missing_requires))
+ missing_req = missing_requires.popitem()
+ self.assertEquals('package.Foo', missing_req[0])
+ self.assertEquals(1, missing_req[1])
+
+ def testGetMissingRequires_provided(self):
+ """Tests that provided namespaces satisfy identifiers on that namespace."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo.methodName();'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(0, len(missing_requires))
+
+ def testGetMissingRequires_created(self):
+ """Tests that created namespaces do not satisfy usage of an identifier."""
+ input_lines = [
+ 'package.Foo = function();',
+ 'package.Foo.methodName();',
+ 'package.Foo.anotherMethodName1();',
+ 'package.Foo.anotherMethodName2();'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(1, len(missing_requires))
+ missing_require = missing_requires.popitem()
+ self.assertEquals('package.Foo', missing_require[0])
+ # Make sure line number of first occurrence is reported
+ self.assertEquals(2, missing_require[1])
+
+ def testGetMissingRequires_createdIdentifier(self):
+ """Tests that created identifiers satisfy usage of the identifier."""
+ input_lines = [
+ 'package.Foo.methodName = function();',
+ 'package.Foo.methodName();'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(0, len(missing_requires))
+
+ def testGetMissingRequires_implements(self):
+ """Tests that a parametrized type requires the correct identifier."""
+ input_lines = [
+ '/** @constructor @implements {package.Bar<T>} */',
+ 'package.Foo = function();',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertItemsEqual({'package.Bar': 1}, missing_requires)
+
+ def testGetMissingRequires_objectOnClass(self):
+ """Tests that we should require a class, not the object on the class."""
+ input_lines = [
+ 'goog.require(\'package.Foo.Enum\');',
+ 'var x = package.Foo.Enum.VALUE1;',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(1, len(missing_requires),
+ 'The whole class, not the object, should be required.')
+
+ def testGetMissingRequires_variableWithSameName(self):
+ """Tests that we should not goog.require variables and parameters.
+
+ b/5362203 Variables in scope are not missing namespaces.
+ """
+ input_lines = [
+ 'goog.provide(\'Foo\');',
+ 'Foo.A = function();',
+ 'Foo.A.prototype.method = function(ab) {',
+ ' if (ab) {',
+ ' var docs;',
+ ' var lvalue = new Obj();',
+ ' // Variable in scope hence not goog.require here.',
+ ' docs.foo.abc = 1;',
+ ' lvalue.next();',
+ ' }',
+ ' // Since js is function scope this should also not goog.require.',
+ ' docs.foo.func();',
+ ' // Its not a variable in scope hence goog.require.',
+ ' dummy.xyz.reset();',
+ ' return this.method2();',
+ '};',
+ 'Foo.A.prototype.method1 = function(docs, abcd, xyz) {',
+ ' // Parameter hence not goog.require.',
+ ' docs.nodes.length = 2;',
+ ' lvalue.abc.reset();',
+ '};'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['Foo',
+ 'docs',
+ 'lvalue',
+ 'dummy'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(2, len(missing_requires))
+ self.assertItemsEqual(
+ {'dummy.xyz': 14,
+ 'lvalue.abc': 20}, missing_requires)
+
+ def testIsFirstProvide(self):
+ """Tests operation of the isFirstProvide method."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo.methodName();'
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+ self.assertTrue(namespaces_info.IsFirstProvide(token))
+
+ def testGetWholeIdentifierString(self):
+ """Tests that a multi-line identifier is reassembled into a single string."""
+ input_lines = [
+ 'package.Foo.',
+ ' veryLong.',
+ ' identifier;'
+ ]
+
+ token = testutil.TokenizeSource(input_lines)
+
+ self.assertEquals('package.Foo.veryLong.identifier',
+ tokenutil.GetIdentifierForToken(token))
+
+ self.assertEquals(None,
+ tokenutil.GetIdentifierForToken(token.next))
+
+ def testScopified(self):
+ """Tests that a goog.scope call is noticed."""
+ input_lines = [
+ 'goog.scope(function() {',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ self.assertTrue(namespaces_info._scopified_file)
+
+ def testScope_unusedAlias(self):
+ """Tests that an unused alias symbol is illegal."""
+ input_lines = [
+ 'goog.scope(function() {',
+ 'var Event = goog.events.Event;',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
+
+ def testScope_usedMultilevelAlias(self):
+ """Tests that a used alias symbol in a deep namespace is ok."""
+ input_lines = [
+ 'goog.require(\'goog.Events\');',
+ 'goog.scope(function() {',
+ 'var Event = goog.Events.DeepNamespace.Event;',
+ 'Event();',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({}, illegal_alias_stmts)
+
+ def testScope_usedAlias(self):
+ """Tests that aliased symbols result in correct requires."""
+ input_lines = [
+ 'goog.scope(function() {',
+ 'var Event = goog.events.Event;',
+ 'var dom = goog.dom;',
+ 'Event(dom.classes.get);',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, illegal_alias_stmts)
+ self.assertEquals({'goog.dom.classes': 4, 'goog.events.Event': 4},
+ missing_requires)
+
+ def testModule_alias(self):
+ """Tests that goog.module style aliases are supported."""
+ input_lines = [
+ 'goog.module(\'test.module\');',
+ 'var Unused = goog.require(\'goog.Unused\');',
+ 'var AliasedClass = goog.require(\'goog.AliasedClass\');',
+ 'var x = new AliasedClass();',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ namespaceToken = self._GetRequireTokens('goog.AliasedClass')
+ self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
+ 'AliasedClass should be marked as used')
+ unusedToken = self._GetRequireTokens('goog.Unused')
+ self.assertTrue(namespaces_info.IsExtraRequire(unusedToken),
+ 'Unused should be marked as not used')
+
+ def testModule_aliasInScope(self):
+ """Tests that goog.module style aliases are resolved inside goog.scope."""
+ input_lines = [
+ 'goog.module(\'test.module\');',
+ 'var AliasedClass = goog.require(\'goog.AliasedClass\');',
+ 'goog.scope(function() {',
+ 'var x = new AliasedClass();',
+ '});',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ namespaceToken = self._GetRequireTokens('goog.AliasedClass')
+ self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
+ 'AliasedClass should be marked as used')
+
+ def testModule_getAlwaysProvided(self):
+ """Tests that goog.module.get is recognized as a built-in."""
+ input_lines = [
+ 'goog.provide(\'test.MyClass\');',
+ 'goog.require(\'goog.someModule\');',
+ 'goog.scope(function() {',
+ 'var someModule = goog.module.get(\'goog.someModule\');',
+ 'test.MyClass = function() {};',
+ '});',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ self.assertEquals({}, namespaces_info.GetMissingRequires()[0])
+
+ def testModule_requireForGet(self):
+ """Tests that goog.module.get needs a goog.require call."""
+ input_lines = [
+ 'goog.provide(\'test.MyClass\');',
+ 'function foo() {',
+ ' var someModule = goog.module.get(\'goog.someModule\');',
+ ' someModule.doSth();',
+ '}',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ self.assertEquals({'goog.someModule': 3},
+ namespaces_info.GetMissingRequires()[0])
+
+ def testScope_usedTypeAlias(self):
+ """Tests aliased symbols in type annotations."""
+ input_lines = [
+ 'goog.scope(function() {',
+ 'var Event = goog.events.Event;',
+ '/** @type {Event} */;',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
+
+ def testScope_partialAlias_typeOnly(self):
+ """Tests a partial alias only used in type annotations.
+
+ In this example, some goog.events namespace would need to be required
+ so that evaluating goog.events.bar doesn't throw an error.
+ """
+ input_lines = [
+ 'goog.scope(function() {',
+ 'var bar = goog.events.bar;',
+ '/** @type {bar.Foo} */;',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
+
+ def testScope_partialAlias(self):
+ """Tests a partial alias in conjunction with a type annotation.
+
+ In this example, the partial alias is already defined by another type,
+ therefore the doc-only type doesn't need to be required.
+ """
+ input_lines = [
+ 'goog.scope(function() {',
+ 'var bar = goog.events.bar;',
+ '/** @type {bar.Event} */;',
+ 'bar.EventType();'
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({'goog.events.bar.EventType': 4}, missing_requires)
+ self.assertEquals({}, illegal_alias_stmts)
+
  def testScope_partialAliasRequires(self):
    """Tests partial aliases with correct requires."""
    # NOTE(review): no comma after 'bar.EventType();' -- implicit string
    # concatenation merges it with '});' into one source line. Confirm this
    # is intentional before adding the comma, since parsed line numbers
    # depend on it.
    input_lines = [
        'goog.require(\'goog.events.bar.EventType\');',
        'goog.scope(function() {',
        'var bar = goog.events.bar;',
        '/** @type {bar.Event} */;',
        'bar.EventType();'
        '});'
    ]

    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
    # The explicit goog.require satisfies the aliased bar.EventType usage.
    self.assertEquals({}, missing_requires)
    self.assertEquals({}, illegal_alias_stmts)
+
  def testScope_partialAliasRequiresBoth(self):
    """Tests partial aliases with correct requires."""
    # NOTE(review): no comma after 'bar.EventType();' -- implicit string
    # concatenation joins it with '});'. Confirm before "fixing".
    input_lines = [
        'goog.require(\'goog.events.bar.Event\');',
        'goog.require(\'goog.events.bar.EventType\');',
        'goog.scope(function() {',
        'var bar = goog.events.bar;',
        '/** @type {bar.Event} */;',
        'bar.EventType();'
        '});'
    ]

    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
    self.assertEquals({}, missing_requires)
    self.assertEquals({}, illegal_alias_stmts)
    # goog.events.bar.Event is only referenced from a doc comment, so its
    # require is reported as extra.
    event_token = self._GetRequireTokens('goog.events.bar.Event')
    self.assertTrue(namespaces_info.IsExtraRequire(event_token))
+
  def testScope_partialAliasNoSubtypeRequires(self):
    """Tests that partial aliases don't yield subtype requires (regression)."""
    # NOTE(review): commas are missing after three of the strings below, so
    # implicit concatenation collapses the last four statements into a single
    # source line:
    #   'var Foo = goog.events.Foo;Foo.CssName_ = {};var CssName_ = ...});'
    # Confirm this is intentional before adding commas -- line numbers and
    # statement grouping in the parsed script depend on it.
    input_lines = [
        'goog.provide(\'goog.events.Foo\');',
        'goog.scope(function() {',
        'goog.events.Foo = {};',
        'var Foo = goog.events.Foo;'
        'Foo.CssName_ = {};'
        'var CssName_ = Foo.CssName_;'
        '});'
    ]

    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
    missing_requires, _ = namespaces_info.GetMissingRequires()
    self.assertEquals({}, missing_requires)
+
+ def testScope_aliasNamespace(self):
+ """Tests that an unused alias namespace is not required when available.
+
+ In the example goog.events.Bar is not required, because the namespace
+ goog.events is already defined because goog.events.Foo is required.
+ """
+ input_lines = [
+ 'goog.require(\'goog.events.Foo\');',
+ 'goog.scope(function() {',
+ 'var Bar = goog.events.Bar;',
+ '/** @type {Bar} */;',
+ 'goog.events.Foo;',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({}, illegal_alias_stmts)
+
+ def testScope_aliasNamespaceIllegal(self):
+ """Tests that an unused alias namespace is not required when available."""
+ input_lines = [
+ 'goog.scope(function() {',
+ 'var Bar = goog.events.Bar;',
+ '/** @type {Bar} */;',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
+
+ def testScope_provides(self):
+ """Tests that aliased symbols result in correct provides."""
+ input_lines = [
+ 'goog.scope(function() {',
+ 'goog.bar = {};',
+ 'var bar = goog.bar;',
+ 'bar.Foo = {};',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_provides = namespaces_info.GetMissingProvides()
+ self.assertEquals({'goog.bar.Foo': 4}, missing_provides)
+ _, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, illegal_alias_stmts)
+
+ def testSetTestOnlyNamespaces(self):
+ """Tests that a namespace in setTestOnly makes it a valid provide."""
+ namespaces_info = self._GetNamespacesInfoForScript([
+ 'goog.setTestOnly(\'goog.foo.barTest\');'
+ ], ['goog'])
+
+ token = self._GetProvideTokens('goog.foo.barTest')
+ self.assertFalse(namespaces_info.IsExtraProvide(token))
+
+ token = self._GetProvideTokens('goog.foo.bazTest')
+ self.assertTrue(namespaces_info.IsExtraProvide(token))
+
+ def testSetTestOnlyComment(self):
+ """Ensure a comment in setTestOnly does not cause a created namespace."""
+ namespaces_info = self._GetNamespacesInfoForScript([
+ 'goog.setTestOnly(\'this is a comment\');'
+ ], ['goog'])
+
+ self.assertEquals(
+ [], namespaces_info._created_namespaces,
+ 'A comment in setTestOnly should not modify created namespaces.')
+
+ def _GetNamespacesInfoForScript(self, script, closurized_namespaces=None):
+ _, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ script, closurized_namespaces)
+
+ return namespaces_info
+
+ def _GetStartTokenAndNamespacesInfoForScript(
+ self, script, closurized_namespaces):
+
+ token = testutil.TokenizeSource(script)
+ return token, self._GetInitializedNamespacesInfo(
+ token, closurized_namespaces, [])
+
  def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
                                    ignored_extra_namespaces):
    """Returns a namespaces info initialized with the given token stream.

    Args:
      token: First token of the token stream to process.
      closurized_namespaces: Sequence of namespace prefixes (e.g. ['goog'])
          treated as closurized.
      ignored_extra_namespaces: Namespaces excluded from extra-require
          reporting.

    Returns:
      A ClosurizedNamespacesInfo that has processed every token.
    """
    namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
        closurized_namespaces=closurized_namespaces,
        ignored_extra_namespaces=ignored_extra_namespaces)
    state_tracker = javascriptstatetracker.JavaScriptStateTracker()

    # NOTE(review): the three passes below appear order-dependent (metadata,
    # then doc flags, then alias resolution) -- preserve this ordering.
    ecma_pass = ecmametadatapass.EcmaMetaDataPass()
    ecma_pass.Process(token)

    state_tracker.DocFlagPass(token, error_handler=None)

    alias_pass = aliaspass.AliasPass(closurized_namespaces)
    alias_pass.Process(token)

    # Feed every token through the state tracker and the namespaces info.
    while token:
      state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
      namespaces_info.ProcessToken(token, state_tracker)
      state_tracker.HandleAfterToken(token)
      token = token.next

    return namespaces_info
+
+ def _GetProvideTokens(self, namespace):
+ """Returns a list of tokens for a goog.require of the given namespace."""
+ line_text = 'goog.require(\'' + namespace + '\');\n'
+ return testutil.TokenizeSource([line_text])
+
+ def _GetRequireTokens(self, namespace):
+ """Returns a list of tokens for a goog.require of the given namespace."""
+ line_text = 'goog.require(\'' + namespace + '\');\n'
+ return testutil.TokenizeSource([line_text])
+
# Allow running this test module directly.
if __name__ == '__main__':
  googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/common/__init__.py b/tools/closure_linter/build/lib/closure_linter/common/__init__.py
new file mode 100644
index 0000000000..57930436ce
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/__init__.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Package indicator for gjslint.common."""
diff --git a/tools/closure_linter/build/lib/closure_linter/common/error.py b/tools/closure_linter/build/lib/closure_linter/common/error.py
new file mode 100644
index 0000000000..4209c235b8
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/error.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Error object commonly used in linters."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
class Error(object):
  """Object representing a style error."""

  def __init__(self, code, message, token=None, position=None, fix_data=None):
    """Initialize the error object.

    Args:
      code: The numeric error code.
      message: The error message string.
      token: The tokens.Token where the error occurred.
      position: The position of the error within the token.
      fix_data: Data to be used in autofixing. Codes with fix_data are:
          GOOG_REQUIRES_NOT_ALPHABETIZED - List of string value tokens that
          are class names in goog.requires calls.
    """
    self.code = code
    self.message = message
    self.token = token
    self.position = position
    self.fix_data = fix_data
    # Absolute character offset of the error: the token's start index plus
    # the position's offset within the token, when each is known.
    self.start_index = token.start_index if token else 0
    if position:
      self.start_index += position.start

  @staticmethod
  def Compare(a, b):
    """Compare two error objects, by source code order.

    Args:
      a: First error object.
      b: Second error object.

    Returns:
      A Negative/0/Positive number when a is before/the same as/after b.
    """
    # Order primarily by line number, breaking ties on character offset.
    return (a.token.line_number - b.token.line_number or
            a.start_index - b.start_index)
diff --git a/tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py b/tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py
new file mode 100644
index 0000000000..55844ba603
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Linter error handler class that accumulates an array of errors."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
+from closure_linter.common import errorhandler
+
+
class ErrorAccumulator(errorhandler.ErrorHandler):
  """Error handler object that accumulates errors in a list."""

  def __init__(self):
    # Errors are kept in the order in which they were reported.
    self._errors = []

  def HandleError(self, error):
    """Append the error to the list.

    Args:
      error: The error object
    """
    self._errors.append(error)

  def GetErrors(self):
    """Returns the accumulated errors.

    Returns:
      A sequence of errors.
    """
    return self._errors
diff --git a/tools/closure_linter/build/lib/closure_linter/common/errorhandler.py b/tools/closure_linter/build/lib/closure_linter/common/errorhandler.py
new file mode 100644
index 0000000000..764d54d84c
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/errorhandler.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Interface for a linter error handler.
+
+Error handlers aggregate a set of errors from multiple files and can optionally
+perform some action based on the reported errors, for example, logging the error
+or automatically fixing it.
+"""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
class ErrorHandler(object):
  """Error handler interface."""

  def __init__(self):
    # Simulate an abstract class: direct instantiation is forbidden.
    if type(self) is ErrorHandler:
      raise NotImplementedError('class ErrorHandler is abstract')

  def HandleFile(self, filename, first_token):
    """Notifies this ErrorHandler that subsequent errors are in filename.

    Args:
      filename: The file being linted.
      first_token: The first token of the file.
    """

  def HandleError(self, error):
    """Append the error to the list.

    Args:
      error: The error object
    """

  def FinishFile(self):
    """Finishes handling the current file.

    Should be called after all errors in a file have been handled.
    """

  def GetErrors(self):
    """Returns the accumulated errors.

    Returns:
      A sequence of errors.
    """
diff --git a/tools/closure_linter/build/lib/closure_linter/common/erroroutput.py b/tools/closure_linter/build/lib/closure_linter/common/erroroutput.py
new file mode 100644
index 0000000000..149738b5d4
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/erroroutput.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions to format errors."""
+
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)',
+ 'nnaze@google.com (Nathan Naze)')
+
+
def GetUnixErrorOutput(filename, error, new_error=False):
  """Format an error as a UNIX-style 'file:line:(code) message' string.

  Args:
    filename: Name of the file the error was found in.
    error: The error object to format.
    new_error: Whether to mark the error code as a new error.

  Returns:
    The formatted error line.
  """
  # Errors without a token (file-level errors) get an empty line field.
  line_number = '%d' % error.token.line_number if error.token else ''

  error_code = '%04d' % error.code
  if new_error:
    error_code = 'New Error ' + error_code

  return '%s:%s:(%s) %s' % (filename, line_number, error_code, error.message)
+
+
def GetErrorOutput(error, new_error=False):
  """Get a output line for an error in regular format.

  Args:
    error: The error object to format.
    new_error: Whether to prefix the message with 'New Error'.

  Returns:
    A string like 'Line 3, E:0005: message'.
  """
  line = ''
  if error.token:
    line = 'Line %d, ' % error.token.line_number

  code = 'E:%04d' % error.code

  error_message = error.message
  if new_error:
    error_message = 'New Error ' + error_message

  # Bug fix: this previously returned error.message, silently dropping the
  # 'New Error ' prefix computed above whenever new_error was True.
  return '%s%s: %s' % (line, code, error_message)
diff --git a/tools/closure_linter/build/lib/closure_linter/common/filetestcase.py b/tools/closure_linter/build/lib/closure_linter/common/filetestcase.py
new file mode 100644
index 0000000000..7cd83cd1dc
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/filetestcase.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test case that runs a checker on a file, matching errors against annotations.
+
+Runs the given checker on the given file, accumulating all errors. The list
+of errors is then matched against those annotated in the file. Based heavily
+on devtools/javascript/gpylint/full_test.py.
+"""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+import re
+
+import gflags as flags
+import unittest as googletest
+from closure_linter.common import erroraccumulator
+
+
class AnnotatedFileTestCase(googletest.TestCase):
  """Test case to run a linter against a single file."""

  # Matches an all caps letters + underscores error identifier.
  _MESSAGE = {'msg': '[A-Z][A-Z_]+'}
  # Matches a //, an optional line number with a +/- offset, and a list of
  # message IDs; used to pull expected messages out of testdata files.
  # TODO(robbyw): Generalize to use different commenting patterns.
  _EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P<line>[+-]?[0-9]+):)?'
                            r'\s*(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE)

  def __init__(self, filename, lint_callable, converter):
    """Create a single file lint test case.

    Args:
      filename: Filename to test.
      lint_callable: Callable that lints a file. This is usually runner.Run().
      converter: Function taking an error string and returning an error code.
    """
    googletest.TestCase.__init__(self, 'runTest')
    self._converter = converter
    self._lint_callable = lint_callable
    self._messages = []
    self._filename = filename

  def setUp(self):
    flags.FLAGS.dot_on_next_line = True

  def tearDown(self):
    flags.FLAGS.dot_on_next_line = False

  def shortDescription(self):
    """Provides a description for the test."""
    return 'Run linter on %s' % self._filename

  def runTest(self):
    """Runs the test."""
    filename = self._filename
    try:
      stream = open(filename)
    except IOError as ex:
      raise IOError('Could not find testdata resource for %s: %s' %
                    (self._filename, ex))

    expected = self._GetExpectedMessages(stream)
    got = self._ProcessFileAndGetMessages(filename)
    self.assertEqual(expected, got)

  def _GetExpectedMessages(self, stream):
    """Parse a file and get a sorted list of expected messages."""
    messages = []
    for index, text in enumerate(stream):
      match = self._EXPECTED_RE.search(text)
      if not match:
        continue
      line = match.group('line')
      if line is None:
        # No explicit line: the annotation refers to its own line.
        line = index + 1
      elif line[0] in '+-':
        # Relative offset from the annotation's line.
        line = index + 1 + int(line)
      else:
        line = int(line)
      for msg_id in match.group('msgs').split(','):
        # Ignore a spurious message from the license preamble.
        if msg_id != 'WITHOUT':
          messages.append((line, self._converter(msg_id.strip())))
    stream.seek(0)
    messages.sort()
    return messages

  def _ProcessFileAndGetMessages(self, filename):
    """Run the linter on filename; return sorted (line, code) tuples."""
    accumulator = erroraccumulator.ErrorAccumulator()
    self._lint_callable(filename, accumulator)

    # Convert to the tuple format used for comparison with expectations.
    return sorted(
        (error.token.line_number, error.code)
        for error in accumulator.GetErrors())
diff --git a/tools/closure_linter/build/lib/closure_linter/common/htmlutil.py b/tools/closure_linter/build/lib/closure_linter/common/htmlutil.py
new file mode 100644
index 0000000000..26d44c5908
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/htmlutil.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for dealing with HTML."""
+
+__author__ = ('robbyw@google.com (Robert Walker)')
+
+import cStringIO
+import formatter
+import htmllib
+import HTMLParser
+import re
+
+
class ScriptExtractor(htmllib.HTMLParser):
  """Subclass of HTMLParser that extracts script contents from an HTML file.

  Blank lines are inserted in place of non-script content so that line
  numbers in the extracted code match the line numbers of the original HTML.
  """

  def __init__(self):
    """Initialize a ScriptExtractor."""
    htmllib.HTMLParser.__init__(self, formatter.NullFormatter())
    self._text = ''
    self._in_script = False

  def start_script(self, attrs):
    """Internal handler for the start of a script tag.

    Args:
      attrs: The attributes of the script tag, as a list of tuples.
    """
    for attr in attrs:
      if attr[0].lower() == 'src':
        # The tag references external code; there is nothing inline to
        # extract, so stay out of script mode.
        return
    self._in_script = True

  def end_script(self):
    """Internal handler for the end of a script tag."""
    self._in_script = False

  def handle_data(self, data):
    """Internal handler for character data.

    Args:
      data: The character data from the HTML file.
    """
    if not self._in_script:
      self._AppendNewlines(data)
      return
    # If the last line is whitespace only, i.e. it just aligns a closing
    # </script> tag, strip that trailing whitespace.
    if data.rstrip(' \t') != data.rstrip(' \t\n\r\f'):
      data = data.rstrip(' \t')
    self._text += data

  def handle_comment(self, data):
    """Internal handler for HTML comments.

    Args:
      data: The text of the comment.
    """
    self._AppendNewlines(data)

  def _AppendNewlines(self, data):
    """Append as many newlines as data contains, keeping line numbers right.

    Args:
      data: The data to count newlines in.
    """
    # The 'x' padding on both sides makes splitlines count leading and
    # trailing newlines accurately.
    self._text += '\n' * (len(('x' + data + 'x').splitlines()) - 1)

  def GetScriptLines(self):
    """Return the extracted script lines.

    Returns:
      The extracted script lines as a list of strings.
    """
    return self._text.splitlines()
+
+
def GetScriptLines(f):
  """Extract script tag contents from the given HTML file.

  Args:
    f: The HTML file.

  Returns:
    Lines in the HTML file that are from script tags.
  """
  # The HTML parser chokes on text like Array.<!string>, so escape any '<'
  # followed by a non-word, non-slash character with &lt;. Escaping all text
  # inside script tags would be better, but it's a bit of a catch 22.
  markup = re.sub(r'<([^\s\w/])',
                  lambda m: '&lt;%s' % m.group(1),
                  f.read())

  extractor = ScriptExtractor()
  extractor.feed(markup)
  extractor.close()
  return extractor.GetScriptLines()
+
+
def StripTags(str):
  """Returns the string with HTML tags stripped.

  Args:
    str: An html string.

  Returns:
    The html string with all tags stripped. If there was a parse error,
    returns the text successfully parsed so far.
  """
  # Brute force approach: on a parse error, keep the text before the error
  # position, skip the offending character, and retry from there.
  final_text = ''
  while True:
    stripper = _HtmlStripper()
    try:
      stripper.feed(str)
      stripper.close()
    except HTMLParser.HTMLParseError as e:
      final_text += str[:e.offset]
      str = str[e.offset + 1:]
    else:
      final_text += stripper.get_output()
      return final_text
+
+
class _HtmlStripper(HTMLParser.HTMLParser):
  """Simple class to strip tags from HTML.

  Tags are silently ignored; character data is appended to a buffer.
  """

  def __init__(self):
    self.reset()
    # Accumulates all character data seen between tags.
    self._buffer = cStringIO.StringIO()

  def handle_data(self, d):
    self._buffer.write(d)

  def get_output(self):
    return self._buffer.getvalue()
diff --git a/tools/closure_linter/build/lib/closure_linter/common/lintrunner.py b/tools/closure_linter/build/lib/closure_linter/common/lintrunner.py
new file mode 100644
index 0000000000..07842c7bfe
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/lintrunner.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Interface for a lint running wrapper."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
class LintRunner(object):
  """Interface for a lint running wrapper."""

  def __init__(self):
    # Simulate an abstract base class: refuse direct instantiation.
    if type(self) is LintRunner:
      raise NotImplementedError('class LintRunner is abstract')

  def Run(self, filenames, error_handler):
    """Run a linter on the given filenames.

    Args:
      filenames: The filenames to check
      error_handler: An ErrorHandler object

    Returns:
      The error handler, which may have been used to collect error info.
    """
diff --git a/tools/closure_linter/build/lib/closure_linter/common/matcher.py b/tools/closure_linter/build/lib/closure_linter/common/matcher.py
new file mode 100644
index 0000000000..9b4402c671
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/matcher.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Regular expression based JavaScript matcher classes."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+from closure_linter.common import position
+from closure_linter.common import tokens
+
+# Shorthand
+Token = tokens.Token
+Position = position.Position
+
+
class Matcher(object):
  """A token matcher.

  Bundles a regular expression with the token type it produces, the mode
  the tokenizer switches to after a match, and whether the pattern may only
  match at the start of a line.

  Modes allow more advanced grammars to be incorporated, and are also
  necessary to tokenize line by line, e.g. looking for documentation only
  while in comment mode.

  Attributes:
    regex: The regular expression representing this matcher.
    type: The type of token indicated by a successful match.
    result_mode: The mode to move to after a successful match.
  """

  def __init__(self, regex, token_type, result_mode=None, line_start=False):
    """Create a new matcher template.

    Args:
      regex: The regular expression to match.
      token_type: The type of token a successful match indicates.
      result_mode: What mode to change to after a successful match. Defaults
          to None, which means to not change the current mode.
      line_start: Whether this matcher should only match string at the start
          of a line.
    """
    self.line_start = line_start
    self.result_mode = result_mode
    self.type = token_type
    self.regex = regex
diff --git a/tools/closure_linter/build/lib/closure_linter/common/position.py b/tools/closure_linter/build/lib/closure_linter/common/position.py
new file mode 100644
index 0000000000..cebf17ef36
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/position.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Classes to represent positions within strings."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
class Position(object):
  """Object representing a segment of a string.

  Attributes:
    start: The index in to the string where the segment starts.
    length: The length of the string segment.
  """

  def __init__(self, start, length):
    """Initialize the position object.

    Args:
      start: The start index.
      length: The number of characters to include.
    """
    self.start = start
    self.length = length

  def Get(self, string):
    """Returns this range of the given string.

    Args:
      string: The string to slice.

    Returns:
      The string within the range specified by this object.
    """
    end = self.start + self.length
    return string[self.start:end]

  def Set(self, target, source):
    """Sets this range within the target string to the source string.

    Args:
      target: The target string.
      source: The source string.

    Returns:
      The resulting string
    """
    prefix = target[:self.start]
    suffix = target[self.start + self.length:]
    return prefix + source + suffix

  @staticmethod
  def AtEnd(string):
    """Create a Position representing the end of the given string.

    Args:
      string: The string to represent the end of.

    Returns:
      The created Position object.
    """
    return Position(len(string), 0)

  def IsAtEnd(self, string):
    """Returns whether this position is at the end of the given string.

    Args:
      string: The string to test for the end of.

    Returns:
      Whether this position is at the end of the given string.
    """
    return self.length == 0 and self.start == len(string)

  @staticmethod
  def AtBeginning():
    """Create a Position representing the beginning of any string.

    Returns:
      The created Position object.
    """
    return Position(0, 0)

  def IsAtBeginning(self):
    """Returns whether this position is at the beginning of any string.

    Returns:
      Whether this position is at the beginning of any string.
    """
    return self.length == 0 and self.start == 0

  @staticmethod
  def All(string):
    """Create a Position representing the entire string.

    Args:
      string: The string to represent the entirety of.

    Returns:
      The created Position object.
    """
    return Position(0, len(string))

  @staticmethod
  def Index(index):
    """Returns a Position object for the specified index.

    Args:
      index: The index to select, inclusively.

    Returns:
      The created Position object.
    """
    return Position(index, 1)
diff --git a/tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py b/tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py
new file mode 100644
index 0000000000..3402bef3a1
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Determines the list of files to be checked from command line arguments."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+import glob
+import os
+import re
+
+import gflags as flags
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_multistring(
+    'recurse',
+    None,
+    'Recurse in to the subdirectories of the given path',
+    short_name='r')
+# NOTE(review): ('_demos') and ('deps.js') below are plain parenthesized
+# strings, not tuples (no trailing comma). gflags' DEFINE_list accepts a
+# comma-separated string, so each yields the intended single-item default --
+# confirm before "fixing" them into tuples.
+flags.DEFINE_list(
+    'exclude_directories',
+    ('_demos'),
+    'Exclude the specified directories (only applicable along with -r or '
+    '--presubmit)',
+    short_name='e')
+flags.DEFINE_list(
+    'exclude_files',
+    ('deps.js'),
+    'Exclude the specified files',
+    short_name='x')
+
+
+def MatchesSuffixes(filename, suffixes):
+  """Returns whether the given filename matches one of the given suffixes.
+
+  Args:
+    filename: Filename to check.
+    suffixes: Sequence of suffixes to check.
+
+  Returns:
+    Whether the given filename matches one of the given suffixes.
+  """
+  # NOTE(review): if filename contains no '.', rfind returns -1 and the
+  # "suffix" is just the last character of the name -- verify this is the
+  # intended behavior for extensionless files.
+  suffix = filename[filename.rfind('.'):]
+  return suffix in suffixes
+
+
+def _GetUserSpecifiedFiles(argv, suffixes):
+  """Returns files to be linted, specified directly on the command line.
+
+  Can handle the '*' wildcard in filenames, but no other wildcards.
+
+  Args:
+    argv: Sequence of command line arguments. The second and following arguments
+      are assumed to be files that should be linted.
+    suffixes: Expected suffixes for the file type being checked.
+
+  Returns:
+    A sequence of files to be linted.
+  """
+  # argv[1:] is always a list, so the "or []" is redundant but harmless.
+  files = argv[1:] or []
+  all_files = []
+  lint_files = []
+
+  # Perform any necessary globs.
+  for f in files:
+    if f.find('*') != -1:
+      for result in glob.glob(f):
+        all_files.append(result)
+    else:
+      all_files.append(f)
+
+  # Keep only files whose suffix matches the checked file type.
+  for f in all_files:
+    if MatchesSuffixes(f, suffixes):
+      lint_files.append(f)
+  return lint_files
+
+
+def _GetRecursiveFiles(suffixes):
+  """Returns files to be checked specified by the --recurse flag.
+
+  Args:
+    suffixes: Expected suffixes for the file type being checked.
+
+  Returns:
+    A list of files to be checked.
+  """
+  lint_files = []
+  # Perform any requested recursion.
+  if FLAGS.recurse:
+    for start in FLAGS.recurse:
+      # subdirs is unused; os.walk already descends into every directory.
+      for root, subdirs, files in os.walk(start):
+        for f in files:
+          if MatchesSuffixes(f, suffixes):
+            lint_files.append(os.path.join(root, f))
+  return lint_files
+
+
+def GetAllSpecifiedFiles(argv, suffixes):
+  """Returns all files specified by the user on the commandline.
+
+  Args:
+    argv: Sequence of command line arguments. The second and following arguments
+      are assumed to be files that should be linted.
+    suffixes: Expected suffixes for the file type
+
+  Returns:
+    A set (see FilterFiles) of all files specified directly or indirectly
+    (via flags) on the command line by the user.
+  """
+  files = _GetUserSpecifiedFiles(argv, suffixes)
+
+  if FLAGS.recurse:
+    files += _GetRecursiveFiles(suffixes)
+
+  # FilterFiles deduplicates via absolute paths and returns a set.
+  return FilterFiles(files)
+
+
+def FilterFiles(files):
+  """Filters the list of files to be linted by removing any excluded files.
+
+  Filters out files excluded using --exclude_files and --exclude_directories.
+
+  Args:
+    files: Sequence of files that needs filtering.
+
+  Returns:
+    A set of absolute paths of the files to be linted (duplicates removed).
+  """
+  num_files = len(files)
+
+  ignore_dirs_regexs = []
+  for ignore in FLAGS.exclude_directories:
+    ignore_dirs_regexs.append(re.compile(r'(^|[\\/])%s[\\/]' % ignore))
+
+  result_files = []
+  for f in files:
+    add_file = True
+    for exclude in FLAGS.exclude_files:
+      if f.endswith('/' + exclude) or f == exclude:
+        add_file = False
+        break
+    for ignore in ignore_dirs_regexs:
+      if ignore.search(f):
+        # Break out of ignore loop so we don't add to
+        # filtered files.
+        add_file = False
+        break
+    if add_file:
+      # Convert everything to absolute paths so we can easily remove duplicates
+      # using a set.
+      result_files.append(os.path.abspath(f))
+
+  skipped = num_files - len(result_files)
+  if skipped:
+    print 'Skipping %d file(s).' % skipped
+
+  return set(result_files)
+
+
+def GetFileList(argv, file_type, suffixes):
+  """Parse the flags and return the list of files to check.
+
+  Args:
+    argv: Sequence of command line arguments.
+    file_type: Descriptive name of the file type being checked (currently
+      unused).
+    suffixes: Sequence of acceptable suffixes for the file type.
+
+  Returns:
+    The sorted list of files to check.
+  """
+  return sorted(GetAllSpecifiedFiles(argv, suffixes))
+
+
+def IsEmptyArgumentList(argv):
+  """Returns True if no files were given as arguments and --recurse is unset."""
+  return not (len(argv[1:]) or FLAGS.recurse)
diff --git a/tools/closure_linter/build/lib/closure_linter/common/tokenizer.py b/tools/closure_linter/build/lib/closure_linter/common/tokenizer.py
new file mode 100644
index 0000000000..9420ea3267
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/tokenizer.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Regular expression based lexer."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+from closure_linter.common import tokens
+
+# Shorthand
+Type = tokens.TokenType
+
+
+class Tokenizer(object):
+  """General purpose tokenizer.
+
+  Attributes:
+    mode: The latest mode of the tokenizer. This allows patterns to distinguish
+      if they are mid-comment, mid-parameter list, etc.
+    matchers: Dictionary of modes to sequences of matchers that define the
+      patterns to check at any given time.
+    default_types: Dictionary of modes to types, defining what type to give
+      non-matched text when in the given mode. Defaults to Type.NORMAL.
+  """
+
+  def __init__(self, starting_mode, matchers, default_types):
+    """Initialize the tokenizer.
+
+    Args:
+      starting_mode: Mode to start in.
+      matchers: Dictionary of modes to sequences of matchers that defines the
+        patterns to check at any given time.
+      default_types: Dictionary of modes to types, defining what type to give
+        non-matched text when in the given mode. Defaults to Type.NORMAL.
+    """
+    self.__starting_mode = starting_mode
+    self.matchers = matchers
+    self.default_types = default_types
+
+  # NOTE(review): the "file" parameter below shadows the Python 2 builtin.
+  def TokenizeFile(self, file):
+    """Tokenizes the given file.
+
+    Args:
+      file: An iterable that yields one line of the file at a time.
+
+    Returns:
+      The first token in the file
+    """
+    # The current mode.
+    self.mode = self.__starting_mode
+    # The first token in the stream.
+    self.__first_token = None
+    # The last token added to the token stream.
+    self.__last_token = None
+    # The current line number.
+    self.__line_number = 0
+
+    for line in file:
+      self.__line_number += 1
+      self.__TokenizeLine(line)
+
+    return self.__first_token
+
+  def _CreateToken(self, string, token_type, line, line_number, values=None):
+    """Creates a new Token object (or subclass).
+
+    Args:
+      string: The string of input the token represents.
+      token_type: The type of token.
+      line: The text of the line this token is in.
+      line_number: The line number of the token.
+      values: A dict of named values within the token. For instance, a
+        function declaration may have a value called 'name' which captures the
+        name of the function.
+
+    Returns:
+      The newly created Token object.
+    """
+    # line_number is passed twice: tokens created here come straight from the
+    # original file, so it doubles as orig_line_number.
+    return tokens.Token(string, token_type, line, line_number, values,
+                        line_number)
+
+  def __TokenizeLine(self, line):
+    """Tokenizes the given line.
+
+    Args:
+      line: The contents of the line.
+    """
+    string = line.rstrip('\n\r\f')
+    line_number = self.__line_number
+    self.__start_index = 0
+
+    if not string:
+      self.__AddToken(self._CreateToken('', Type.BLANK_LINE, line, line_number))
+      return
+
+    normal_token = ''
+    index = 0
+    while index < len(string):
+      for matcher in self.matchers[self.mode]:
+        if matcher.line_start and index > 0:
+          continue
+
+        match = matcher.regex.match(string, index)
+
+        if match:
+          # Flush any accumulated non-matching text as a NORMAL token first.
+          if normal_token:
+            self.__AddToken(
+                self.__CreateNormalToken(self.mode, normal_token, line,
+                                         line_number))
+            normal_token = ''
+
+          # Add the match.
+          self.__AddToken(self._CreateToken(match.group(), matcher.type, line,
+                                            line_number, match.groupdict()))
+
+          # Change the mode to the correct one for after this match.
+          self.mode = matcher.result_mode or self.mode
+
+          # Shorten the string to be matched.
+          index = match.end()
+
+          break
+
+      else:
+        # If the for loop finishes naturally (i.e. no matches) we just add the
+        # first character to the string of consecutive non match characters.
+        # These will constitute a NORMAL token.
+        if string:
+          normal_token += string[index:index + 1]
+          index += 1
+
+    if normal_token:
+      self.__AddToken(
+          self.__CreateNormalToken(self.mode, normal_token, line, line_number))
+
+  def __CreateNormalToken(self, mode, string, line, line_number):
+    """Creates a normal token.
+
+    Args:
+      mode: The current mode.
+      string: The string to tokenize.
+      line: The line of text.
+      line_number: The line number within the file.
+
+    Returns:
+      A Token object, of the default type for the current mode.
+    """
+    # "type" shadows the builtin here; the scope is local so it is harmless.
+    type = Type.NORMAL
+    if mode in self.default_types:
+      type = self.default_types[mode]
+    return self._CreateToken(string, type, line, line_number)
+
+  def __AddToken(self, token):
+    """Add the given token to the token stream.
+
+    Args:
+      token: The token to add.
+    """
+    # Store the first token, or point the previous token to this one.
+    if not self.__first_token:
+      self.__first_token = token
+    else:
+      self.__last_token.next = token
+
+    # Establish the doubly linked list
+    token.previous = self.__last_token
+    self.__last_token = token
+
+    # Compute the character indices
+    token.start_index = self.__start_index
+    self.__start_index += token.length
diff --git a/tools/closure_linter/build/lib/closure_linter/common/tokens.py b/tools/closure_linter/build/lib/closure_linter/common/tokens.py
new file mode 100644
index 0000000000..4703998752
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/tokens.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Classes to represent tokens and positions within them."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
+class TokenType(object):
+  """Token types common to all languages."""
+  # Values are human-readable strings; language-specific subclasses (e.g.
+  # javascripttokens.JavaScriptTokenType) extend this set.
+  NORMAL = 'normal'
+  WHITESPACE = 'whitespace'
+  BLANK_LINE = 'blank line'
+
+
+class Token(object):
+  """Token class for intelligent text splitting.
+
+  The token class represents a string of characters and an identifying type.
+
+  Attributes:
+    type: The type of token.
+    string: The characters the token comprises.
+    length: The length of the token.
+    line: The text of the line the token is found in.
+    line_number: The number of the line the token is found in.
+    values: Dictionary of values returned from the tokens regex match.
+    previous: The token before this one.
+    next: The token after this one.
+    start_index: The character index in the line where this token starts.
+    attached_object: Object containing more information about this token.
+    metadata: Object containing metadata about this token. Must be added by
+      a separate metadata pass.
+    orig_line_number: Line number in the original file; set only during
+      tokenization, None for tokens created later (e.g. by error fixes).
+    is_deleted: Whether this token has been marked as deleted.
+  """
+
+  def __init__(self, string, token_type, line, line_number, values=None,
+               orig_line_number=None):
+    """Creates a new Token object.
+
+    Args:
+      string: The string of input the token contains.
+      token_type: The type of token.
+      line: The text of the line this token is in.
+      line_number: The line number of the token.
+      values: A dict of named values within the token. For instance, a
+        function declaration may have a value called 'name' which captures the
+        name of the function.
+      orig_line_number: The line number of the original file this token comes
+        from. This should be only set during the tokenization process. For newly
+        created error fix tokens after that, it should be None.
+    """
+    self.type = token_type
+    self.string = string
+    self.length = len(string)
+    self.line = line
+    self.line_number = line_number
+    self.orig_line_number = orig_line_number
+    self.values = values
+    self.is_deleted = False
+
+    # These parts can only be computed when the file is fully tokenized
+    self.previous = None
+    self.next = None
+    self.start_index = None
+
+    # This part is set in statetracker.py
+    # TODO(robbyw): Wrap this in to metadata
+    self.attached_object = None
+
+    # This part is set in *metadatapass.py
+    self.metadata = None
+
+  def IsFirstInLine(self):
+    """Tests if this token is the first token in its line.
+
+    Returns:
+      Whether the token is the first token in its line.
+    """
+    return not self.previous or self.previous.line_number != self.line_number
+
+  def IsLastInLine(self):
+    """Tests if this token is the last token in its line.
+
+    Returns:
+      Whether the token is the last token in its line.
+    """
+    return not self.next or self.next.line_number != self.line_number
+
+  def IsType(self, token_type):
+    """Tests if this token is of the given type.
+
+    Args:
+      token_type: The type to test for.
+
+    Returns:
+      True if the type of this token matches the type passed in.
+    """
+    return self.type == token_type
+
+  def IsAnyType(self, *token_types):
+    """Tests if this token is any of the given types.
+
+    Args:
+      token_types: The types to check. Also accepts a single array.
+
+    Returns:
+      True if the type of this token is any of the types passed in.
+    """
+    # NOTE(review): basestring exists only in Python 2; a Python 3 port would
+    # need str (or (str, bytes)) here.
+    if not isinstance(token_types[0], basestring):
+      return self.type in token_types[0]
+    else:
+      return self.type in token_types
+
+  def __repr__(self):
+    return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string,
+                                              self.values, self.line_number,
+                                              self.metadata)
+
+  def __iter__(self):
+    """Returns a token iterator."""
+    node = self
+    while node:
+      yield node
+      node = node.next
+
+  def __reversed__(self):
+    """Returns a reverse-direction token iterator."""
+    node = self
+    while node:
+      yield node
+      node = node.previous
diff --git a/tools/closure_linter/build/lib/closure_linter/common/tokens_test.py b/tools/closure_linter/build/lib/closure_linter/common/tokens_test.py
new file mode 100644
index 0000000000..01ec89d01b
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/common/tokens_test.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+__author__ = 'nnaze@google.com (Nathan Naze)'
+
+import unittest as googletest
+from closure_linter.common import tokens
+
+
+def _CreateDummyToken():
+  """Returns a minimal unlinked Token for linkage tests."""
+  return tokens.Token('foo', None, 1, 1)
+
+
+def _CreateDummyTokens(count):
+  """Returns a list of `count` unlinked dummy tokens."""
+  dummy_tokens = []
+  for _ in xrange(count):
+    dummy_tokens.append(_CreateDummyToken())
+  return dummy_tokens
+
+
+def _SetTokensAsNeighbors(neighbor_tokens):
+  """Doubly links the given tokens in list order via previous/next."""
+  for i in xrange(len(neighbor_tokens)):
+    prev_index = i - 1
+    next_index = i + 1
+
+    if prev_index >= 0:
+      neighbor_tokens[i].previous = neighbor_tokens[prev_index]
+
+    if next_index < len(neighbor_tokens):
+      neighbor_tokens[i].next = neighbor_tokens[next_index]
+
+
+class TokensTest(googletest.TestCase):
+  """Tests the linked-list and type-query behavior of tokens.Token."""
+
+  def testIsFirstInLine(self):
+
+    # First token in file (has no previous).
+    self.assertTrue(_CreateDummyToken().IsFirstInLine())
+
+    a, b = _CreateDummyTokens(2)
+    _SetTokensAsNeighbors([a, b])
+
+    # Tokens on same line
+    a.line_number = 30
+    b.line_number = 30
+
+    self.assertFalse(b.IsFirstInLine())
+
+    # Tokens on different lines
+    b.line_number = 31
+    self.assertTrue(b.IsFirstInLine())
+
+  def testIsLastInLine(self):
+    # Last token in file (has no next).
+    self.assertTrue(_CreateDummyToken().IsLastInLine())
+
+    a, b = _CreateDummyTokens(2)
+    _SetTokensAsNeighbors([a, b])
+
+    # Tokens on same line
+    a.line_number = 30
+    b.line_number = 30
+    self.assertFalse(a.IsLastInLine())
+
+    b.line_number = 31
+    self.assertTrue(a.IsLastInLine())
+
+  def testIsType(self):
+    a = tokens.Token('foo', 'fakeType1', 1, 1)
+    self.assertTrue(a.IsType('fakeType1'))
+    self.assertFalse(a.IsType('fakeType2'))
+
+  def testIsAnyType(self):
+    a = tokens.Token('foo', 'fakeType1', 1, 1)
+    self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2']))
+    self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4']))
+
+  def testRepr(self):
+    a = tokens.Token('foo', 'fakeType1', 1, 1)
+    self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a))
+
+  def testIter(self):
+    dummy_tokens = _CreateDummyTokens(5)
+    _SetTokensAsNeighbors(dummy_tokens)
+    a, b, c, d, e = dummy_tokens
+
+    i = iter(a)
+    self.assertListEqual([a, b, c, d, e], list(i))
+
+  def testReverseIter(self):
+    dummy_tokens = _CreateDummyTokens(5)
+    _SetTokensAsNeighbors(dummy_tokens)
+    a, b, c, d, e = dummy_tokens
+
+    ri = reversed(e)
+    self.assertListEqual([e, d, c, b, a], list(ri))
+
+
+# Allow running this test module directly.
+if __name__ == '__main__':
+  googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/ecmalintrules.py b/tools/closure_linter/build/lib/closure_linter/ecmalintrules.py
new file mode 100644
index 0000000000..c07dffc86e
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/ecmalintrules.py
@@ -0,0 +1,844 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Core methods for checking EcmaScript files for common style guide violations.
+"""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)',
+ 'jacobr@google.com (Jacob Richman)')
+
+import re
+
+import gflags as flags
+
+from closure_linter import checkerbase
+from closure_linter import ecmametadatapass
+from closure_linter import error_check
+from closure_linter import errorrules
+from closure_linter import errors
+from closure_linter import indentation
+from closure_linter import javascripttokenizer
+from closure_linter import javascripttokens
+from closure_linter import statetracker
+from closure_linter import tokenutil
+from closure_linter.common import error
+from closure_linter.common import position
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
+# TODO(user): When flipping this to True, remove logic from unit tests
+# that overrides this flag.
+# The two adjacent string literals are implicitly concatenated; the trailing
+# space is required so the help text reads "...to be placed..." rather than
+# the garbled "...to beplaced...".
+flags.DEFINE_boolean('dot_on_next_line', False, 'Require dots to be '
+                     'placed on the next line for wrapped expressions')
+
+# TODO(robbyw): Check for extra parens on return statements
+# TODO(robbyw): Check for 0px in strings
+# TODO(robbyw): Ensure inline jsDoc is in {}
+# TODO(robbyw): Check for valid JS types in parameter docs
+
+# Shorthand
+Context = ecmametadatapass.EcmaContext
+Error = error.Error
+Modes = javascripttokenizer.JavaScriptModes
+Position = position.Position
+Rule = error_check.Rule
+Type = javascripttokens.JavaScriptTokenType
+
+
+class EcmaScriptLintRules(checkerbase.LintRulesBase):
+  """EcmaScript lint style checking rules.
+
+  Can be used to find common style errors in JavaScript, ActionScript and other
+  Ecma like scripting languages. Style checkers for Ecma scripting languages
+  should inherit from this style checker.
+  Please do not add any state to EcmaScriptLintRules or to any subclasses.
+
+  All state should be added to the StateTracker subclass used for a particular
+  language.
+  """
+
+  # It will be initialized in constructor so the flags are initialized.
+  max_line_length = -1
+
+  # Static constants.
+  MISSING_PARAMETER_SPACE = re.compile(r',\S')
+
+  EXTRA_SPACE = re.compile(r'(\(\s|\s\))')
+
+  ENDS_WITH_SPACE = re.compile(r'\s$')
+
+  ILLEGAL_TAB = re.compile(r'\t')
+
+  # Regex used to split up complex types to check for invalid use of ? and |.
+  TYPE_SPLIT = re.compile(r'[,<>()]')
+
+  # Regex for form of author lines after the @author tag.
+  AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
+
+  # Acceptable tokens to remove for line too long testing.
+  LONG_LINE_IGNORE = frozenset(
+      ['*', '//', '@see'] +
+      ['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
+
+  # JsDoc flags that may legitimately have no description text.
+  JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([
+      '@fileoverview', '@param', '@return', '@returns'])
+
+  def __init__(self):
+    """Initialize this lint rule object."""
+    checkerbase.LintRulesBase.__init__(self)
+    if EcmaScriptLintRules.max_line_length == -1:
+      # Resolved lazily here so that the flags have been initialized.
+      EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength()
+
+  def Initialize(self, checker, limited_doc_checks, is_html):
+    """Initialize this lint rule object before parsing a new file."""
+    checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
+                                         is_html)
+    # Fresh indentation tracker per file so state does not leak across files.
+    self._indentation = indentation.IndentationRules()
+
+  def HandleMissingParameterDoc(self, token, param_name):
+    """Handle errors associated with a parameter missing a @param tag."""
+    # NOTE(review): raises TypeError rather than the conventional
+    # NotImplementedError; subclasses are expected to override this.
+    raise TypeError('Abstract method HandleMissingParameterDoc not implemented')
+
+  def _CheckLineLength(self, last_token, state):
+    """Checks whether the line is too long.
+
+    Args:
+      last_token: The last token in the line.
+      state: parser_state object that indicates the current state in the page
+    """
+    # Start from the last token so that we have the flag object attached to
+    # any DOC_FLAG tokens.
+    line_number = last_token.line_number
+    token = last_token
+
+    # Build a representation of the string where spaces indicate potential
+    # line-break locations.
+    line = []
+    while token and token.line_number == line_number:
+      if state.IsTypeToken(token):
+        line.insert(0, 'x' * len(token.string))
+      elif token.type in (Type.IDENTIFIER, Type.OPERATOR):
+        # Dots are acceptable places to wrap (may be tokenized as identifiers).
+        line.insert(0, token.string.replace('.', ' '))
+      else:
+        line.insert(0, token.string)
+      token = token.previous
+
+    line = ''.join(line)
+    line = line.rstrip('\n\r\f')
+    try:
+      # NOTE(review): unicode() is Python 2 only.
+      length = len(unicode(line, 'utf-8'))
+    except (LookupError, UnicodeDecodeError):
+      # Unknown encoding. The line length may be wrong, as was originally the
+      # case for utf-8 (see bug 1735846). For now just accept the default
+      # length, but as we find problems we can either add test for other
+      # possible encodings or return without an error to protect against
+      # false positives at the cost of more false negatives.
+      length = len(line)
+
+    if length > EcmaScriptLintRules.max_line_length:
+
+      # If the line matches one of the exceptions, then it's ok.
+      for long_line_regexp in self.GetLongLineExceptions():
+        if long_line_regexp.match(last_token.line):
+          return
+
+      # If the line consists of only one "word", or multiple words but all
+      # except one are ignoreable, then it's ok.
+      parts = set(line.split())
+
+      # We allow two "words" (type and name) when the line contains @param
+      max_parts = 1
+      if '@param' in parts:
+        max_parts = 2
+
+      # Custom tags like @requires may have url like descriptions, so ignore
+      # the tag, similar to how we handle @see.
+      custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
+      if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags))
+          > max_parts):
+        # NOTE(review): the message reports len(line) (byte length) while the
+        # check above uses the unicode-aware "length"; the two can differ for
+        # non-ASCII lines -- confirm whether "length" was intended here.
+        self._HandleError(
+            errors.LINE_TOO_LONG,
+            'Line too long (%d characters).' % len(line), last_token)
+
+  def _CheckJsDocType(self, token, js_type):
+    """Checks the given type for style errors.
+
+    Args:
+      token: The DOC_FLAG token for the flag whose type to check.
+      js_type: The flag's typeannotation.TypeAnnotation instance.
+    """
+    if not js_type: return
+
+    # A two-member type group containing 'null' should be written as ?Type.
+    if js_type.type_group and len(js_type.sub_types) == 2:
+      identifiers = [t.identifier for t in js_type.sub_types]
+      if 'null' in identifiers:
+        # Don't warn if the identifier is a template type (e.g. {TYPE|null}).
+        if not identifiers[0].isupper() and not identifiers[1].isupper():
+          self._HandleError(
+              errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
+              'Prefer "?Type" to "Type|null": "%s"' % js_type, token)
+
+    # TODO(user): We should report an error for wrong usage of '?' and '|'
+    # e.g. {?number|string|null} etc.
+
+    # Recurse into nested sub-types of the annotation.
+    for sub_type in js_type.IterTypes():
+      self._CheckJsDocType(token, sub_type)
+
+  def _CheckForMissingSpaceBeforeToken(self, token):
+    """Checks for a missing space at the beginning of a token.
+
+    Reports a MISSING_SPACE error if the token does not begin with a space or
+    the previous token doesn't end with a space and the previous token is on the
+    same line as the token.
+
+    Args:
+      token: The token being checked
+    """
+    # TODO(user): Check if too many spaces?
+    # Error when: the token has no leading whitespace, the previous token is
+    # on the same line, and the previous token has no trailing whitespace.
+    if (len(token.string) == len(token.string.lstrip()) and
+        token.previous and token.line_number == token.previous.line_number and
+        len(token.previous.string) - len(token.previous.string.rstrip()) == 0):
+      self._HandleError(
+          errors.MISSING_SPACE,
+          'Missing space before "%s"' % token.string,
+          token,
+          position=Position.AtBeginning())
+
+  def _CheckOperator(self, token):
+    """Checks an operator for spacing and line style.
+
+    Args:
+      token: The operator token.
+    """
+    last_code = token.metadata.last_code
+
+    if not self._ExpectSpaceBeforeOperator(token):
+      if (token.previous and token.previous.type == Type.WHITESPACE and
+          last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER) and
+          last_code.line_number == token.line_number):
+        self._HandleError(
+            errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string,
+            token.previous, position=Position.All(token.previous.string))
+
+    elif (token.previous and
+          not token.previous.IsComment() and
+          not tokenutil.IsDot(token) and
+          token.previous.type in Type.EXPRESSION_ENDER_TYPES):
+      self._HandleError(errors.MISSING_SPACE,
+                        'Missing space before "%s"' % token.string, token,
+                        position=Position.AtBeginning())
+
+    # Check wrapping of operators.
+    next_code = tokenutil.GetNextCodeToken(token)
+
+    is_dot = tokenutil.IsDot(token)
+    wrapped_before = last_code and last_code.line_number != token.line_number
+    wrapped_after = next_code and next_code.line_number != token.line_number
+
+    if FLAGS.dot_on_next_line and is_dot and wrapped_after:
+      self._HandleError(
+          errors.LINE_ENDS_WITH_DOT,
+          '"." must go on the following line',
+          token)
+    # Binary operators should end the previous line rather than start one.
+    if (not is_dot and wrapped_before and
+        not token.metadata.IsUnaryOperator()):
+      self._HandleError(
+          errors.LINE_STARTS_WITH_OPERATOR,
+          'Binary operator must go on previous line "%s"' % token.string,
+          token)
+
+  def _IsLabel(self, token):
+    """Returns whether the given ':' token is part of a label.
+
+    Args:
+      token: The token to classify.
+
+    Returns:
+      True if the token is a label/case/object-literal colon (not a ternary).
+    """
+    # A ':' token is considered part of a label if it occurs in a case
+    # statement, a plain label, or an object literal, i.e. is not part of a
+    # ternary.
+
+    return (token.string == ':' and
+            token.metadata.context.type in (Context.LITERAL_ELEMENT,
+                                            Context.CASE_BLOCK,
+                                            Context.STATEMENT))
+
+  def _ExpectSpaceBeforeOperator(self, token):
+    """Returns whether a space should appear before the given operator token.
+
+    Args:
+      token: The operator token.
+
+    Returns:
+      Whether there should be a space before the token.
+    """
+    # Commas and unary post-operators hug the preceding token.
+    if token.string == ',' or token.metadata.IsUnaryPostOperator():
+      return False
+
+    if tokenutil.IsDot(token):
+      return False
+
+    # Colons should appear in labels, object literals, the case of a switch
+    # statement, and ternary operator. Only want a space in the case of the
+    # ternary operator.
+    if self._IsLabel(token):
+      return False
+
+    if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
+      return False
+
+    return True
+
+ def CheckToken(self, token, state):
+ """Checks a token, given the current parser_state, for warnings and errors.
+
+ Args:
+ token: The current token under consideration
+ state: parser_state object that indicates the current state in the page
+ """
+ # Store some convenience variables
+ first_in_line = token.IsFirstInLine()
+ last_in_line = token.IsLastInLine()
+ last_non_space_token = state.GetLastNonSpaceToken()
+
+ token_type = token.type
+
+ # Process the line change.
+ if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
+ # TODO(robbyw): Support checking indentation in HTML files.
+ indentation_errors = self._indentation.CheckToken(token, state)
+ for indentation_error in indentation_errors:
+ self._HandleError(*indentation_error)
+
+ if last_in_line:
+ self._CheckLineLength(token, state)
+
+ if token_type == Type.PARAMETERS:
+ # Find missing spaces in parameter lists.
+ if self.MISSING_PARAMETER_SPACE.search(token.string):
+ fix_data = ', '.join([s.strip() for s in token.string.split(',')])
+ self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
+ token, position=None, fix_data=fix_data.strip())
+
+ # Find extra spaces at the beginning of parameter lists. Make sure
+ # we aren't at the beginning of a continuing multi-line list.
+ if not first_in_line:
+ space_count = len(token.string) - len(token.string.lstrip())
+ if space_count:
+ self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
+ token, position=Position(0, space_count))
+
+ elif (token_type == Type.START_BLOCK and
+ token.metadata.context.type == Context.BLOCK):
+ self._CheckForMissingSpaceBeforeToken(token)
+
+ elif token_type == Type.END_BLOCK:
+ last_code = token.metadata.last_code
+ if state.InFunction() and state.IsFunctionClose():
+ if state.InTopLevelFunction():
+ # A semicolons should not be included at the end of a function
+ # declaration.
+ if not state.InAssignedFunction():
+ if not last_in_line and token.next.type == Type.SEMICOLON:
+ self._HandleError(
+ errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
+ 'Illegal semicolon after function declaration',
+ token.next, position=Position.All(token.next.string))
+
+ # A semicolon should be included at the end of a function expression
+ # that is not immediately called or used by a dot operator.
+ if (state.InAssignedFunction() and token.next
+ and token.next.type != Type.SEMICOLON):
+ next_token = tokenutil.GetNextCodeToken(token)
+ is_immediately_used = (next_token.type == Type.START_PAREN or
+ tokenutil.IsDot(next_token))
+ if not is_immediately_used:
+ self._HandleError(
+ errors.MISSING_SEMICOLON_AFTER_FUNCTION,
+ 'Missing semicolon after function assigned to a variable',
+ token, position=Position.AtEnd(token.string))
+
+ if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK:
+ self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
+ 'Interface methods cannot contain code', last_code)
+
+ elif (state.IsBlockClose() and
+ token.next and token.next.type == Type.SEMICOLON):
+ if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL
+ and last_code.metadata.context.type != Context.OBJECT_LITERAL):
+ self._HandleError(
+ errors.REDUNDANT_SEMICOLON,
+ 'No semicolon is required to end a code block',
+ token.next, position=Position.All(token.next.string))
+
+ elif token_type == Type.SEMICOLON:
+ if token.previous and token.previous.type == Type.WHITESPACE:
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space before ";"',
+ token.previous, position=Position.All(token.previous.string))
+
+ if token.next and token.next.line_number == token.line_number:
+ if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
+ # TODO(robbyw): Error about no multi-statement lines.
+ pass
+
+ elif token.next.type not in (
+ Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
+ self._HandleError(
+ errors.MISSING_SPACE,
+ 'Missing space after ";" in for statement',
+ token.next,
+ position=Position.AtBeginning())
+
+ last_code = token.metadata.last_code
+ if last_code and last_code.type == Type.SEMICOLON:
+ # Allow a single double semi colon in for loops for cases like:
+ # for (;;) { }.
+ # NOTE(user): This is not a perfect check, and will not throw an error
+ # for cases like: for (var i = 0;; i < n; i++) {}, but then your code
+ # probably won't work either.
+ for_token = tokenutil.CustomSearch(
+ last_code,
+ lambda token: token.type == Type.KEYWORD and token.string == 'for',
+ end_func=lambda token: token.type == Type.SEMICOLON,
+ distance=None,
+ reverse=True)
+
+ if not for_token:
+ self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
+ token, position=Position.All(token.string))
+
+ elif token_type == Type.START_PAREN:
+ # Ensure that opening parentheses have a space before any keyword
+ # that is not being invoked like a member function.
+ if (token.previous and token.previous.type == Type.KEYWORD and
+ (not token.previous.metadata or
+ not token.previous.metadata.last_code or
+ not token.previous.metadata.last_code.string or
+ token.previous.metadata.last_code.string[-1:] != '.')):
+ self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
+ token, position=Position.AtBeginning())
+ elif token.previous and token.previous.type == Type.WHITESPACE:
+ before_space = token.previous.previous
+ # Ensure that there is no extra space before a function invocation,
+ # even if the function being invoked happens to be a keyword.
+ if (before_space and before_space.line_number == token.line_number and
+ before_space.type == Type.IDENTIFIER or
+ (before_space.type == Type.KEYWORD and before_space.metadata and
+ before_space.metadata.last_code and
+ before_space.metadata.last_code.string and
+ before_space.metadata.last_code.string[-1:] == '.')):
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space before "("',
+ token.previous, position=Position.All(token.previous.string))
+
+ elif token_type == Type.START_BRACKET:
+ self._HandleStartBracket(token, last_non_space_token)
+ elif token_type in (Type.END_PAREN, Type.END_BRACKET):
+ # Ensure there is no space before closing parentheses, except when
+ # it's in a for statement with an omitted section, or when it's at the
+ # beginning of a line.
+ if (token.previous and token.previous.type == Type.WHITESPACE and
+ not token.previous.IsFirstInLine() and
+ not (last_non_space_token and last_non_space_token.line_number ==
+ token.line_number and
+ last_non_space_token.type == Type.SEMICOLON)):
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space before "%s"' %
+ token.string, token.previous,
+ position=Position.All(token.previous.string))
+
+ elif token_type == Type.WHITESPACE:
+ if self.ILLEGAL_TAB.search(token.string):
+ if token.IsFirstInLine():
+ if token.next:
+ self._HandleError(
+ errors.ILLEGAL_TAB,
+ 'Illegal tab in whitespace before "%s"' % token.next.string,
+ token, position=Position.All(token.string))
+ else:
+ self._HandleError(
+ errors.ILLEGAL_TAB,
+ 'Illegal tab in whitespace',
+ token, position=Position.All(token.string))
+ else:
+ self._HandleError(
+ errors.ILLEGAL_TAB,
+ 'Illegal tab in whitespace after "%s"' % token.previous.string,
+ token, position=Position.All(token.string))
+
+ # Check whitespace length if it's not the first token of the line and
+ # if it's not immediately before a comment.
+ if last_in_line:
+ # Check for extra whitespace at the end of a line.
+ self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
+ token, position=Position.All(token.string))
+ elif not first_in_line and not token.next.IsComment():
+ if token.length > 1:
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space after "%s"' %
+ token.previous.string, token,
+ position=Position(1, len(token.string) - 1))
+
+ elif token_type == Type.OPERATOR:
+ self._CheckOperator(token)
+ elif token_type == Type.DOC_FLAG:
+ flag = token.attached_object
+
+ if flag.flag_type == 'bug':
+ # TODO(robbyw): Check for exactly 1 space on the left.
+ string = token.next.string.lstrip()
+ string = string.split(' ', 1)[0]
+
+ if not string.isdigit():
+ self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
+ '@bug should be followed by a bug number', token)
+
+ elif flag.flag_type == 'suppress':
+ if flag.type is None:
+ # A syntactically invalid suppress tag will get tokenized as a normal
+ # flag, indicating an error.
+ self._HandleError(
+ errors.INCORRECT_SUPPRESS_SYNTAX,
+ 'Invalid suppress syntax: should be @suppress {errortype}. '
+ 'Spaces matter.', token)
+ else:
+ for suppress_type in flag.jstype.IterIdentifiers():
+ if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
+ self._HandleError(
+ errors.INVALID_SUPPRESS_TYPE,
+ 'Invalid suppression type: %s' % suppress_type, token)
+
+ elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
+ flag.flag_type == 'author'):
+ # TODO(user): In non strict mode check the author tag for as much as
+ # it exists, though the full form checked below isn't required.
+ string = token.next.string
+ result = self.AUTHOR_SPEC.match(string)
+ if not result:
+ self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
+ 'Author tag line should be of the form: '
+ '@author foo@somewhere.com (Your Name)',
+ token.next)
+ else:
+ # Check spacing between email address and name. Do this before
+ # checking earlier spacing so positions are easier to calculate for
+ # autofixing.
+ num_spaces = len(result.group(2))
+ if num_spaces < 1:
+ self._HandleError(errors.MISSING_SPACE,
+ 'Missing space after email address',
+ token.next, position=Position(result.start(2), 0))
+ elif num_spaces > 1:
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space after email address',
+ token.next,
+ position=Position(result.start(2) + 1, num_spaces - 1))
+
+ # Check for extra spaces before email address. Can't be too few, if
+ # not at least one we wouldn't match @author tag.
+ num_spaces = len(result.group(1))
+ if num_spaces > 1:
+ self._HandleError(errors.EXTRA_SPACE,
+ 'Extra space before email address',
+ token.next, position=Position(1, num_spaces - 1))
+
+ elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
+ not self._limited_doc_checks):
+ if flag.flag_type == 'param':
+ if flag.name is None:
+ self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
+ 'Missing name in @param tag', token)
+
+ if not flag.description or flag.description is None:
+ flag_name = token.type
+ if 'name' in token.values:
+ flag_name = '@' + token.values['name']
+
+ if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
+ self._HandleError(
+ errors.MISSING_JSDOC_TAG_DESCRIPTION,
+ 'Missing description in %s tag' % flag_name, token)
+ else:
+ self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
+
+ if flag.HasType():
+ if flag.type_start_token is not None:
+ self._CheckForMissingSpaceBeforeToken(
+ token.attached_object.type_start_token)
+
+ if flag.jstype and not flag.jstype.IsEmpty():
+ self._CheckJsDocType(token, flag.jstype)
+
+ if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
+ flag.type_start_token.type != Type.DOC_START_BRACE or
+ flag.type_end_token.type != Type.DOC_END_BRACE):
+ self._HandleError(
+ errors.MISSING_BRACES_AROUND_TYPE,
+ 'Type must always be surrounded by curly braces.', token)
+
+ if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
+ if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
+ token.values['name'] not in FLAGS.custom_jsdoc_tags):
+ self._HandleError(
+ errors.INVALID_JSDOC_TAG,
+ 'Invalid JsDoc tag: %s' % token.values['name'], token)
+
+ if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
+ token.values['name'] == 'inheritDoc' and
+ token_type == Type.DOC_INLINE_FLAG):
+ self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
+ 'Unnecessary braces around @inheritDoc',
+ token)
+
+ elif token_type == Type.SIMPLE_LVALUE:
+ identifier = token.values['identifier']
+
+ if ((not state.InFunction() or state.InConstructor()) and
+ state.InTopLevel() and not state.InObjectLiteralDescendant()):
+ jsdoc = state.GetDocComment()
+ if not state.HasDocComment(identifier):
+ # Only test for documentation on identifiers with .s in them to
+ # avoid checking things like simple variables. We don't require
+ # documenting assignments to .prototype itself (bug 1880803).
+ if (not state.InConstructor() and
+ identifier.find('.') != -1 and not
+ identifier.endswith('.prototype') and not
+ self._limited_doc_checks):
+ comment = state.GetLastComment()
+ if not (comment and comment.lower().count('jsdoc inherited')):
+ self._HandleError(
+ errors.MISSING_MEMBER_DOCUMENTATION,
+ "No docs found for member '%s'" % identifier,
+ token)
+ elif jsdoc and (not state.InConstructor() or
+ identifier.startswith('this.')):
+ # We are at the top level and the function/member is documented.
+ if identifier.endswith('_') and not identifier.endswith('__'):
+ # Can have a private class which inherits documentation from a
+ # public superclass.
+ #
+ # @inheritDoc is deprecated in favor of using @override, and they
+ if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
+ and ('accessControls' not in jsdoc.suppressions)):
+ self._HandleError(
+ errors.INVALID_OVERRIDE_PRIVATE,
+ '%s should not override a private member.' % identifier,
+ jsdoc.GetFlag('override').flag_token)
+ if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
+ and ('accessControls' not in jsdoc.suppressions)):
+ self._HandleError(
+ errors.INVALID_INHERIT_DOC_PRIVATE,
+ '%s should not inherit from a private member.' % identifier,
+ jsdoc.GetFlag('inheritDoc').flag_token)
+ if (not jsdoc.HasFlag('private') and
+ ('underscore' not in jsdoc.suppressions) and not
+ ((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
+ ('accessControls' in jsdoc.suppressions))):
+ self._HandleError(
+ errors.MISSING_PRIVATE,
+ 'Member "%s" must have @private JsDoc.' %
+ identifier, token)
+ if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
+ self._HandleError(
+ errors.UNNECESSARY_SUPPRESS,
+ '@suppress {underscore} is not necessary with @private',
+ jsdoc.suppressions['underscore'])
+ elif (jsdoc.HasFlag('private') and
+ not self.InExplicitlyTypedLanguage()):
+ # It is convention to hide public fields in some ECMA
+ # implementations from documentation using the @private tag.
+ self._HandleError(
+ errors.EXTRA_PRIVATE,
+ 'Member "%s" must not have @private JsDoc' %
+ identifier, token)
+
+ # These flags are only legal on localizable message definitions;
+ # such variables always begin with the prefix MSG_.
+ for f in ('desc', 'hidden', 'meaning'):
+ if (jsdoc.HasFlag(f)
+ and not identifier.startswith('MSG_')
+ and identifier.find('.MSG_') == -1):
+ self._HandleError(
+ errors.INVALID_USE_OF_DESC_TAG,
+ 'Member "%s" should not have @%s JsDoc' % (identifier, f),
+ token)
+
+ # Check for illegaly assigning live objects as prototype property values.
+ index = identifier.find('.prototype.')
+ # Ignore anything with additional .s after the prototype.
+ if index != -1 and identifier.find('.', index + 11) == -1:
+ equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
+ next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
+ if next_code and (
+ next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
+ next_code.IsOperator('new')):
+ self._HandleError(
+ errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
+ 'Member %s cannot have a non-primitive value' % identifier,
+ token)
+
+ elif token_type == Type.END_PARAMETERS:
+ # Find extra space at the end of parameter lists. We check the token
+ # prior to the current one when it is a closing paren.
+ if (token.previous and token.previous.type == Type.PARAMETERS
+ and self.ENDS_WITH_SPACE.search(token.previous.string)):
+ self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
+ token.previous)
+
+ jsdoc = state.GetDocComment()
+ if state.GetFunction().is_interface:
+ if token.previous and token.previous.type == Type.PARAMETERS:
+ self._HandleError(
+ errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
+ 'Interface constructor cannot have parameters',
+ token.previous)
+ elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
+ and not jsdoc.InheritsDocumentation()
+ and not state.InObjectLiteralDescendant() and not
+ jsdoc.IsInvalidated()):
+ distance, edit = jsdoc.CompareParameters(state.GetParams())
+ if distance:
+ params_iter = iter(state.GetParams())
+ docs_iter = iter(jsdoc.ordered_params)
+
+ for op in edit:
+ if op == 'I':
+ # Insertion.
+ # Parsing doc comments is the same for all languages
+ # but some languages care about parameters that don't have
+ # doc comments and some languages don't care.
+ # Languages that don't allow variables to by typed such as
+ # JavaScript care but languages such as ActionScript or Java
+ # that allow variables to be typed don't care.
+ if not self._limited_doc_checks:
+ self.HandleMissingParameterDoc(token, params_iter.next())
+
+ elif op == 'D':
+ # Deletion
+ self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
+ 'Found docs for non-existing parameter: "%s"' %
+ docs_iter.next(), token)
+ elif op == 'S':
+ # Substitution
+ if not self._limited_doc_checks:
+ self._HandleError(
+ errors.WRONG_PARAMETER_DOCUMENTATION,
+ 'Parameter mismatch: got "%s", expected "%s"' %
+ (params_iter.next(), docs_iter.next()), token)
+
+ else:
+ # Equality - just advance the iterators
+ params_iter.next()
+ docs_iter.next()
+
+ elif token_type == Type.STRING_TEXT:
+ # If this is the first token after the start of the string, but it's at
+ # the end of a line, we know we have a multi-line string.
+ if token.previous.type in (
+ Type.SINGLE_QUOTE_STRING_START,
+ Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
+ self._HandleError(errors.MULTI_LINE_STRING,
+ 'Multi-line strings are not allowed', token)
+
+ # This check is orthogonal to the ones above, and repeats some types, so
+ # it is a plain if and not an elif.
+ if token.type in Type.COMMENT_TYPES:
+ if self.ILLEGAL_TAB.search(token.string):
+ self._HandleError(errors.ILLEGAL_TAB,
+ 'Illegal tab in comment "%s"' % token.string, token)
+
+ trimmed = token.string.rstrip()
+ if last_in_line and token.string != trimmed:
+ # Check for extra whitespace at the end of a line.
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space at end of line', token,
+ position=Position(len(trimmed), len(token.string) - len(trimmed)))
+
+ # This check is also orthogonal since it is based on metadata.
+ if token.metadata.is_implied_semicolon:
+ self._HandleError(errors.MISSING_SEMICOLON,
+ 'Missing semicolon at end of line', token)
+
+ def _HandleStartBracket(self, token, last_non_space_token):
+ """Handles a token that is an open bracket.
+
+ Args:
+ token: The token to handle.
+ last_non_space_token: The last token that was not a space.
+ """
+ if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
+ last_non_space_token and
+ last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space before "["',
+ token.previous, position=Position.All(token.previous.string))
+ # If the [ token is the first token in a line we shouldn't complain
+ # about a missing space before [. This is because some Ecma script
+ # languages allow syntax like:
+ # [Annotation]
+ # class MyClass {...}
+ # So we don't want to blindly warn about missing spaces before [.
+ # In the the future, when rules for computing exactly how many spaces
+ # lines should be indented are added, then we can return errors for
+ # [ tokens that are improperly indented.
+ # For example:
+ # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
+ # [a,b,c];
+ # should trigger a proper indentation warning message as [ is not indented
+ # by four spaces.
+ elif (not token.IsFirstInLine() and token.previous and
+ token.previous.type not in (
+ [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
+ Type.EXPRESSION_ENDER_TYPES)):
+ self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
+ token, position=Position.AtBeginning())
+
+ def Finalize(self, state):
+ """Perform all checks that need to occur after all lines are processed.
+
+ Args:
+ state: State of the parser after parsing all tokens
+
+ Raises:
+ TypeError: If not overridden.
+ """
+ last_non_space_token = state.GetLastNonSpaceToken()
+ # Check last line for ending with newline.
+ if state.GetLastLine() and not (
+ state.GetLastLine().isspace() or
+ state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
+ self._HandleError(
+ errors.FILE_MISSING_NEWLINE,
+ 'File does not end with new line. (%s)' % state.GetLastLine(),
+ last_non_space_token)
+
+ try:
+ self._indentation.Finalize()
+ except Exception, e:
+ self._HandleError(
+ errors.FILE_DOES_NOT_PARSE,
+ str(e),
+ last_non_space_token)
+
+ def GetLongLineExceptions(self):
+ """Gets a list of regexps for lines which can be longer than the limit.
+
+ Returns:
+ A list of regexps, used as matches (rather than searches).
+ """
+ return []
+
+ def InExplicitlyTypedLanguage(self):
+ """Returns whether this ecma implementation is explicitly typed."""
+ return False
diff --git a/tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py b/tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py
new file mode 100644
index 0000000000..50621610ef
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py
@@ -0,0 +1,574 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Metadata pass for annotating tokens in EcmaScript files."""
+
+__author__ = ('robbyw@google.com (Robert Walker)')
+
+from closure_linter import javascripttokens
+from closure_linter import tokenutil
+
+
+TokenType = javascripttokens.JavaScriptTokenType
+
+
class ParseError(Exception):
  """Exception raised when a parse error is found at a specific token.

  Attributes:
    token: The token where the parse error occurred.
  """

  def __init__(self, token, message=None):
    """Creates a parse error anchored at the given token.

    Args:
      token: The token where the parse error occurred.
      message: An optional human-readable description of the error.
    """
    super(ParseError, self).__init__(message)
    self.token = token
+
+
class EcmaContext(object):
  """Context object for EcmaScript languages.

  Attributes:
    type: The context type.
    start_token: The token where this context starts.
    end_token: The token where this context ends.
    parent: The parent context.
  """

  # The root context.
  ROOT = 'root'

  # A block of code.
  BLOCK = 'block'

  # A pseudo-block of code for a given case or default section.
  CASE_BLOCK = 'case_block'

  # Block of statements in a for loop's parentheses.
  FOR_GROUP_BLOCK = 'for_block'

  # An implied block of code for 1 line if, while, and for statements
  IMPLIED_BLOCK = 'implied_block'

  # An index in to an array or object.
  INDEX = 'index'

  # An array literal in [].
  ARRAY_LITERAL = 'array_literal'

  # An object literal in {}.
  OBJECT_LITERAL = 'object_literal'

  # An individual element in an array or object literal.
  LITERAL_ELEMENT = 'literal_element'

  # The portion of a ternary statement between ? and :
  TERNARY_TRUE = 'ternary_true'

  # The portion of a ternary statement after :
  TERNARY_FALSE = 'ternary_false'

  # The entire switch statement.  This will contain a GROUP with the variable
  # and a BLOCK with the code.  Since that BLOCK is not a normal block, it
  # can not contain statements except for case and default.
  SWITCH = 'switch'

  # A normal comment.
  COMMENT = 'comment'

  # A JsDoc comment.
  DOC = 'doc'

  # An individual statement.
  STATEMENT = 'statement'

  # Code within parentheses.
  GROUP = 'group'

  # Parameter names in a function declaration.
  PARAMETERS = 'parameters'

  # A set of variable declarations appearing after the 'var' keyword.
  VAR = 'var'

  # Context types that are blocks.
  BLOCK_TYPES = frozenset([
      ROOT, BLOCK, CASE_BLOCK, FOR_GROUP_BLOCK, IMPLIED_BLOCK])

  def __init__(self, context_type, start_token, parent=None):
    """Initializes the context object.

    Args:
      context_type: The context type, one of the class constants above.
      start_token: The token where this context starts.
      parent: The parent context, or None for a top-level context.

    Attributes:
      type: The context type.
      start_token: The token where this context starts.
      end_token: The token where this context ends; None until closed.
      parent: The parent context.
      children: The child contexts of this context, in order.
    """
    self.type = context_type
    self.start_token = start_token
    self.end_token = None
    self.children = []
    # The parent link is established through AddChild so the parent's
    # children list stays consistent with it.
    self.parent = None
    if parent:
      parent.AddChild(self)

  def __repr__(self):
    """Returns the chain of context types from this context up to the root."""
    ancestry = []
    node = self
    while node:
      ancestry.append(node.type)
      node = node.parent
    return 'Context(%s)' % ' > '.join(ancestry)

  def AddChild(self, child):
    """Adds a child to this context and sets child's parent to this context.

    Args:
      child: A child EcmaContext.  The child's parent will be set to this
          context.
    """
    child.parent = self
    self.children.append(child)
    # Keep children ordered by document position of their start tokens.
    self.children.sort(EcmaContext._CompareContexts)

  def GetRoot(self):
    """Get the root context that contains this context, if any."""
    node = self
    while node:
      if node.type is EcmaContext.ROOT:
        return node
      node = node.parent

  @staticmethod
  def _CompareContexts(context1, context2):
    """Sorts contexts 1 and 2 by start token document position."""
    return tokenutil.Compare(context1.start_token, context2.start_token)
+
+
class EcmaMetaData(object):
  """Token metadata for EcmaScript languages.

  Attributes:
    last_code: The last code token to appear before this one.
    context: The context this token appears in.
    operator_type: The operator type, will be one of the *_OPERATOR constants
        defined below.
    aliased_symbol: The full symbol being identified, as a string (e.g. an
        'XhrIo' alias for 'goog.net.XhrIo').  Only applicable to identifier
        tokens.  This is set in aliaspass.py and is a best guess.
    is_alias_definition: True if the symbol is part of an alias definition.
        If so, these symbols won't be counted towards goog.requires/provides.
  """

  UNARY_OPERATOR = 'unary'

  UNARY_POST_OPERATOR = 'unary_post'

  BINARY_OPERATOR = 'binary'

  TERNARY_OPERATOR = 'ternary'

  def __init__(self):
    """Initializes a token metadata object with all fields cleared."""
    self.last_code = None
    self.context = None
    self.operator_type = None
    self.is_implied_semicolon = False
    self.is_implied_block = False
    self.is_implied_block_close = False
    self.aliased_symbol = None
    self.is_alias_definition = False

  def __repr__(self):
    """Returns a compact debugging description of this metadata."""
    details = ['%r' % self.context]
    if self.operator_type:
      details.append('optype: %r' % self.operator_type)
    if self.is_implied_semicolon:
      details.append('implied;')
    if self.aliased_symbol:
      details.append('alias for: %s' % self.aliased_symbol)
    return 'MetaData(%s)' % ', '.join(details)

  def IsUnaryOperator(self):
    """Returns True for both pre- and post-unary operator tokens."""
    return self.operator_type in (EcmaMetaData.UNARY_OPERATOR,
                                  EcmaMetaData.UNARY_POST_OPERATOR)

  def IsUnaryPostOperator(self):
    """Returns True only for post-unary operator tokens (e.g. x++)."""
    return self.operator_type == EcmaMetaData.UNARY_POST_OPERATOR
+
+
+class EcmaMetaDataPass(object):
+ """A pass that iterates over all tokens and builds metadata about them."""
+
+ def __init__(self):
+ """Initialize the meta data pass object."""
+ self.Reset()
+
+ def Reset(self):
+ """Resets the metadata pass to prepare for the next file."""
+ self._token = None
+ self._context = None
+ self._AddContext(EcmaContext.ROOT)
+ self._last_code = None
+
+ def _CreateContext(self, context_type):
+ """Overridable by subclasses to create the appropriate context type."""
+ return EcmaContext(context_type, self._token, self._context)
+
+ def _CreateMetaData(self):
+ """Overridable by subclasses to create the appropriate metadata type."""
+ return EcmaMetaData()
+
+ def _AddContext(self, context_type):
+ """Adds a context of the given type to the context stack.
+
+ Args:
+ context_type: The type of context to create
+ """
+ self._context = self._CreateContext(context_type)
+
+ def _PopContext(self):
+ """Moves up one level in the context stack.
+
+ Returns:
+ The former context.
+
+ Raises:
+ ParseError: If the root context is popped.
+ """
+ top_context = self._context
+ top_context.end_token = self._token
+ self._context = top_context.parent
+ if self._context:
+ return top_context
+ else:
+ raise ParseError(self._token)
+
+ def _PopContextType(self, *stop_types):
+ """Pops the context stack until a context of the given type is popped.
+
+ Args:
+ *stop_types: The types of context to pop to - stops at the first match.
+
+ Returns:
+ The context object of the given type that was popped.
+ """
+ last = None
+ while not last or last.type not in stop_types:
+ last = self._PopContext()
+ return last
+
  def _EndStatement(self):
    """Process the end of a statement."""
    self._PopContextType(EcmaContext.STATEMENT)
    if self._context.type == EcmaContext.IMPLIED_BLOCK:
      # Ending the statement also closes the implied block that wraps a
      # single-statement if/while/for body.
      self._token.metadata.is_implied_block_close = True
      self._PopContext()
+
  def _ProcessContext(self):
    """Process the context at the current token.

    Returns:
      The context that should be assigned to the current token, or None if
      the current context after this method should be used.

    Raises:
      ParseError: When the token appears in an invalid context.
    """
    token = self._token
    token_type = token.type

    if self._context.type in EcmaContext.BLOCK_TYPES:
      # Whenever we're in a block, we add a statement context.  We make an
      # exception for switch statements since they can only contain case: and
      # default: and therefore don't directly contain statements.
      # The block we add here may be immediately removed in some cases, but
      # that causes no harm.
      parent = self._context.parent
      if not parent or parent.type != EcmaContext.SWITCH:
        self._AddContext(EcmaContext.STATEMENT)

    elif self._context.type == EcmaContext.ARRAY_LITERAL:
      self._AddContext(EcmaContext.LITERAL_ELEMENT)

    if token_type == TokenType.START_PAREN:
      if self._last_code and self._last_code.IsKeyword('for'):
        # for loops contain multiple statements in the group unlike while,
        # switch, if, etc.
        self._AddContext(EcmaContext.FOR_GROUP_BLOCK)
      else:
        self._AddContext(EcmaContext.GROUP)

    elif token_type == TokenType.END_PAREN:
      result = self._PopContextType(EcmaContext.GROUP,
                                    EcmaContext.FOR_GROUP_BLOCK)
      keyword_token = result.start_token.metadata.last_code
      # keyword_token will not exist if the open paren is the first line of
      # the file, for example if all code is wrapped in an immediately
      # executed anonymous function.
      if keyword_token and keyword_token.string in ('if', 'for', 'while'):
        next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
        if next_code.type != TokenType.START_BLOCK:
          # Check for do-while: the keyword may directly follow the closing
          # brace of a do block, in which case no implied block starts here.
          is_do_while = False
          pre_keyword_token = keyword_token.metadata.last_code
          if (pre_keyword_token and
              pre_keyword_token.type == TokenType.END_BLOCK):
            start_block_token = pre_keyword_token.metadata.context.start_token
            is_do_while = start_block_token.metadata.last_code.string == 'do'

          # If it's not do-while, it's an implied block.
          if not is_do_while:
            self._AddContext(EcmaContext.IMPLIED_BLOCK)
            token.metadata.is_implied_block = True

      return result

    # else (not else if) with no open brace after it should be considered the
    # start of an implied block, similar to the case with if, for, and while
    # above.
    elif (token_type == TokenType.KEYWORD and
          token.string == 'else'):
      next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
      if (next_code.type != TokenType.START_BLOCK and
          (next_code.type != TokenType.KEYWORD or next_code.string != 'if')):
        self._AddContext(EcmaContext.IMPLIED_BLOCK)
        token.metadata.is_implied_block = True

    elif token_type == TokenType.START_PARAMETERS:
      self._AddContext(EcmaContext.PARAMETERS)

    elif token_type == TokenType.END_PARAMETERS:
      return self._PopContextType(EcmaContext.PARAMETERS)

    elif token_type == TokenType.START_BRACKET:
      # A bracket after an expression ender is an index ("a[0]"); anywhere
      # else it starts an array literal.
      if (self._last_code and
          self._last_code.type in TokenType.EXPRESSION_ENDER_TYPES):
        self._AddContext(EcmaContext.INDEX)
      else:
        self._AddContext(EcmaContext.ARRAY_LITERAL)

    elif token_type == TokenType.END_BRACKET:
      return self._PopContextType(EcmaContext.INDEX, EcmaContext.ARRAY_LITERAL)

    elif token_type == TokenType.START_BLOCK:
      if (self._last_code.type in (TokenType.END_PAREN,
                                   TokenType.END_PARAMETERS) or
          self._last_code.IsKeyword('else') or
          self._last_code.IsKeyword('do') or
          self._last_code.IsKeyword('try') or
          self._last_code.IsKeyword('finally') or
          (self._last_code.IsOperator(':') and
           self._last_code.metadata.context.type == EcmaContext.CASE_BLOCK)):
        # else, do, try, and finally all might have no () before {.
        # Also, handle the bizarre syntax case 10: {...}.
        self._AddContext(EcmaContext.BLOCK)
      else:
        self._AddContext(EcmaContext.OBJECT_LITERAL)

    elif token_type == TokenType.END_BLOCK:
      context = self._PopContextType(EcmaContext.BLOCK,
                                     EcmaContext.OBJECT_LITERAL)
      if self._context.type == EcmaContext.SWITCH:
        # The end of the block also means the end of the switch statement it
        # applies to.
        return self._PopContext()
      return context

    elif token.IsKeyword('switch'):
      self._AddContext(EcmaContext.SWITCH)

    elif (token_type == TokenType.KEYWORD and
          token.string in ('case', 'default') and
          self._context.type != EcmaContext.OBJECT_LITERAL):
      # Pop up to but not including the switch block.
      while self._context.parent.type != EcmaContext.SWITCH:
        self._PopContext()
      if self._context.parent is None:
        raise ParseError(token, 'Encountered case/default statement '
                         'without switch statement')

    elif token.IsOperator('?'):
      self._AddContext(EcmaContext.TERNARY_TRUE)

    elif token.IsOperator(':'):
      if self._context.type == EcmaContext.OBJECT_LITERAL:
        self._AddContext(EcmaContext.LITERAL_ELEMENT)

      elif self._context.type == EcmaContext.TERNARY_TRUE:
        self._PopContext()
        self._AddContext(EcmaContext.TERNARY_FALSE)

      # Handle nested ternary statements like:
      # foo = bar ? baz ? 1 : 2 : 3
      # When we encounter the second ":" the context is
      # ternary_false > ternary_true > statement > root
      elif (self._context.type == EcmaContext.TERNARY_FALSE and
            self._context.parent.type == EcmaContext.TERNARY_TRUE):
        self._PopContext()  # Leave current ternary false context.
        self._PopContext()  # Leave current parent ternary true
        self._AddContext(EcmaContext.TERNARY_FALSE)

      elif self._context.parent.type == EcmaContext.SWITCH:
        self._AddContext(EcmaContext.CASE_BLOCK)

    elif token.IsKeyword('var'):
      self._AddContext(EcmaContext.VAR)

    elif token.IsOperator(','):
      # A comma ends any contexts nested below the nearest container that
      # legitimately separates its elements with commas.
      while self._context.type not in (EcmaContext.VAR,
                                       EcmaContext.ARRAY_LITERAL,
                                       EcmaContext.OBJECT_LITERAL,
                                       EcmaContext.STATEMENT,
                                       EcmaContext.PARAMETERS,
                                       EcmaContext.GROUP):
        self._PopContext()

    elif token_type == TokenType.SEMICOLON:
      self._EndStatement()
+
+ def Process(self, first_token):
+ """Processes the token stream starting with the given token."""
+ self._token = first_token
+ while self._token:
+ self._ProcessToken()
+
+ if self._token.IsCode():
+ self._last_code = self._token
+
+ self._token = self._token.next
+
+ try:
+ self._PopContextType(self, EcmaContext.ROOT)
+ except ParseError:
+ # Ignore the "popped to root" error.
+ pass
+
  def _ProcessToken(self):
    """Process the given token.

    Attaches freshly computed metadata (context, last code token, operator
    type) to self._token and then decides whether the token is followed by
    an implied semicolon, ending the statement if so.
    """
    token = self._token
    token.metadata = self._CreateMetaData()
    # _ProcessContext may return the context popped by this token; fall back
    # to the current context otherwise.
    context = (self._ProcessContext() or self._context)
    token.metadata.context = context
    token.metadata.last_code = self._last_code

    # Determine the operator type of the token, if applicable.
    if token.type == TokenType.OPERATOR:
      token.metadata.operator_type = self._GetOperatorType(token)

    # Determine if there is an implied semicolon after the token.
    if token.type != TokenType.SEMICOLON:
      next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
      # A statement like if (x) does not need a semicolon after it
      # NOTE(review): this compares the context object itself against the
      # IMPLIED_BLOCK type constant; self._context.type may have been
      # intended - confirm.
      is_implied_block = self._context == EcmaContext.IMPLIED_BLOCK
      is_last_code_in_line = token.IsCode() and (
          not next_code or next_code.line_number != token.line_number)
      is_continued_operator = (token.type == TokenType.OPERATOR and
                               not token.metadata.IsUnaryPostOperator())
      is_continued_dot = token.string == '.'
      next_code_is_operator = next_code and next_code.type == TokenType.OPERATOR
      is_end_of_block = (
          token.type == TokenType.END_BLOCK and
          token.metadata.context.type != EcmaContext.OBJECT_LITERAL)
      is_multiline_string = token.type == TokenType.STRING_TEXT
      is_continued_var_decl = (token.IsKeyword('var') and
                               next_code and
                               (next_code.type in [TokenType.IDENTIFIER,
                                                   TokenType.SIMPLE_LVALUE]) and
                               token.line_number < next_code.line_number)
      next_code_is_block = next_code and next_code.type == TokenType.START_BLOCK
      # A semicolon is implied only when the token ends its line and none of
      # the continuation conditions computed above hold.
      if (is_last_code_in_line and
          self._StatementCouldEndInContext() and
          not is_multiline_string and
          not is_end_of_block and
          not is_continued_var_decl and
          not is_continued_operator and
          not is_continued_dot and
          not next_code_is_operator and
          not is_implied_block and
          not next_code_is_block):
        token.metadata.is_implied_semicolon = True
        self._EndStatement()
+
+ def _StatementCouldEndInContext(self):
+ """Returns if the current statement (if any) may end in this context."""
+ # In the basic statement or variable declaration context, statement can
+ # always end in this context.
+ if self._context.type in (EcmaContext.STATEMENT, EcmaContext.VAR):
+ return True
+
+ # End of a ternary false branch inside a statement can also be the
+ # end of the statement, for example:
+ # var x = foo ? foo.bar() : null
+ # In this case the statement ends after the null, when the context stack
+ # looks like ternary_false > var > statement > root.
+ if (self._context.type == EcmaContext.TERNARY_FALSE and
+ self._context.parent.type in (EcmaContext.STATEMENT, EcmaContext.VAR)):
+ return True
+
+ # In all other contexts like object and array literals, ternary true, etc.
+ # the statement can't yet end.
+ return False
+
+ def _GetOperatorType(self, token):
+ """Returns the operator type of the given operator token.
+
+ Args:
+ token: The token to get arity for.
+
+ Returns:
+ The type of the operator. One of the *_OPERATOR constants defined in
+ EcmaMetaData.
+ """
+ if token.string == '?':
+ return EcmaMetaData.TERNARY_OPERATOR
+
+ if token.string in TokenType.UNARY_OPERATORS:
+ return EcmaMetaData.UNARY_OPERATOR
+
+ last_code = token.metadata.last_code
+ if not last_code or last_code.type == TokenType.END_BLOCK:
+ return EcmaMetaData.UNARY_OPERATOR
+
+ if (token.string in TokenType.UNARY_POST_OPERATORS and
+ last_code.type in TokenType.EXPRESSION_ENDER_TYPES):
+ return EcmaMetaData.UNARY_POST_OPERATOR
+
+ if (token.string in TokenType.UNARY_OK_OPERATORS and
+ last_code.type not in TokenType.EXPRESSION_ENDER_TYPES and
+ last_code.string not in TokenType.UNARY_POST_OPERATORS):
+ return EcmaMetaData.UNARY_OPERATOR
+
+ return EcmaMetaData.BINARY_OPERATOR
diff --git a/tools/closure_linter/build/lib/closure_linter/error_check.py b/tools/closure_linter/build/lib/closure_linter/error_check.py
new file mode 100644
index 0000000000..8d657fe917
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/error_check.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Specific JSLint errors checker."""
+
+
+
+import gflags as flags
+
+FLAGS = flags.FLAGS
+
+
class Rule(object):
  """Enumerates the optional lint rules that can be switched on.

  Detailed documentation for each rule lives in the flag definitions below.
  """

  BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level'
  INDENTATION = 'indentation'
  WELL_FORMED_AUTHOR = 'well_formed_author'
  NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc'
  BRACES_AROUND_TYPE = 'braces_around_type'
  OPTIONAL_TYPE_MARKER = 'optional_type_marker'
  VARIABLE_ARG_MARKER = 'variable_arg_marker'
  UNUSED_PRIVATE_MEMBERS = 'unused_private_members'
  UNUSED_LOCAL_VARIABLES = 'unused_local_variables'

  # Sentinel value that enables every known error at once.
  ALL = 'all'

  # The subset of rules enabled by the --strict flag, i.e. the checks that
  # are specific to the stricter Closure style.
  CLOSURE_RULES = frozenset({
      BLANK_LINES_AT_TOP_LEVEL,
      INDENTATION,
      WELL_FORMED_AUTHOR,
      NO_BRACES_AROUND_INHERIT_DOC,
      BRACES_AROUND_TYPE,
      OPTIONAL_TYPE_MARKER,
      VARIABLE_ARG_MARKER,
  })
+
+
# Flag documentation for each optional rule; Rule (above) only declares the
# identifiers. Fixes two defects in the original help text: a missing space
# made "validates" and "number" run together ("validatesnumber"), and the
# VARIABLE_ARG_MARKER rule - a member of Rule.CLOSURE_RULES - was undocumented.
flags.DEFINE_boolean('strict', False,
                     'Whether to validate against the stricter Closure style. '
                     'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.')
flags.DEFINE_multistring('jslint_error', [],
                         'List of specific lint errors to check. Here is a list'
                         ' of accepted values:\n'
                         ' - ' + Rule.ALL + ': enables all following errors.\n'
                         ' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates '
                         'number of blank lines between blocks at top level.\n'
                         ' - ' + Rule.INDENTATION + ': checks correct '
                         'indentation of code.\n'
                         ' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the '
                         '@author JsDoc tags.\n'
                         ' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': '
                         'forbids braces around @inheritdoc JsDoc tags.\n'
                         ' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces '
                         'around types in JsDoc tags.\n'
                         ' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct '
                         'use of optional marker = in param types.\n'
                         ' - ' + Rule.VARIABLE_ARG_MARKER + ': checks correct '
                         'use of the variable arguments marker (...) in param '
                         'types.\n'
                         ' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for '
                         'unused private variables.\n'
                         ' - ' + Rule.UNUSED_LOCAL_VARIABLES + ': checks for '
                         'unused local variables.\n')
+
+
def ShouldCheck(rule):
  """Returns whether the optional rule should be checked.

  Computes different flags (strict, jslint_error, jslint_noerror) to find out
  if this specific rule should be checked.

  Args:
    rule: Name of the rule (see Rule).

  Returns:
    True if the rule should be checked according to the flags, otherwise False.
  """
  requested = FLAGS.jslint_error
  if Rule.ALL in requested or rule in requested:
    return True
  # Otherwise the rule is only checked when --strict is on and the rule
  # belongs to the stricter Closure style.
  return FLAGS.strict and rule in Rule.CLOSURE_RULES
diff --git a/tools/closure_linter/build/lib/closure_linter/error_fixer.py b/tools/closure_linter/build/lib/closure_linter/error_fixer.py
new file mode 100644
index 0000000000..88f9c720ab
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/error_fixer.py
@@ -0,0 +1,618 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Main class responsible for automatically fixing simple style violations."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = 'robbyw@google.com (Robert Walker)'
+
+import re
+
+import gflags as flags
+from closure_linter import errors
+from closure_linter import javascriptstatetracker
+from closure_linter import javascripttokens
+from closure_linter import requireprovidesorter
+from closure_linter import tokenutil
+from closure_linter.common import errorhandler
+
+# Shorthand
+Token = javascripttokens.JavaScriptToken
+Type = javascripttokens.JavaScriptTokenType
+
+END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$')
+
+# Regex to represent common mistake inverting author name and email as
+# @author User Name (user@company)
+INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
+ r'(?P<name>[^(]+)'
+ r'(?P<whitespace_after_name>\s+)'
+ r'\('
+ r'(?P<email>[^\s]+@[^)\s]+)'
+ r'\)'
+ r'(?P<trailing_characters>.*)')
+
+FLAGS = flags.FLAGS
+flags.DEFINE_boolean('disable_indentation_fixing', False,
+ 'Whether to disable automatic fixing of indentation.')
+flags.DEFINE_list('fix_error_codes', [], 'A list of specific error codes to '
+ 'fix. Defaults to all supported error codes when empty. '
+ 'See errors.py for a list of error codes.')
+
+
class ErrorFixer(errorhandler.ErrorHandler):
  """Object that fixes simple style errors.

  Receives lint errors via HandleError, rewrites the token linked list in
  place to fix the ones it knows how to fix, and writes the repaired source
  back out in FinishFile.
  """

  def __init__(self, external_file=None):
    """Initialize the error fixer.

    Args:
      external_file: If included, all output will be directed to this file
        instead of overwriting the files the errors are found in.

    Raises:
      ValueError: If --fix_error_codes names an unknown error.
    """
    errorhandler.ErrorHandler.__init__(self)

    self._file_name = None
    self._file_token = None
    self._external_file = external_file

    try:
      # --fix_error_codes entries are matched case-insensitively against the
      # error names declared in errors.py.
      self._fix_error_codes = set([errors.ByName(error.upper()) for error in
                                   FLAGS.fix_error_codes])
    except KeyError as ke:
      raise ValueError('Unknown error code ' + ke.args[0])

  def HandleFile(self, filename, first_token):
    """Notifies this ErrorPrinter that subsequent errors are in filename.

    Resets the per-file bookkeeping (fix count, changed lines).

    Args:
      filename: The name of the file about to be checked.
      first_token: The first token in the file.
    """
    self._file_name = filename
    self._file_is_html = filename.endswith('.html') or filename.endswith('.htm')
    self._file_token = first_token
    self._file_fix_count = 0
    self._file_changed_lines = set()

  def _AddFix(self, tokens):
    """Adds the fix to the internal count.

    Args:
      tokens: The token or sequence of tokens changed to fix an error.
    """
    self._file_fix_count += 1
    # `tokens` may be one token or an iterable; a single token is detected
    # by the presence of a line_number attribute.
    if hasattr(tokens, 'line_number'):
      self._file_changed_lines.add(tokens.line_number)
    else:
      for token in tokens:
        self._file_changed_lines.add(token.line_number)

  def _FixJsDocPipeNull(self, js_type):
    """Change number|null or null|number to ?number.

    Args:
      js_type: The typeannotation.TypeAnnotation instance to fix.
    """

    # Recurse into all sub_types if the error was at a deeper level.
    # NOTE(review): relies on Python 2's eager map() for its side effect;
    # under Python 3 map() is lazy and this line would be a no-op.
    map(self._FixJsDocPipeNull, js_type.IterTypes())

    if js_type.type_group and len(js_type.sub_types) == 2:
      # Find and remove the null sub_type:
      sub_type = None
      for sub_type in js_type.sub_types:
        if sub_type.identifier == 'null':
          map(tokenutil.DeleteToken, sub_type.tokens)
          self._AddFix(sub_type.tokens)
          break
      else:
        # Neither alternative is 'null'; nothing to rewrite.
        return

      # Prefix the remaining type with '?' to express nullability.
      first_token = js_type.FirstToken()
      question_mark = Token('?', Type.DOC_TYPE_MODIFIER, first_token.line,
                            first_token.line_number)
      tokenutil.InsertTokenBefore(question_mark, first_token)
      js_type.tokens.insert(0, question_mark)
      js_type.tokens.remove(sub_type)
      js_type.or_null = True

      # Now also remove the separator, which is in the parent's token list,
      # either before or after the sub_type, there is exactly one. Scan for it.
      for token in js_type.tokens:
        if (token and isinstance(token, Token) and
            token.type == Type.DOC_TYPE_MODIFIER and token.string == '|'):
          tokenutil.DeleteToken(token)
          self._AddFix(token)
          break

  def HandleError(self, error):
    """Attempts to fix the error.

    Dispatches on error.code; unknown codes are silently ignored.

    Args:
      error: The error object
    """
    code = error.code
    token = error.token

    # Honor --fix_error_codes: when a whitelist was given, skip other codes.
    if self._fix_error_codes and code not in self._fix_error_codes:
      return

    if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
      self._FixJsDocPipeNull(token.attached_object.jstype)

    elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
      # Append '=' to the end of the type, preserving trailing whitespace.
      iterator = token.attached_object.type_end_token
      if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
        iterator = iterator.previous

      ending_space = len(iterator.string) - len(iterator.string.rstrip())
      iterator.string = '%s=%s' % (iterator.string.rstrip(),
                                   ' ' * ending_space)

      # Create a new flag object with updated type info.
      token.attached_object = javascriptstatetracker.JsDocFlag(token)
      self._AddFix(token)

    elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE:
      # Prepend '...' to the start of the type, preserving leading whitespace.
      iterator = token.attached_object.type_start_token
      if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
        iterator = iterator.next

      starting_space = len(iterator.string) - len(iterator.string.lstrip())
      iterator.string = '%s...%s' % (' ' * starting_space,
                                     iterator.string.lstrip())

      # Create a new flag object with updated type info.
      token.attached_object = javascriptstatetracker.JsDocFlag(token)
      self._AddFix(token)

    elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
                  errors.MISSING_SEMICOLON):
      semicolon_token = Token(';', Type.SEMICOLON, token.line,
                              token.line_number)
      tokenutil.InsertTokenAfter(semicolon_token, token)
      token.metadata.is_implied_semicolon = False
      semicolon_token.metadata.is_implied_semicolon = False
      self._AddFix(token)

    elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
                  errors.REDUNDANT_SEMICOLON,
                  errors.COMMA_AT_END_OF_LITERAL):
      self._DeleteToken(token)
      self._AddFix(token)

    elif code == errors.INVALID_JSDOC_TAG:
      if token.string == '@returns':
        token.string = '@return'
        self._AddFix(token)

    elif code == errors.FILE_MISSING_NEWLINE:
      # This error is fixed implicitly by the way we restore the file
      self._AddFix(token)

    elif code == errors.MISSING_SPACE:
      if error.fix_data:
        token.string = error.fix_data
        self._AddFix(token)
      elif error.position:
        if error.position.IsAtBeginning():
          tokenutil.InsertSpaceTokenAfter(token.previous)
        elif error.position.IsAtEnd(token.string):
          tokenutil.InsertSpaceTokenAfter(token)
        else:
          token.string = error.position.Set(token.string, ' ')
        self._AddFix(token)

    elif code == errors.EXTRA_SPACE:
      if error.position:
        token.string = error.position.Set(token.string, '')
        self._AddFix(token)

    elif code == errors.MISSING_LINE:
      if error.position.IsAtBeginning():
        tokenutil.InsertBlankLineAfter(token.previous)
      else:
        tokenutil.InsertBlankLineAfter(token)
      self._AddFix(token)

    elif code == errors.EXTRA_LINE:
      self._DeleteToken(token)
      self._AddFix(token)

    elif code == errors.WRONG_BLANK_LINE_COUNT:
      if not token.previous:
        # TODO(user): Add an insertBefore method to tokenutil.
        return

      num_lines = error.fix_data
      should_delete = False

      # Negative fix_data means there are too many blank lines: delete some.
      if num_lines < 0:
        num_lines *= -1
        should_delete = True

      for unused_i in xrange(1, num_lines + 1):
        if should_delete:
          # TODO(user): DeleteToken should update line numbers.
          self._DeleteToken(token.previous)
        else:
          tokenutil.InsertBlankLineAfter(token.previous)
      self._AddFix(token)

    elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
      end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
      if end_quote:
        # NOTE(review): the end token is created with type
        # SINGLE_QUOTE_STRING_START and with the start token's line_number;
        # SINGLE_QUOTE_STRING_END / end_quote.line_number look intended -
        # confirm before relying on these fields downstream.
        single_quote_start = Token(
            "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
        single_quote_end = Token(
            "'", Type.SINGLE_QUOTE_STRING_START, end_quote.line,
            token.line_number)

        tokenutil.InsertTokenAfter(single_quote_start, token)
        tokenutil.InsertTokenAfter(single_quote_end, end_quote)
        self._DeleteToken(token)
        self._DeleteToken(end_quote)
        self._AddFix([token, end_quote])

    elif code == errors.MISSING_BRACES_AROUND_TYPE:
      fixed_tokens = []
      start_token = token.attached_object.type_start_token

      if start_token.type != Type.DOC_START_BRACE:
        # Split off any leading whitespace so the brace lands directly in
        # front of the type text.
        leading_space = (
            len(start_token.string) - len(start_token.string.lstrip()))
        if leading_space:
          start_token = tokenutil.SplitToken(start_token, leading_space)
          # Fix case where start and end token were the same.
          if token.attached_object.type_end_token == start_token.previous:
            token.attached_object.type_end_token = start_token

        new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
                          start_token.line_number)
        tokenutil.InsertTokenAfter(new_token, start_token.previous)
        token.attached_object.type_start_token = new_token
        fixed_tokens.append(new_token)

      end_token = token.attached_object.type_end_token
      if end_token.type != Type.DOC_END_BRACE:
        # If the start token was a brace, the end token will be a
        # FLAG_ENDING_TYPE token, if there wasn't a starting brace then
        # the end token is the last token of the actual type.
        last_type = end_token
        if not fixed_tokens:
          last_type = end_token.previous

        while last_type.string.isspace():
          last_type = last_type.previous

        # If there was no starting brace then a lone end brace wouldn't have
        # been type end token. Now that we've added any missing start brace,
        # see if the last effective type token was an end brace.
        if last_type.type != Type.DOC_END_BRACE:
          trailing_space = (len(last_type.string) -
                            len(last_type.string.rstrip()))
          if trailing_space:
            tokenutil.SplitToken(last_type,
                                 len(last_type.string) - trailing_space)

          new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
                            last_type.line_number)
          tokenutil.InsertTokenAfter(new_token, last_type)
          token.attached_object.type_end_token = new_token
          fixed_tokens.append(new_token)

      self._AddFix(fixed_tokens)

    elif code == errors.LINE_STARTS_WITH_OPERATOR:
      # Remove whitespace following the operator so the line starts clean.
      self._StripSpace(token, before=False)

      # Remove the operator.
      # NOTE(review): uses tokenutil.DeleteToken directly instead of
      # self._DeleteToken, so _file_token is not updated here - confirm this
      # token can never be the file's first token.
      tokenutil.DeleteToken(token)
      self._AddFix(token)

      insertion_point = tokenutil.GetPreviousCodeToken(token)

      # Insert a space between the previous token and the new operator.
      space = Token(' ', Type.WHITESPACE, insertion_point.line,
                    insertion_point.line_number)
      tokenutil.InsertTokenAfter(space, insertion_point)

      # Insert the operator on the end of the previous line.
      new_token = Token(token.string, token.type, insertion_point.line,
                        insertion_point.line_number)
      tokenutil.InsertTokenAfter(new_token, space)
      self._AddFix(new_token)

    elif code == errors.LINE_ENDS_WITH_DOT:
      # Remove whitespace preceding the operator to remove trailing whitespace.
      self._StripSpace(token, before=True)

      # Remove the dot.
      # NOTE(review): like LINE_STARTS_WITH_OPERATOR above, this bypasses
      # self._DeleteToken's _file_token bookkeeping.
      tokenutil.DeleteToken(token)
      self._AddFix(token)

      insertion_point = tokenutil.GetNextCodeToken(token)

      # Insert the dot at the beginning of the next line of code.
      new_token = Token(token.string, token.type, insertion_point.line,
                        insertion_point.line_number)
      tokenutil.InsertTokenBefore(new_token, insertion_point)
      self._AddFix(new_token)

    elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
      require_start_token = error.fix_data
      sorter = requireprovidesorter.RequireProvideSorter()
      sorter.FixRequires(require_start_token)

      self._AddFix(require_start_token)

    elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
      provide_start_token = error.fix_data
      sorter = requireprovidesorter.RequireProvideSorter()
      sorter.FixProvides(provide_start_token)

      self._AddFix(provide_start_token)

    elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
      if token.previous.string == '{' and token.next.string == '}':
        self._DeleteToken(token.previous)
        self._DeleteToken(token.next)
        self._AddFix([token])

    elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
      match = INVERTED_AUTHOR_SPEC.match(token.string)
      if match:
        # Swap the name and email into the expected "email (name)" order.
        token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
                                         match.group('email'),
                                         match.group('whitespace_after_name'),
                                         match.group('name'),
                                         match.group('trailing_characters'))
        self._AddFix(token)

    elif (code == errors.WRONG_INDENTATION and
          not FLAGS.disable_indentation_fixing):
      token = tokenutil.GetFirstTokenInSameLine(token)
      actual = error.position.start
      expected = error.position.length

      # Cases where first token is param but with leading spaces.
      if (len(token.string.lstrip()) == len(token.string) - actual and
          token.string.lstrip()):
        token.string = token.string.lstrip()
        actual = 0

      if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
        token.string = token.string.lstrip() + (' ' * expected)
        self._AddFix([token])
      else:
        # We need to add indentation.
        new_token = Token(' ' * expected, Type.WHITESPACE,
                          token.line, token.line_number)
        # Note that we'll never need to add indentation at the first line,
        # since it will always not be indented. Therefore it's safe to assume
        # token.previous exists.
        tokenutil.InsertTokenAfter(new_token, token.previous)
        self._AddFix([token])

    elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
                  errors.MISSING_END_OF_SCOPE_COMMENT]:
      # Only fix cases where }); is found with no trailing content on the line
      # other than a comment. Value of 'token' is set to } for this error.
      if (token.type == Type.END_BLOCK and
          token.next.type == Type.END_PAREN and
          token.next.next.type == Type.SEMICOLON):
        current_token = token.next.next.next
        removed_tokens = []
        while current_token and current_token.line_number == token.line_number:
          if current_token.IsAnyType(Type.WHITESPACE,
                                     Type.START_SINGLE_LINE_COMMENT,
                                     Type.COMMENT):
            removed_tokens.append(current_token)
            current_token = current_token.next
          else:
            # Non-comment content follows on the line; leave it alone.
            return

        if removed_tokens:
          self._DeleteTokens(removed_tokens[0], len(removed_tokens))

        # Append the canonical "  // goog.scope" trailer after the semicolon.
        whitespace_token = Token(' ', Type.WHITESPACE, token.line,
                                 token.line_number)
        start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
                                    token.line, token.line_number)
        comment_token = Token(' goog.scope', Type.COMMENT, token.line,
                              token.line_number)
        insertion_tokens = [whitespace_token, start_comment_token,
                            comment_token]

        tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
        self._AddFix(removed_tokens + insertion_tokens)

    elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
      tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
      num_delete_tokens = len(tokens_in_line)
      # If line being deleted is preceded and succeed with blank lines then
      # delete one blank line also.
      if (tokens_in_line[0].previous and tokens_in_line[-1].next
          and tokens_in_line[0].previous.type == Type.BLANK_LINE
          and tokens_in_line[-1].next.type == Type.BLANK_LINE):
        num_delete_tokens += 1
      self._DeleteTokens(tokens_in_line[0], num_delete_tokens)
      self._AddFix(tokens_in_line)

    elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
      missing_namespaces = error.fix_data[0]
      need_blank_line = error.fix_data[1] or (not token.previous)

      # Insert a disposable anchor token so new lines can be added before
      # `token`; it is deleted again at the end of this branch.
      insert_location = Token('', Type.NORMAL, '', token.line_number - 1)
      dummy_first_token = insert_location
      tokenutil.InsertTokenBefore(insert_location, token)

      # If inserting a blank line check blank line does not exist before
      # token to avoid extra blank lines.
      if (need_blank_line and insert_location.previous
          and insert_location.previous.type != Type.BLANK_LINE):
        tokenutil.InsertBlankLineAfter(insert_location)
        insert_location = insert_location.next

      for missing_namespace in missing_namespaces:
        new_tokens = self._GetNewRequireOrProvideTokens(
            code == errors.MISSING_GOOG_PROVIDE,
            missing_namespace, insert_location.line_number + 1)
        tokenutil.InsertLineAfter(insert_location, new_tokens)
        insert_location = new_tokens[-1]
        self._AddFix(new_tokens)

      # If inserting a blank line check blank line does not exist after
      # token to avoid extra blank lines.
      if (need_blank_line and insert_location.next
          and insert_location.next.type != Type.BLANK_LINE):
        tokenutil.InsertBlankLineAfter(insert_location)

      tokenutil.DeleteToken(dummy_first_token)

  def _StripSpace(self, token, before):
    """Strip whitespace tokens either preceding or following the given token.

    Args:
      token: The token.
      before: If true, strip space before the token, if false, after it.
    """
    token = token.previous if before else token.next
    while token and token.type == Type.WHITESPACE:
      tokenutil.DeleteToken(token)
      token = token.previous if before else token.next

  def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
    """Returns a list of tokens to create a goog.require/provide statement.

    Args:
      is_provide: True if getting tokens for a provide, False for require.
      namespace: The required or provided namespaces to get tokens for.
      line_number: The line number the new require or provide statement will be
        on.

    Returns:
      Tokens to create a new goog.require or goog.provide statement.
    """
    string = 'goog.require'
    if is_provide:
      string = 'goog.provide'
    line_text = string + '(\'' + namespace + '\');\n'
    return [
        Token(string, Type.IDENTIFIER, line_text, line_number),
        Token('(', Type.START_PAREN, line_text, line_number),
        Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number),
        Token(namespace, Type.STRING_TEXT, line_text, line_number),
        Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number),
        Token(')', Type.END_PAREN, line_text, line_number),
        Token(';', Type.SEMICOLON, line_text, line_number)
    ]

  def _DeleteToken(self, token):
    """Deletes the specified token from the linked list of tokens.

    Updates instance variables pointing to tokens such as _file_token if
    they reference the deleted token.

    Args:
      token: The token to delete.
    """
    if token == self._file_token:
      self._file_token = token.next

    tokenutil.DeleteToken(token)

  def _DeleteTokens(self, token, token_count):
    """Deletes the given number of tokens starting with the given token.

    Updates instance variables pointing to tokens such as _file_token if
    they reference the deleted token.

    Args:
      token: The first token to delete.
      token_count: The total number of tokens to delete.
    """
    if token == self._file_token:
      for unused_i in xrange(token_count):
        self._file_token = self._file_token.next

    tokenutil.DeleteTokens(token, token_count)

  def FinishFile(self):
    """Called when the current file has finished style checking.

    Used to go back and fix any errors in the file. It currently supports both
    js and html files. For js files it does a simple dump of all tokens, but in
    order to support html file, we need to merge the original file with the new
    token set back together. This works because the tokenized html file is the
    original html file with all non js lines kept but blanked out with one blank
    line token per line of html.
    """
    if self._file_fix_count:
      # Get the original file content for html.
      if self._file_is_html:
        f = open(self._file_name, 'r')
        original_lines = f.readlines()
        f.close()

      f = self._external_file
      if not f:
        error_noun = 'error' if self._file_fix_count == 1 else 'errors'
        print 'Fixed %d %s in %s' % (
            self._file_fix_count, error_noun, self._file_name)
        f = open(self._file_name, 'w')

      token = self._file_token
      # Finding the first not deleted token.
      while token.is_deleted:
        token = token.next
      # If something got inserted before first token (e.g. due to sorting)
      # then move to start. Bug 8398202.
      while token.previous:
        token = token.previous
      char_count = 0
      line = ''
      while token:
        line += token.string
        char_count += len(token.string)

        if token.IsLastInLine():
          # We distinguish if a blank line in html was from stripped original
          # file or newly added error fix by looking at the "org_line_number"
          # field on the token. It is only set in the tokenizer, so for all
          # error fixes, the value should be None.
          if (line or not self._file_is_html or
              token.orig_line_number is None):
            f.write(line)
            f.write('\n')
          else:
            f.write(original_lines[token.orig_line_number - 1])
          line = ''
          if char_count > 80 and token.line_number in self._file_changed_lines:
            print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
                token.line_number, self._file_name)

          char_count = 0

        token = token.next

      if not self._external_file:
        # Close the file if we created it
        f.close()
diff --git a/tools/closure_linter/build/lib/closure_linter/error_fixer_test.py b/tools/closure_linter/build/lib/closure_linter/error_fixer_test.py
new file mode 100644
index 0000000000..49f449de42
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/error_fixer_test.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the error_fixer module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+
+
+import unittest as googletest
+from closure_linter import error_fixer
+from closure_linter import testutil
+
+
class ErrorFixerTest(googletest.TestCase):
  """Unit tests for ErrorFixer's token-deletion bookkeeping."""

  def setUp(self):
    self.error_fixer = error_fixer.ErrorFixer()

  def testDeleteToken(self):
    # Deleting the tracked first token must advance _file_token to the
    # following token.
    first_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
    expected_token = first_token.next
    self.error_fixer.HandleFile('test_file', first_token)

    self.error_fixer._DeleteToken(first_token)

    self.assertEqual(expected_token, self.error_fixer._file_token)

  def testDeleteTokens(self):
    # Deleting three tokens starting at the tracked first token must leave
    # _file_token pointing at the fourth token.
    first_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
    expected_token = first_token.next.next.next
    self.error_fixer.HandleFile('test_file', first_token)

    self.error_fixer._DeleteTokens(first_token, 3)

    self.assertEqual(expected_token, self.error_fixer._file_token)
+
# Minimal JavaScript fixture; tokenizing it yields the token stream the
# tests above delete from.
_TEST_SCRIPT = """\
var x = 3;
"""

if __name__ == '__main__':
  googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/errorrecord.py b/tools/closure_linter/build/lib/closure_linter/errorrecord.py
new file mode 100644
index 0000000000..ce9fb908c7
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/errorrecord.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""A simple, pickle-serializable class to represent a lint error."""
+
+__author__ = 'nnaze@google.com (Nathan Naze)'
+
+import gflags as flags
+
+from closure_linter import errors
+from closure_linter.common import erroroutput
+
+FLAGS = flags.FLAGS
+
+
+class ErrorRecord(object):
+ """Record-keeping struct that can be serialized back from a process.
+
+ Attributes:
+ path: Path to the file.
+ error_string: Error string for the user.
+ new_error: Whether this is a "new error" (see errors.NEW_ERRORS).
+ """
+
+ def __init__(self, path, error_string, new_error):
+ self.path = path
+ self.error_string = error_string
+ self.new_error = new_error
+
+
+def MakeErrorRecord(path, error):
+ """Make an error record with correctly formatted error string.
+
+ Errors are not able to be serialized (pickled) over processes because of
+ their pointers to the complex token/context graph. We use an intermediary
+ serializable class to pass back just the relevant information.
+
+ Args:
+ path: Path of file the error was found in.
+ error: An error.Error instance.
+
+ Returns:
+ _ErrorRecord instance.
+ """
+ new_error = error.code in errors.NEW_ERRORS
+
+ if FLAGS.unix_mode:
+ error_string = erroroutput.GetUnixErrorOutput(
+ path, error, new_error=new_error)
+ else:
+ error_string = erroroutput.GetErrorOutput(error, new_error=new_error)
+
+ return ErrorRecord(path, error_string, new_error)
diff --git a/tools/closure_linter/build/lib/closure_linter/errorrules.py b/tools/closure_linter/build/lib/closure_linter/errorrules.py
new file mode 100644
index 0000000000..b1b72aab6d
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/errorrules.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Linter error rules class for Closure Linter."""
+
+__author__ = 'robbyw@google.com (Robert Walker)'
+
+import gflags as flags
+from closure_linter import errors
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_boolean('jsdoc', True,
+ 'Whether to report errors for missing JsDoc.')
+flags.DEFINE_list('disable', None,
+ 'Disable specific error. Usage Ex.: gjslint --disable 1,'
+ '0011 foo.js.')
+flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed '
+ 'without warning.', lower_bound=1)
+
+disabled_error_nums = None
+
+
+def GetMaxLineLength():
+ """Returns allowed maximum length of line.
+
+ Returns:
+ Length of line allowed without any warning.
+ """
+ return FLAGS.max_line_length
+
+
+def ShouldReportError(error):
+ """Whether the given error should be reported.
+
+ Returns:
+ True for all errors except missing documentation errors and disabled
+ errors. For missing documentation, it returns the value of the
+ jsdoc flag.
+ """
+ global disabled_error_nums
+ if disabled_error_nums is None:
+ disabled_error_nums = []
+ if FLAGS.disable:
+ for error_str in FLAGS.disable:
+ error_num = 0
+ try:
+ error_num = int(error_str)
+ except ValueError:
+ pass
+ disabled_error_nums.append(error_num)
+
+ return ((FLAGS.jsdoc or error not in (
+ errors.MISSING_PARAMETER_DOCUMENTATION,
+ errors.MISSING_RETURN_DOCUMENTATION,
+ errors.MISSING_MEMBER_DOCUMENTATION,
+ errors.MISSING_PRIVATE,
+ errors.MISSING_JSDOC_TAG_THIS)) and
+ (not FLAGS.disable or error not in disabled_error_nums))
diff --git a/tools/closure_linter/build/lib/closure_linter/errorrules_test.py b/tools/closure_linter/build/lib/closure_linter/errorrules_test.py
new file mode 100644
index 0000000000..cb903785e6
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/errorrules_test.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Medium tests for the gjslint errorrules.
+
+Currently it's just verifying that warnings can't be disabled.
+"""
+
+
+
+import gflags as flags
+import unittest as googletest
+
+from closure_linter import errors
+from closure_linter import runner
+from closure_linter.common import erroraccumulator
+
+flags.FLAGS.strict = True
+flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
+flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
+
+
+class ErrorRulesTest(googletest.TestCase):
+  """Test case for gjslint errorrules."""
+
+ def testNoMaxLineLengthFlagExists(self):
+    """Tests that the --max_line_length flag does not exist."""
+ self.assertTrue('max_line_length' not in flags.FLAGS.FlagDict())
+
+ def testGetMaxLineLength(self):
+ """Tests warning are reported for line greater than 80.
+ """
+
+    # One line > 100 and one line > 80 and < 100, so this should produce two
+    # line-too-long errors.
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ '',
+ 'function a() {',
+ ' dummy.aa.i = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
+ ' + 14 + 15 + 16 + 17 + 18 + 19 + 20;',
+ ' dummy.aa.j = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
+ ' + 14 + 15 + 16 + 17 + 18;',
+ '}',
+ ''
+ ]
+
+ # Expect line too long.
+ expected = [errors.LINE_TOO_LONG, errors.LINE_TOO_LONG]
+
+ self._AssertErrors(original, expected)
+
+ def testNoDisableFlagExists(self):
+    """Tests that the --disable flag does not exist."""
+ self.assertTrue('disable' not in flags.FLAGS.FlagDict())
+
+ def testWarningsNotDisabled(self):
+ """Tests warnings are reported when nothing is disabled.
+ """
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ 'goog.require(\'dummy.Cc\');',
+ 'goog.require(\'dummy.Dd\');',
+ '',
+ 'function a() {',
+ ' dummy.aa.i = 1;',
+ ' dummy.Cc.i = 1;',
+ ' dummy.Dd.i = 1;',
+ '}',
+ ]
+
+ expected = [errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
+ errors.FILE_MISSING_NEWLINE]
+
+ self._AssertErrors(original, expected)
+
+ def _AssertErrors(self, original, expected_errors, include_header=True):
+ """Asserts that the error fixer corrects original to expected."""
+ if include_header:
+ original = self._GetHeader() + original
+
+    # Trap gjslint's output and parse it to get the messages added.
+ error_accumulator = erroraccumulator.ErrorAccumulator()
+ runner.Run('testing.js', error_accumulator, source=original)
+ error_nums = [e.code for e in error_accumulator.GetErrors()]
+
+ error_nums.sort()
+ expected_errors.sort()
+ self.assertListEqual(error_nums, expected_errors)
+
+ def _GetHeader(self):
+ """Returns a fake header for a JavaScript file."""
+ return [
+ '// Copyright 2011 Google Inc. All Rights Reserved.',
+ '',
+ '/**',
+ ' * @fileoverview Fake file overview.',
+ ' * @author fake@google.com (Fake Person)',
+ ' */',
+ ''
+ ]
+
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/errors.py b/tools/closure_linter/build/lib/closure_linter/errors.py
new file mode 100644
index 0000000000..356ee0c5a6
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/errors.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Error codes for JavaScript style checker."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+
+def ByName(name):
+ """Get the error code for the given error name.
+
+ Args:
+ name: The name of the error
+
+ Returns:
+ The error code
+ """
+ return globals()[name]
+
+
+# "File-fatal" errors - these errors stop further parsing of a single file
+FILE_NOT_FOUND = -1
+FILE_DOES_NOT_PARSE = -2
+
+# Spacing
+EXTRA_SPACE = 1
+MISSING_SPACE = 2
+EXTRA_LINE = 3
+MISSING_LINE = 4
+ILLEGAL_TAB = 5
+WRONG_INDENTATION = 6
+WRONG_BLANK_LINE_COUNT = 7
+
+# Semicolons
+MISSING_SEMICOLON = 10
+MISSING_SEMICOLON_AFTER_FUNCTION = 11
+ILLEGAL_SEMICOLON_AFTER_FUNCTION = 12
+REDUNDANT_SEMICOLON = 13
+
+# Miscellaneous
+ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100
+LINE_TOO_LONG = 110
+LINE_STARTS_WITH_OPERATOR = 120
+COMMA_AT_END_OF_LITERAL = 121
+LINE_ENDS_WITH_DOT = 122
+MULTI_LINE_STRING = 130
+UNNECESSARY_DOUBLE_QUOTED_STRING = 131
+UNUSED_PRIVATE_MEMBER = 132
+UNUSED_LOCAL_VARIABLE = 133
+
+# Requires, provides
+GOOG_REQUIRES_NOT_ALPHABETIZED = 140
+GOOG_PROVIDES_NOT_ALPHABETIZED = 141
+MISSING_GOOG_REQUIRE = 142
+MISSING_GOOG_PROVIDE = 143
+EXTRA_GOOG_REQUIRE = 144
+EXTRA_GOOG_PROVIDE = 145
+ALIAS_STMT_NEEDS_GOOG_REQUIRE = 146
+
+# JsDoc
+INVALID_JSDOC_TAG = 200
+INVALID_USE_OF_DESC_TAG = 201
+NO_BUG_NUMBER_AFTER_BUG_TAG = 202
+MISSING_PARAMETER_DOCUMENTATION = 210
+EXTRA_PARAMETER_DOCUMENTATION = 211
+WRONG_PARAMETER_DOCUMENTATION = 212
+MISSING_JSDOC_TAG_TYPE = 213
+MISSING_JSDOC_TAG_DESCRIPTION = 214
+MISSING_JSDOC_PARAM_NAME = 215
+OUT_OF_ORDER_JSDOC_TAG_TYPE = 216
+MISSING_RETURN_DOCUMENTATION = 217
+UNNECESSARY_RETURN_DOCUMENTATION = 218
+MISSING_BRACES_AROUND_TYPE = 219
+MISSING_MEMBER_DOCUMENTATION = 220
+MISSING_PRIVATE = 221
+EXTRA_PRIVATE = 222
+INVALID_OVERRIDE_PRIVATE = 223
+INVALID_INHERIT_DOC_PRIVATE = 224
+MISSING_JSDOC_TAG_THIS = 225
+UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
+INVALID_AUTHOR_TAG_DESCRIPTION = 227
+JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
+JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
+JSDOC_MISSING_OPTIONAL_TYPE = 232
+JSDOC_MISSING_OPTIONAL_PREFIX = 233
+JSDOC_MISSING_VAR_ARGS_TYPE = 234
+JSDOC_MISSING_VAR_ARGS_NAME = 235
+JSDOC_DOES_NOT_PARSE = 236
+# TODO(robbyw): Split this in to more specific syntax problems.
+INCORRECT_SUPPRESS_SYNTAX = 250
+INVALID_SUPPRESS_TYPE = 251
+UNNECESSARY_SUPPRESS = 252
+
+# File ending
+FILE_MISSING_NEWLINE = 300
+FILE_IN_BLOCK = 301
+
+# Interfaces
+INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
+INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
+
+# Comments
+MISSING_END_OF_SCOPE_COMMENT = 500
+MALFORMED_END_OF_SCOPE_COMMENT = 501
+
+# goog.scope - Namespace aliasing
+# TODO(nnaze) Add additional errors here and in aliaspass.py
+INVALID_USE_OF_GOOG_SCOPE = 600
+EXTRA_GOOG_SCOPE_USAGE = 601
+
+# ActionScript specific errors:
+# TODO(user): move these errors to their own file and move all JavaScript
+# specific errors to their own file as well.
+# All ActionScript specific errors should have error number at least 1000.
+FUNCTION_MISSING_RETURN_TYPE = 1132
+PARAMETER_MISSING_TYPE = 1133
+VAR_MISSING_TYPE = 1134
+PARAMETER_MISSING_DEFAULT_VALUE = 1135
+IMPORTS_NOT_ALPHABETIZED = 1140
+IMPORT_CONTAINS_WILDCARD = 1141
+UNUSED_IMPORT = 1142
+INVALID_TRACE_SEVERITY_LEVEL = 1250
+MISSING_TRACE_SEVERITY_LEVEL = 1251
+MISSING_TRACE_MESSAGE = 1252
+REMOVE_TRACE_BEFORE_SUBMIT = 1253
+REMOVE_COMMENT_BEFORE_SUBMIT = 1254
+# End of list of ActionScript specific errors.
+
+NEW_ERRORS = frozenset([
+ # Errors added after 2.0.2:
+ WRONG_INDENTATION,
+ MISSING_SEMICOLON,
+ # Errors added after 2.3.9:
+ JSDOC_MISSING_VAR_ARGS_TYPE,
+ JSDOC_MISSING_VAR_ARGS_NAME,
+ # Errors added after 2.3.15:
+ ALIAS_STMT_NEEDS_GOOG_REQUIRE,
+ JSDOC_DOES_NOT_PARSE,
+ LINE_ENDS_WITH_DOT,
+ # Errors added after 2.3.17:
+ ])
diff --git a/tools/closure_linter/build/lib/closure_linter/fixjsstyle.py b/tools/closure_linter/build/lib/closure_linter/fixjsstyle.py
new file mode 100644
index 0000000000..2d65e0398f
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/fixjsstyle.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Automatically fix simple style guide violations."""
+
+__author__ = 'robbyw@google.com (Robert Walker)'
+
+import StringIO
+import sys
+
+import gflags as flags
+
+from closure_linter import error_fixer
+from closure_linter import runner
+from closure_linter.common import simplefileflags as fileflags
+
+FLAGS = flags.FLAGS
+flags.DEFINE_list('additional_extensions', None, 'List of additional file '
+ 'extensions (not js) that should be treated as '
+ 'JavaScript files.')
+flags.DEFINE_boolean('dry_run', False, 'Do not modify the file, only print it.')
+
+
+def main(argv=None):
+ """Main function.
+
+ Args:
+ argv: Sequence of command line arguments.
+ """
+ if argv is None:
+ argv = flags.FLAGS(sys.argv)
+
+ suffixes = ['.js']
+ if FLAGS.additional_extensions:
+ suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
+
+ files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
+
+ output_buffer = None
+ if FLAGS.dry_run:
+ output_buffer = StringIO.StringIO()
+
+ fixer = error_fixer.ErrorFixer(output_buffer)
+
+ # Check the list of files.
+ for filename in files:
+ runner.Run(filename, fixer)
+ if FLAGS.dry_run:
+ print output_buffer.getvalue()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py b/tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py
new file mode 100644
index 0000000000..34de3f8488
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py
@@ -0,0 +1,615 @@
+#!/usr/bin/env python
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Medium tests for the gpylint auto-fixer."""
+
+__author__ = 'robbyw@google.com (Robby Walker)'
+
+import StringIO
+
+import gflags as flags
+import unittest as googletest
+from closure_linter import error_fixer
+from closure_linter import runner
+
+
+_RESOURCE_PREFIX = 'closure_linter/testdata'
+
+flags.FLAGS.strict = True
+flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
+flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
+
+
+class FixJsStyleTest(googletest.TestCase):
+  """Test case for gjslint auto-fixing."""
+
+ def setUp(self):
+ flags.FLAGS.dot_on_next_line = True
+
+ def tearDown(self):
+ flags.FLAGS.dot_on_next_line = False
+
+ def testFixJsStyle(self):
+ test_cases = [
+ ['fixjsstyle.in.js', 'fixjsstyle.out.js'],
+ ['indentation.js', 'fixjsstyle.indentation.out.js'],
+ ['fixjsstyle.html.in.html', 'fixjsstyle.html.out.html'],
+ ['fixjsstyle.oplineend.in.js', 'fixjsstyle.oplineend.out.js']]
+ for [running_input_file, running_output_file] in test_cases:
+ print 'Checking %s vs %s' % (running_input_file, running_output_file)
+ input_filename = None
+ golden_filename = None
+ current_filename = None
+ try:
+ input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file)
+ current_filename = input_filename
+
+ golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file)
+ current_filename = golden_filename
+ except IOError as ex:
+ raise IOError('Could not find testdata resource for %s: %s' %
+ (current_filename, ex))
+
+ if running_input_file == 'fixjsstyle.in.js':
+ with open(input_filename) as f:
+ for line in f:
+ # Go to last line.
+ pass
+ self.assertTrue(line == line.rstrip(), '%s file should not end '
+ 'with a new line.' % (input_filename))
+
+ # Autofix the file, sending output to a fake file.
+ actual = StringIO.StringIO()
+ runner.Run(input_filename, error_fixer.ErrorFixer(actual))
+
+ # Now compare the files.
+ actual.seek(0)
+ expected = open(golden_filename, 'r')
+
+ # Uncomment to generate new golden files and run
+ # open('/'.join(golden_filename.split('/')[4:]), 'w').write(actual.read())
+ # actual.seek(0)
+
+ self.assertEqual(actual.readlines(), expected.readlines())
+
+ def testAddProvideFirstLine(self):
+ """Tests handling of case where goog.provide is added."""
+ original = [
+ 'dummy.bb.cc = 1;',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.bb\');',
+ '',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ original = [
+ '',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testAddRequireFirstLine(self):
+ """Tests handling of case where goog.require is added."""
+ original = [
+ 'a = dummy.bb.cc;',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.bb\');',
+ '',
+ 'a = dummy.bb.cc;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ original = [
+ '',
+ 'a = dummy.bb.cc;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testDeleteProvideAndAddProvideFirstLine(self):
+ """Tests handling of case where goog.provide is deleted and added.
+
+ Bug 14832597.
+ """
+ original = [
+ 'goog.provide(\'dummy.aa\');',
+ '',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.bb\');',
+ '',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ original = [
+ 'goog.provide(\'dummy.aa\');',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testDeleteProvideAndAddRequireFirstLine(self):
+ """Tests handling where goog.provide is deleted and goog.require added.
+
+ Bug 14832597.
+ """
+ original = [
+ 'goog.provide(\'dummy.aa\');',
+ '',
+ 'a = dummy.bb.cc;',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.bb\');',
+ '',
+ 'a = dummy.bb.cc;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ original = [
+ 'goog.provide(\'dummy.aa\');',
+ 'a = dummy.bb.cc;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testDeleteRequireAndAddRequireFirstLine(self):
+ """Tests handling of case where goog.require is deleted and added.
+
+ Bug 14832597.
+ """
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ '',
+ 'a = dummy.bb.cc;',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.bb\');',
+ '',
+ 'a = dummy.bb.cc;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ 'a = dummy.bb.cc;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testDeleteRequireAndAddProvideFirstLine(self):
+ """Tests handling where goog.require is deleted and goog.provide added.
+
+ Bug 14832597.
+ """
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ '',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.bb\');',
+ '',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testMultipleProvideInsert(self):
+ original = [
+ 'goog.provide(\'dummy.bb\');',
+ 'goog.provide(\'dummy.dd\');',
+ '',
+ 'dummy.aa.ff = 1;',
+ 'dummy.bb.ff = 1;',
+ 'dummy.cc.ff = 1;',
+ 'dummy.dd.ff = 1;',
+ 'dummy.ee.ff = 1;',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.aa\');',
+ 'goog.provide(\'dummy.bb\');',
+ 'goog.provide(\'dummy.cc\');',
+ 'goog.provide(\'dummy.dd\');',
+ 'goog.provide(\'dummy.ee\');',
+ '',
+ 'dummy.aa.ff = 1;',
+ 'dummy.bb.ff = 1;',
+ 'dummy.cc.ff = 1;',
+ 'dummy.dd.ff = 1;',
+ 'dummy.ee.ff = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testMultipleRequireInsert(self):
+ original = [
+ 'goog.require(\'dummy.bb\');',
+ 'goog.require(\'dummy.dd\');',
+ '',
+ 'a = dummy.aa.ff;',
+ 'b = dummy.bb.ff;',
+ 'c = dummy.cc.ff;',
+ 'd = dummy.dd.ff;',
+ 'e = dummy.ee.ff;',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.aa\');',
+ 'goog.require(\'dummy.bb\');',
+ 'goog.require(\'dummy.cc\');',
+ 'goog.require(\'dummy.dd\');',
+ 'goog.require(\'dummy.ee\');',
+ '',
+ 'a = dummy.aa.ff;',
+ 'b = dummy.bb.ff;',
+ 'c = dummy.cc.ff;',
+ 'd = dummy.dd.ff;',
+ 'e = dummy.ee.ff;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testUnsortedRequires(self):
+ """Tests handling of unsorted goog.require statements without header.
+
+ Bug 8398202.
+ """
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ 'goog.require(\'dummy.Cc\');',
+ 'goog.require(\'dummy.Dd\');',
+ '',
+ 'function a() {',
+ ' dummy.aa.i = 1;',
+ ' dummy.Cc.i = 1;',
+ ' dummy.Dd.i = 1;',
+ '}',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.Cc\');',
+ 'goog.require(\'dummy.Dd\');',
+ 'goog.require(\'dummy.aa\');',
+ '',
+ 'function a() {',
+ ' dummy.aa.i = 1;',
+ ' dummy.Cc.i = 1;',
+ ' dummy.Dd.i = 1;',
+ '}',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testMissingExtraAndUnsortedRequires(self):
+ """Tests handling of missing extra and unsorted goog.require statements."""
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ 'goog.require(\'dummy.Cc\');',
+ 'goog.require(\'dummy.Dd\');',
+ '',
+ 'var x = new dummy.Bb();',
+ 'dummy.Cc.someMethod();',
+ 'dummy.aa.someMethod();',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.Bb\');',
+ 'goog.require(\'dummy.Cc\');',
+ 'goog.require(\'dummy.aa\');',
+ '',
+ 'var x = new dummy.Bb();',
+ 'dummy.Cc.someMethod();',
+ 'dummy.aa.someMethod();',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testExtraRequireOnFirstLine(self):
+ """Tests handling of extra goog.require statement on the first line.
+
+ There was a bug when fixjsstyle quits with an exception. It happened if
+ - the first line of the file is an extra goog.require() statement,
+ - goog.require() statements are not sorted.
+ """
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ 'goog.require(\'dummy.cc\');',
+ 'goog.require(\'dummy.bb\');',
+ '',
+ 'var x = new dummy.bb();',
+ 'var y = new dummy.cc();',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.bb\');',
+ 'goog.require(\'dummy.cc\');',
+ '',
+ 'var x = new dummy.bb();',
+ 'var y = new dummy.cc();',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testUnsortedProvides(self):
+ """Tests handling of unsorted goog.provide statements without header.
+
+ Bug 8398202.
+ """
+ original = [
+ 'goog.provide(\'dummy.aa\');',
+ 'goog.provide(\'dummy.Cc\');',
+ 'goog.provide(\'dummy.Dd\');',
+ '',
+ 'dummy.aa = function() {};'
+ 'dummy.Cc = function() {};'
+ 'dummy.Dd = function() {};'
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.Cc\');',
+ 'goog.provide(\'dummy.Dd\');',
+ 'goog.provide(\'dummy.aa\');',
+ '',
+ 'dummy.aa = function() {};'
+ 'dummy.Cc = function() {};'
+ 'dummy.Dd = function() {};'
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testMissingExtraAndUnsortedProvides(self):
+ """Tests handling of missing extra and unsorted goog.provide statements."""
+ original = [
+ 'goog.provide(\'dummy.aa\');',
+ 'goog.provide(\'dummy.Cc\');',
+ 'goog.provide(\'dummy.Dd\');',
+ '',
+ 'dummy.Cc = function() {};',
+ 'dummy.Bb = function() {};',
+ 'dummy.aa.someMethod = function();',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.Bb\');',
+ 'goog.provide(\'dummy.Cc\');',
+ 'goog.provide(\'dummy.aa\');',
+ '',
+ 'dummy.Cc = function() {};',
+ 'dummy.Bb = function() {};',
+ 'dummy.aa.someMethod = function();',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testNoRequires(self):
+ """Tests positioning of missing requires without existing requires."""
+ original = [
+ 'goog.provide(\'dummy.Something\');',
+ '',
+ 'dummy.Something = function() {};',
+ '',
+ 'var x = new dummy.Bb();',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.Something\');',
+ '',
+ 'goog.require(\'dummy.Bb\');',
+ '',
+ 'dummy.Something = function() {};',
+ '',
+ 'var x = new dummy.Bb();',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testNoProvides(self):
+ """Tests positioning of missing provides without existing provides."""
+ original = [
+ 'goog.require(\'dummy.Bb\');',
+ '',
+ 'dummy.Something = function() {};',
+ '',
+ 'var x = new dummy.Bb();',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.Something\');',
+ '',
+ 'goog.require(\'dummy.Bb\');',
+ '',
+ 'dummy.Something = function() {};',
+ '',
+ 'var x = new dummy.Bb();',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testOutputOkayWhenFirstTokenIsDeleted(self):
+    """Tests that autofix output is correct when the first token is deleted.
+
+ Regression test for bug 4581567
+ """
+ original = ['"use strict";']
+ expected = ["'use strict';"]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testGoogScopeIndentation(self):
+ """Tests Handling a typical end-of-scope indentation fix."""
+ original = [
+ 'goog.scope(function() {',
+ ' // TODO(brain): Take over the world.',
+ '}); // goog.scope',
+ ]
+
+ expected = [
+ 'goog.scope(function() {',
+ '// TODO(brain): Take over the world.',
+ '}); // goog.scope',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testMissingEndOfScopeComment(self):
+ """Tests Handling a missing comment at end of goog.scope."""
+ original = [
+ 'goog.scope(function() {',
+ '});',
+ ]
+
+ expected = [
+ 'goog.scope(function() {',
+ '}); // goog.scope',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testMissingEndOfScopeCommentWithOtherComment(self):
+ """Tests handling an irrelevant comment at end of goog.scope."""
+ original = [
+ 'goog.scope(function() {',
+ "}); // I don't belong here!",
+ ]
+
+ expected = [
+ 'goog.scope(function() {',
+ '}); // goog.scope',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testMalformedEndOfScopeComment(self):
+ """Tests Handling a malformed comment at end of goog.scope."""
+ original = [
+ 'goog.scope(function() {',
+ '}); // goog.scope FTW',
+ ]
+
+ expected = [
+ 'goog.scope(function() {',
+ '}); // goog.scope',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testEndsWithIdentifier(self):
+ """Tests Handling case where script ends with identifier. Bug 7643404."""
+ original = [
+ 'goog.provide(\'xyz\');',
+ '',
+ 'abc'
+ ]
+
+ expected = [
+ 'goog.provide(\'xyz\');',
+ '',
+ 'abc;'
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testFileStartsWithSemicolon(self):
+ """Tests handling files starting with semicolon.
+
+ b/10062516
+ """
+ original = [
+ ';goog.provide(\'xyz\');',
+ '',
+ 'abc;'
+ ]
+
+ expected = [
+ 'goog.provide(\'xyz\');',
+ '',
+ 'abc;'
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testCodeStartsWithSemicolon(self):
+    """Tests handling code starting with a semicolon after comments.
+
+ b/10062516
+ """
+ original = [
+ ';goog.provide(\'xyz\');',
+ '',
+ 'abc;'
+ ]
+
+ expected = [
+ 'goog.provide(\'xyz\');',
+ '',
+ 'abc;'
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def _AssertFixes(self, original, expected, include_header=True):
+ """Asserts that the error fixer corrects original to expected."""
+ if include_header:
+ original = self._GetHeader() + original
+ expected = self._GetHeader() + expected
+
+ actual = StringIO.StringIO()
+ runner.Run('testing.js', error_fixer.ErrorFixer(actual), original)
+ actual.seek(0)
+
+ expected = [x + '\n' for x in expected]
+
+ self.assertListEqual(actual.readlines(), expected)
+
+ def _GetHeader(self):
+ """Returns a fake header for a JavaScript file."""
+ return [
+ '// Copyright 2011 Google Inc. All Rights Reserved.',
+ '',
+ '/**',
+ ' * @fileoverview Fake file overview.',
+ ' * @author fake@google.com (Fake Person)',
+ ' */',
+ ''
+ ]
+
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/full_test.py b/tools/closure_linter/build/lib/closure_linter/full_test.py
new file mode 100644
index 0000000000..d0a1557dc2
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/full_test.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Full regression-type (Medium) tests for gjslint.
+
+Tests every error that can be thrown by gjslint. Based heavily on
+devtools/javascript/gpylint/full_test.py
+"""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+import os
+import sys
+import unittest
+
+import gflags as flags
+import unittest as googletest
+
+from closure_linter import error_check
+from closure_linter import errors
+from closure_linter import runner
+from closure_linter.common import filetestcase
+
+_RESOURCE_PREFIX = 'closure_linter/testdata'
+
+flags.FLAGS.strict = True
+flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
+flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
+flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
+ 'limited_doc_checks.js')
+flags.FLAGS.jslint_error = error_check.Rule.ALL
+
+# List of files under testdata to test.
+# We need to list files explicitly since pyglib can't list directories.
+# TODO(user): Figure out how to list the directory.
+_TEST_FILES = [
+ 'all_js_wrapped.js',
+ 'blank_lines.js',
+ 'ends_with_block.js',
+ 'empty_file.js',
+ 'externs.js',
+ 'externs_jsdoc.js',
+ 'goog_scope.js',
+ 'html_parse_error.html',
+ 'indentation.js',
+ 'interface.js',
+ 'jsdoc.js',
+ 'limited_doc_checks.js',
+ 'minimal.js',
+ 'other.js',
+ 'provide_blank.js',
+ 'provide_extra.js',
+ 'provide_missing.js',
+ 'require_alias.js',
+ 'require_all_caps.js',
+ 'require_blank.js',
+ 'require_extra.js',
+ 'require_function.js',
+ 'require_function_missing.js',
+ 'require_function_through_both.js',
+ 'require_function_through_namespace.js',
+ 'require_interface.js',
+ 'require_interface_alias.js',
+ 'require_interface_base.js',
+ 'require_lower_case.js',
+ 'require_missing.js',
+ 'require_numeric.js',
+ 'require_provide_blank.js',
+ 'require_provide_missing.js',
+ 'require_provide_ok.js',
+ 'semicolon_missing.js',
+ 'simple.html',
+ 'spaces.js',
+ 'tokenizer.js',
+ 'unparseable.js',
+ 'unused_local_variables.js',
+ 'unused_private_members.js',
+ 'utf8.html',
+]
+
+
+class GJsLintTestSuite(unittest.TestSuite):
+ """Test suite to run a GJsLintTest for each of several files.
+
+ If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
+ testdata to test. Otherwise, _TEST_FILES is used.
+ """
+
+ def __init__(self, tests=()):
+ unittest.TestSuite.__init__(self, tests)
+
+ argv = sys.argv and sys.argv[1:] or []
+ if argv:
+ test_files = argv
+ else:
+ test_files = _TEST_FILES
+ for test_file in test_files:
+ resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
+ self.addTest(
+ filetestcase.AnnotatedFileTestCase(
+ resource_path,
+ runner.Run,
+ errors.ByName))
+
+if __name__ == '__main__':
+ # Don't let main parse args; it happens in the TestSuite.
+ googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
diff --git a/tools/closure_linter/build/lib/closure_linter/gjslint.py b/tools/closure_linter/build/lib/closure_linter/gjslint.py
new file mode 100644
index 0000000000..824e025dcb
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/gjslint.py
@@ -0,0 +1,319 @@
+#!/usr/bin/env python
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Checks JavaScript files for common style guide violations.
+
+gjslint.py is designed to be used as a PRESUBMIT script to check for javascript
+style guide violations. As of now, it checks for the following violations:
+
+ * Missing and extra spaces
+ * Lines longer than 80 characters
+ * Missing newline at end of file
+ * Missing semicolon after function declaration
+ * Valid JsDoc including parameter matching
+
+Someday it will validate to the best of its ability against the entirety of the
+JavaScript style guide.
+
+This file is a front end that parses arguments and flags. The core of the code
+is in tokenizer.py and checker.py.
+"""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)',
+ 'nnaze@google.com (Nathan Naze)',)
+
+import errno
+import itertools
+import os
+import platform
+import re
+import sys
+import time
+
+import gflags as flags
+
+from closure_linter import errorrecord
+from closure_linter import runner
+from closure_linter.common import erroraccumulator
+from closure_linter.common import simplefileflags as fileflags
+
# Attempt import of multiprocessing (should be available in Python 2.6 and up).
try:
  # pylint: disable=g-import-not-at-top
  import multiprocessing
except ImportError:
  multiprocessing = None

FLAGS = flags.FLAGS
flags.DEFINE_boolean('unix_mode', False,
                     'Whether to emit warnings in standard unix format.')
flags.DEFINE_boolean('beep', True, 'Whether to beep when errors are found.')
flags.DEFINE_boolean('time', False, 'Whether to emit timing statistics.')
flags.DEFINE_boolean('quiet', False, 'Whether to minimize logged messages. '
                     'Most useful for per-file linting, such as that performed '
                     'by the presubmit linter service.')
flags.DEFINE_boolean('check_html', False,
                     'Whether to check javascript in html files.')
flags.DEFINE_boolean('summary', False,
                     'Whether to show an error count summary.')
flags.DEFINE_list('additional_extensions', None, 'List of additional file '
                  'extensions (not js) that should be treated as '
                  'JavaScript files.')
flags.DEFINE_boolean('multiprocess',
                     # Bug fix: the original used "is 'Linux'", an identity
                     # comparison whose result depends on string interning and
                     # is implementation-defined; '==' compares values.
                     platform.system() == 'Linux' and bool(multiprocessing),
                     'Whether to attempt parallelized linting using the '
                     'multiprocessing module. Enabled by default on Linux '
                     'if the multiprocessing module is present (Python 2.6+). '
                     'Otherwise disabled by default. '
                     'Disabling may make debugging easier.')
flags.ADOPT_module_key_flags(fileflags)
flags.ADOPT_module_key_flags(runner)


# Flags understood only by gjslint itself; these are stripped from the
# suggested fixjsstyle command line printed at the end of a run.
GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
                      '--check_html', '--summary', '--quiet']
+
+
+
def _MultiprocessCheckPaths(paths):
  """Run _CheckPath over multiple processes.

  Tokenization, passes, and checks are expensive operations. Running in a
  single process, they can only run on one CPU/core. Instead,
  shard out linting over all CPUs with multiprocessing to parallelize.

  Args:
    paths: paths to check.

  Yields:
    errorrecord.ErrorRecords for any found errors.
  """

  pool = multiprocessing.Pool()

  # imap preserves input order while letting workers run in parallel.
  path_results = pool.imap(_CheckPath, paths)
  for results in path_results:
    for result in results:
      yield result

  # Force destruct before returning, as this can sometimes raise spurious
  # "interrupted system call" (EINTR), which we can ignore.
  try:
    pool.close()
    pool.join()
    del pool
  except OSError as err:
    # Bug fix: the original compared errno values with "is not", an identity
    # test that only works because CPython interns small ints; '!=' is the
    # correct value comparison.
    if err.errno != errno.EINTR:
      raise err
+
+
def _CheckPaths(paths):
  """Run _CheckPath on all paths in one thread.

  Args:
    paths: paths to check.

  Yields:
    errorrecord.ErrorRecords for any found errors.
  """

  for current_path in paths:
    # Lint each file sequentially and stream its error records out.
    for error_record in _CheckPath(current_path):
      yield error_record
+
+
def _CheckPath(path):
  """Check a path and return any errors.

  Args:
    path: paths to check.

  Returns:
    A list of errorrecord.ErrorRecords for any found errors.
  """

  accumulator = erroraccumulator.ErrorAccumulator()
  runner.Run(path, accumulator)

  # Wrap each accumulated error in an ErrorRecord tagged with this path.
  return [errorrecord.MakeErrorRecord(path, err)
          for err in accumulator.GetErrors()]
+
+
def _GetFilePaths(argv):
  """Returns the list of JavaScript file paths selected by argv and flags."""
  extensions = ['.js']
  if FLAGS.additional_extensions:
    extensions.extend('.%s' % ext for ext in FLAGS.additional_extensions)
  if FLAGS.check_html:
    extensions.extend(['.html', '.htm'])
  return fileflags.GetFileList(argv, 'JavaScript', extensions)
+
+
+# Error printing functions
+
+
+def _PrintFileSummary(paths, records):
+ """Print a detailed summary of the number of errors in each file."""
+
+ paths = list(paths)
+ paths.sort()
+
+ for path in paths:
+ path_errors = [e for e in records if e.path == path]
+ print '%s: %d' % (path, len(path_errors))
+
+
+def _PrintFileSeparator(path):
+ print '----- FILE : %s -----' % path
+
+
+def _PrintSummary(paths, error_records):
+ """Print a summary of the number of errors and files."""
+
+ error_count = len(error_records)
+ all_paths = set(paths)
+ all_paths_count = len(all_paths)
+
+ if error_count is 0:
+ print '%d files checked, no errors found.' % all_paths_count
+
+ new_error_count = len([e for e in error_records if e.new_error])
+
+ error_paths = set([e.path for e in error_records])
+ error_paths_count = len(error_paths)
+ no_error_paths_count = all_paths_count - error_paths_count
+
+ if (error_count or new_error_count) and not FLAGS.quiet:
+ error_noun = 'error' if error_count == 1 else 'errors'
+ new_error_noun = 'error' if new_error_count == 1 else 'errors'
+ error_file_noun = 'file' if error_paths_count == 1 else 'files'
+ ok_file_noun = 'file' if no_error_paths_count == 1 else 'files'
+ print ('Found %d %s, including %d new %s, in %d %s (%d %s OK).' %
+ (error_count,
+ error_noun,
+ new_error_count,
+ new_error_noun,
+ error_paths_count,
+ error_file_noun,
+ no_error_paths_count,
+ ok_file_noun))
+
+
+def _PrintErrorRecords(error_records):
+ """Print error records strings in the expected format."""
+
+ current_path = None
+ for record in error_records:
+
+ if current_path != record.path:
+ current_path = record.path
+ if not FLAGS.unix_mode:
+ _PrintFileSeparator(current_path)
+
+ print record.error_string
+
+
+def _FormatTime(t):
+ """Formats a duration as a human-readable string.
+
+ Args:
+ t: A duration in seconds.
+
+ Returns:
+ A formatted duration string.
+ """
+ if t < 1:
+ return '%dms' % round(t * 1000)
+ else:
+ return '%.2fs' % t
+
+
+
+
+def main(argv=None):
+ """Main function.
+
+ Args:
+ argv: Sequence of command line arguments.
+ """
+ if argv is None:
+ argv = flags.FLAGS(sys.argv)
+
+ if FLAGS.time:
+ start_time = time.time()
+
+ suffixes = ['.js']
+ if FLAGS.additional_extensions:
+ suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
+ if FLAGS.check_html:
+ suffixes += ['.html', '.htm']
+ paths = fileflags.GetFileList(argv, 'JavaScript', suffixes)
+
+ if FLAGS.multiprocess:
+ records_iter = _MultiprocessCheckPaths(paths)
+ else:
+ records_iter = _CheckPaths(paths)
+
+ records_iter, records_iter_copy = itertools.tee(records_iter, 2)
+ _PrintErrorRecords(records_iter_copy)
+
+ error_records = list(records_iter)
+ _PrintSummary(paths, error_records)
+
+ exit_code = 0
+
+ # If there are any errors
+ if error_records:
+ exit_code += 1
+
+ # If there are any new errors
+ if [r for r in error_records if r.new_error]:
+ exit_code += 2
+
+ if exit_code:
+ if FLAGS.summary:
+ _PrintFileSummary(paths, error_records)
+
+ if FLAGS.beep:
+ # Make a beep noise.
+ sys.stdout.write(chr(7))
+
+ # Write out instructions for using fixjsstyle script to fix some of the
+ # reported errors.
+ fix_args = []
+ for flag in sys.argv[1:]:
+ for f in GJSLINT_ONLY_FLAGS:
+ if flag.startswith(f):
+ break
+ else:
+ fix_args.append(flag)
+
+ if not FLAGS.quiet:
+ print """
+Some of the errors reported by GJsLint may be auto-fixable using the script
+fixjsstyle. Please double check any changes it makes and report any bugs. The
+script can be run by executing:
+
+fixjsstyle %s """ % ' '.join(fix_args)
+
+ if FLAGS.time:
+ print 'Done in %s.' % _FormatTime(time.time() - start_time)
+
+ sys.exit(exit_code)
+
+
if __name__ == '__main__':
  # Script entry point: main() parses flags via gflags and exits with a
  # status code encoding whether (new) errors were found.
  main()
diff --git a/tools/closure_linter/build/lib/closure_linter/indentation.py b/tools/closure_linter/build/lib/closure_linter/indentation.py
new file mode 100644
index 0000000000..d48ad2b862
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/indentation.py
@@ -0,0 +1,617 @@
+#!/usr/bin/env python
+# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Methods for checking EcmaScript files for indentation issues."""
+
+__author__ = ('robbyw@google.com (Robert Walker)')
+
+import gflags as flags
+
+from closure_linter import ecmametadatapass
+from closure_linter import errors
+from closure_linter import javascripttokens
+from closure_linter import tokenutil
+from closure_linter.common import error
+from closure_linter.common import position
+
+
flags.DEFINE_boolean('debug_indentation', False,
                     'Whether to print debugging information for indentation.')


# Shorthand aliases for frequently used classes from the imported modules.
Context = ecmametadatapass.EcmaContext
Error = error.Error
Position = position.Position
Type = javascripttokens.JavaScriptTokenType
+
+
+# The general approach:
+#
+# 1. Build a stack of tokens that can affect indentation.
+# For each token, we determine if it is a block or continuation token.
+# Some tokens need to be temporarily overwritten in case they are removed
+# before the end of the line.
+# Much of the work here is determining which tokens to keep on the stack
+# at each point. Operators, for example, should be removed once their
+# expression or line is gone, while parentheses must stay until the matching
+# end parentheses is found.
+#
+# 2. Given that stack, determine the allowable indentations.
+# Due to flexible indentation rules in JavaScript, there may be many
+# allowable indentations for each stack. We follows the general
+# "no false positives" approach of GJsLint and build the most permissive
+# set possible.
+
+
class TokenInfo(object):
  """Stores information about a token.

  Attributes:
    token: The token
    is_block: Whether the token represents a block indentation.
    is_transient: Whether the token should be automatically removed without
        finding a matching end token.
    overridden_by: TokenInfo for a token that overrides the indentation that
        this token would require.
    is_permanent_override: Whether the override on this token should persist
        even after the overriding token is removed from the stack. For example:
        x([
          1],
        2);
        needs this to be set so the last line is not required to be a
        continuation indent.
    line_number: The effective line number of this token. Will either be the
        actual line number or the one before it in the case of a mis-wrapped
        operator.
  """

  def __init__(self, token, is_block=False):
    """Initializes a TokenInfo object.

    Args:
      token: The token
      is_block: Whether the token represents a block indentation.
    """
    self.token = token
    self.is_block = is_block
    self.line_number = token.line_number
    # No override is in effect until another token on the stack supplies one.
    self.overridden_by = None
    self.is_permanent_override = False
    # Parens and parameter lists persist until their matching close token;
    # any other non-block token is transient.
    self.is_transient = not (
        is_block or token.type in (Type.START_PAREN, Type.START_PARAMETERS))

  def __repr__(self):
    description = '\n %s' % self.token
    if self.overridden_by is not None:
      description = '%s OVERRIDDEN [by "%s"]' % (
          description, self.overridden_by.token.string)
    description += ' {is_block: %s, is_transient: %s}' % (
        self.is_block, self.is_transient)
    return description
+
+
class IndentationRules(object):
  """EmcaScript indentation rules.

  Can be used to find common indentation errors in JavaScript, ActionScript and
  other Ecma like scripting languages.
  """

  def __init__(self):
    """Initializes the IndentationRules checker."""
    # Stack of TokenInfo objects for open tokens that currently affect
    # indentation (blocks, parens, brackets, continuation operators).
    self._stack = []

    # Map from line number to number of characters it is off in indentation.
    self._start_index_offset = {}

  def Finalize(self):
    """Verifies all open indentation tokens were matched and popped.

    Raises:
      Exception: If the indentation stack is non-empty, which indicates an
          internal bookkeeping bug rather than a user lint error.
    """
    if self._stack:
      old_stack = self._stack
      self._stack = []
      raise Exception('INTERNAL ERROR: indentation stack is not empty: %r' %
                      old_stack)

  def CheckToken(self, token, state):
    """Checks a token for indentation errors.

    Args:
      token: The current token under consideration
      state: Additional information about the current tree state

    Returns:
      An error array [error code, error string, error token] if the token is
      improperly indented, or None if indentation is correct.
    """

    token_type = token.type
    indentation_errors = []
    stack = self._stack
    is_first = self._IsFirstNonWhitespaceTokenInLine(token)

    # Add tokens that could decrease indentation before checking.
    if token_type == Type.END_PAREN:
      self._PopTo(Type.START_PAREN)

    elif token_type == Type.END_PARAMETERS:
      self._PopTo(Type.START_PARAMETERS)

    elif token_type == Type.END_BRACKET:
      self._PopTo(Type.START_BRACKET)

    elif token_type == Type.END_BLOCK:
      start_token = self._PopTo(Type.START_BLOCK)
      # Check for required goog.scope comment.
      if start_token:
        goog_scope = tokenutil.GoogScopeOrNoneFromStartBlock(start_token.token)
        if goog_scope is not None:
          if not token.line.endswith('; // goog.scope\n'):
            if (token.line.find('//') > -1 and
                token.line.find('goog.scope') >
                token.line.find('//')):
              indentation_errors.append([
                  errors.MALFORMED_END_OF_SCOPE_COMMENT,
                  ('Malformed end of goog.scope comment. Please use the '
                   'exact following syntax to close the scope:\n'
                   '}); // goog.scope'),
                  token,
                  Position(token.start_index, token.length)])
            else:
              indentation_errors.append([
                  errors.MISSING_END_OF_SCOPE_COMMENT,
                  ('Missing comment for end of goog.scope which opened at line '
                   '%d. End the scope with:\n'
                   '}); // goog.scope' %
                   (start_token.line_number)),
                  token,
                  Position(token.start_index, token.length)])

    elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
      # case/default labels outdent to their switch block's level: pop the
      # block and re-push it so the stack reflects the label's indentation.
      self._Add(self._PopTo(Type.START_BLOCK))

    elif token_type == Type.SEMICOLON:
      # End of statement: any pending continuation indents no longer apply.
      self._PopTransient()

    if (is_first and
        token_type not in (Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT)):
      if flags.FLAGS.debug_indentation:
        print 'Line #%d: stack %r' % (token.line_number, stack)

      # Ignore lines that start in JsDoc since we don't check them properly yet.
      # TODO(robbyw): Support checking JsDoc indentation.
      # Ignore lines that start as multi-line strings since indentation is N/A.
      # Ignore lines that start with operators since we report that already.
      # Ignore lines with tabs since we report that already.
      expected = self._GetAllowableIndentations()
      actual = self._GetActualIndentation(token)

      # Special case comments describing else, case, and default. Allow them
      # to outdent to the parent block.
      if token_type in Type.COMMENT_TYPES:
        next_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
        if next_code and next_code.type == Type.END_BLOCK:
          next_code = tokenutil.SearchExcept(next_code, Type.NON_CODE_TYPES)
        if next_code and next_code.string in ('else', 'case', 'default'):
          # TODO(robbyw): This almost certainly introduces false negatives.
          expected |= self._AddToEach(expected, -2)

      if actual >= 0 and actual not in expected:
        expected = sorted(expected)
        indentation_errors.append([
            errors.WRONG_INDENTATION,
            'Wrong indentation: expected any of {%s} but got %d' % (
                ', '.join('%d' % x for x in expected if x < 80), actual),
            token,
            Position(actual, expected[0])])
        # Remember how far off this line is so hard stops computed from token
        # positions on it can be corrected in _GetAllowableIndentations.
        self._start_index_offset[token.line_number] = expected[0] - actual

    # Add tokens that could increase indentation.
    if token_type == Type.START_BRACKET:
      self._Add(TokenInfo(
          token=token,
          is_block=token.metadata.context.type == Context.ARRAY_LITERAL))

    elif token_type == Type.START_BLOCK or token.metadata.is_implied_block:
      self._Add(TokenInfo(token=token, is_block=True))

    elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
      self._Add(TokenInfo(token=token, is_block=False))

    elif token_type == Type.KEYWORD and token.string == 'return':
      self._Add(TokenInfo(token))

    elif not token.IsLastInLine() and (
        token.IsAssignment() or token.IsOperator('?')):
      self._Add(TokenInfo(token=token))

    # Handle implied block closes.
    if token.metadata.is_implied_block_close:
      self._PopToImpliedBlock()

    # Add some tokens only if they appear at the end of the line.
    is_last = self._IsLastCodeInLine(token)
    if is_last:
      next_code_token = tokenutil.GetNextCodeToken(token)
      # Increase required indentation if this is an overlong wrapped statement
      # ending in an operator.
      if token_type == Type.OPERATOR:
        if token.string == ':':
          if stack and stack[-1].token.string == '?':
            # When a ternary : is on a different line than its '?', it doesn't
            # add indentation.
            if token.line_number == stack[-1].token.line_number:
              self._Add(TokenInfo(token))
          elif token.metadata.context.type == Context.CASE_BLOCK:
            # Pop transient tokens from say, line continuations, e.g.,
            # case x.
            #     y:
            # Want to pop the transient 4 space continuation indent.
            self._PopTransient()
            # Starting the body of the case statement, which is a type of
            # block.
            self._Add(TokenInfo(token=token, is_block=True))
          elif token.metadata.context.type == Context.LITERAL_ELEMENT:
            # When in an object literal, acts as operator indicating line
            # continuations.
            self._Add(TokenInfo(token))
          else:
            # ':' might also be a statement label, no effect on indentation in
            # this case.
            pass

        elif token.string != ',':
          self._Add(TokenInfo(token))
        else:
          # The token is a comma.
          if token.metadata.context.type == Context.VAR:
            self._Add(TokenInfo(token))
          elif token.metadata.context.type != Context.PARAMETERS:
            self._PopTransient()
      # Increase required indentation if this is the end of a statement that's
      # continued with an operator on the next line (e.g. the '.').
      elif (next_code_token and next_code_token.type == Type.OPERATOR and
            not next_code_token.metadata.IsUnaryOperator()):
        self._Add(TokenInfo(token))
      elif token_type == Type.PARAMETERS and token.string.endswith(','):
        # Parameter lists.
        self._Add(TokenInfo(token))
      elif token.IsKeyword('var'):
        self._Add(TokenInfo(token))
      elif token.metadata.is_implied_semicolon:
        self._PopTransient()
    elif token.IsAssignment():
      self._Add(TokenInfo(token))

    return indentation_errors

  def _AddToEach(self, original, amount):
    """Returns a new set with the given amount added to each element.

    Args:
      original: The original set of numbers
      amount: The amount to add to each element

    Returns:
      A new set containing each element of the original set added to the amount.
    """
    return set([x + amount for x in original])

  # Token types whose position defines an extra allowable indentation column.
  _HARD_STOP_TYPES = (Type.START_PAREN, Type.START_PARAMETERS,
                      Type.START_BRACKET)

  # Token strings whose position defines an extra allowable indentation column.
  _HARD_STOP_STRINGS = ('return', '?')

  def _IsHardStop(self, token):
    """Determines if the given token can have a hard stop after it.

    Args:
      token: token to examine

    Returns:
      Whether the token can have a hard stop after it.

    Hard stops are indentations defined by the position of another token as in
    indentation lined up with return, (, [, and ?.
    """
    return (token.type in self._HARD_STOP_TYPES or
            token.string in self._HARD_STOP_STRINGS or
            token.IsAssignment())

  def _GetAllowableIndentations(self):
    """Computes the set of allowable indentations.

    Returns:
      The set of allowable indentations, given the current stack.
    """
    expected = set([0])
    hard_stops = set([])

    # Whether the tokens are still in the same continuation, meaning additional
    # indentation is optional. As an example:
    # x = 5 +
    #     6 +
    #     7;
    # The second '+' does not add any required indentation.
    in_same_continuation = False

    for token_info in self._stack:
      token = token_info.token

      # Handle normal additive indentation tokens.
      if not token_info.overridden_by and token.string != 'return':
        if token_info.is_block:
          expected = self._AddToEach(expected, 2)
          hard_stops = self._AddToEach(hard_stops, 2)
          in_same_continuation = False
        elif in_same_continuation:
          expected |= self._AddToEach(expected, 4)
          hard_stops |= self._AddToEach(hard_stops, 4)
        else:
          expected = self._AddToEach(expected, 4)
          hard_stops |= self._AddToEach(hard_stops, 4)
          in_same_continuation = True

      # Handle hard stops after (, [, return, =, and ?
      if self._IsHardStop(token):
        override_is_hard_stop = (token_info.overridden_by and
                                 self._IsHardStop(
                                     token_info.overridden_by.token))
        if token.type == Type.START_PAREN and token.previous:
          # For someFunction(...) we allow to indent at the beginning of the
          # identifier +4
          prev = token.previous
          if (prev.type == Type.IDENTIFIER and
              prev.line_number == token.line_number):
            hard_stops.add(prev.start_index + 4)
        if not override_is_hard_stop:
          start_index = token.start_index
          if token.line_number in self._start_index_offset:
            start_index += self._start_index_offset[token.line_number]
          if (token.type in (Type.START_PAREN, Type.START_PARAMETERS) and
              not token_info.overridden_by):
            hard_stops.add(start_index + 1)

          elif token.string == 'return' and not token_info.overridden_by:
            hard_stops.add(start_index + 7)

          elif token.type == Type.START_BRACKET:
            hard_stops.add(start_index + 1)

          elif token.IsAssignment():
            hard_stops.add(start_index + len(token.string) + 1)

          elif token.IsOperator('?') and not token_info.overridden_by:
            hard_stops.add(start_index + 2)

    return (expected | hard_stops) or set([0])

  def _GetActualIndentation(self, token):
    """Gets the actual indentation of the line containing the given token.

    Args:
      token: Any token on the line.

    Returns:
      The actual indentation of the line containing the given token. Returns
      -1 if this line should be ignored due to the presence of tabs.
    """
    # Move to the first token in the line
    token = tokenutil.GetFirstTokenInSameLine(token)

    # If it is whitespace, it is the indentation.
    if token.type == Type.WHITESPACE:
      if token.string.find('\t') >= 0:
        return -1
      else:
        return len(token.string)
    elif token.type == Type.PARAMETERS:
      return len(token.string) - len(token.string.lstrip())
    else:
      return 0

  def _IsFirstNonWhitespaceTokenInLine(self, token):
    """Determines if the given token is the first non-space token on its line.

    Args:
      token: The token.

    Returns:
      True if the token is the first non-whitespace token on its line.
    """
    if token.type in (Type.WHITESPACE, Type.BLANK_LINE):
      return False
    if token.IsFirstInLine():
      return True
    return (token.previous and token.previous.IsFirstInLine() and
            token.previous.type == Type.WHITESPACE)

  def _IsLastCodeInLine(self, token):
    """Determines if the given token is the last code token on its line.

    Args:
      token: The token.

    Returns:
      True if the token is the last code token on its line.
    """
    if token.type in Type.NON_CODE_TYPES:
      return False
    start_token = token
    while True:
      token = token.next
      if not token or token.line_number != start_token.line_number:
        return True
      if token.type not in Type.NON_CODE_TYPES:
        return False

  def _AllFunctionPropertyAssignTokens(self, start_token, end_token):
    """Checks if tokens are (likely) a valid function property assignment.

    Args:
      start_token: Start of the token range.
      end_token: End of the token range.

    Returns:
      True if all tokens between start_token and end_token are legal tokens
      within a function declaration and assignment into a property.
    """
    for token in tokenutil.GetTokenRange(start_token, end_token):
      fn_decl_tokens = (Type.FUNCTION_DECLARATION,
                        Type.PARAMETERS,
                        Type.START_PARAMETERS,
                        Type.END_PARAMETERS,
                        Type.END_PAREN)
      if (token.type not in fn_decl_tokens and
          token.IsCode() and
          not tokenutil.IsIdentifierOrDot(token) and
          not token.IsAssignment() and
          not (token.type == Type.OPERATOR and token.string == ',')):
        return False
    return True

  def _Add(self, token_info):
    """Adds the given token info to the stack.

    Args:
      token_info: The token information to add.
    """
    if self._stack and self._stack[-1].token == token_info.token:
      # Don't add the same token twice.
      return

    if token_info.is_block or token_info.token.type == Type.START_PAREN:
      # A token inside a goog.scope block is overridden by the scope itself.
      scope_token = tokenutil.GoogScopeOrNoneFromStartBlock(token_info.token)
      token_info.overridden_by = TokenInfo(scope_token) if scope_token else None

      if (token_info.token.type == Type.START_BLOCK and
          token_info.token.metadata.context.type == Context.BLOCK):
        # Handle function() {} assignments: their block contents get special
        # treatment and are allowed to just indent by two whitespace.
        # For example
        # long.long.name = function(
        #     a) {
        # In this case the { and the = are on different lines. But the
        # override should still apply for all previous stack tokens that are
        # part of an assignment of a block.

        has_assignment = any(x for x in self._stack if x.token.IsAssignment())
        if has_assignment:
          last_token = token_info.token.previous
          for stack_info in reversed(self._stack):
            if (last_token and
                not self._AllFunctionPropertyAssignTokens(stack_info.token,
                                                          last_token)):
              break
            stack_info.overridden_by = token_info
            stack_info.is_permanent_override = True
            last_token = stack_info.token

    index = len(self._stack) - 1
    while index >= 0:
      stack_info = self._stack[index]
      stack_token = stack_info.token

      if stack_info.line_number == token_info.line_number:
        # In general, tokens only override each other when they are on
        # the same line.
        stack_info.overridden_by = token_info
        if (token_info.token.type == Type.START_BLOCK and
            (stack_token.IsAssignment() or
             stack_token.type in (Type.IDENTIFIER, Type.START_PAREN))):
          # Multi-line blocks have lasting overrides, as in:
          # callFn({
          #   a: 10
          # },
          # 30);
          # b/11450054. If a string is not closed properly then close_block
          # could be null.
          close_block = token_info.token.metadata.context.end_token
          stack_info.is_permanent_override = close_block and (
              close_block.line_number != token_info.token.line_number)
      else:
        break
      index -= 1

    self._stack.append(token_info)

  def _Pop(self):
    """Pops the top token from the stack.

    Returns:
      The popped token info.
    """
    token_info = self._stack.pop()
    if token_info.token.type not in (Type.START_BLOCK, Type.START_BRACKET):
      # Remove any temporary overrides.
      self._RemoveOverrides(token_info)
    else:
      # For braces and brackets, which can be object and array literals, remove
      # overrides when the literal is closed on the same line.
      token_check = token_info.token
      same_type = token_check.type
      goal_type = None
      if token_info.token.type == Type.START_BRACKET:
        goal_type = Type.END_BRACKET
      else:
        goal_type = Type.END_BLOCK
      line_number = token_info.token.line_number
      count = 0
      # Scan forward on the same line, balancing nested open/close tokens,
      # to see whether the literal closes before the line ends.
      while token_check and token_check.line_number == line_number:
        if token_check.type == goal_type:
          count -= 1
          if not count:
            self._RemoveOverrides(token_info)
            break
        if token_check.type == same_type:
          count += 1
        token_check = token_check.next
    return token_info

  def _PopToImpliedBlock(self):
    """Pops the stack until an implied block token is found."""
    while not self._Pop().token.metadata.is_implied_block:
      pass

  def _PopTo(self, stop_type):
    """Pops the stack until a token of the given type is popped.

    Args:
      stop_type: The type of token to pop to.

    Returns:
      The token info of the given type that was popped.
    """
    last = None
    while True:
      last = self._Pop()
      if last.token.type == stop_type:
        break
    return last

  def _RemoveOverrides(self, token_info):
    """Marks any token that was overridden by this token as active again.

    Args:
      token_info: The token that is being removed from the stack.
    """
    for stack_token in self._stack:
      if (stack_token.overridden_by == token_info and
          not stack_token.is_permanent_override):
        stack_token.overridden_by = None

  def _PopTransient(self):
    """Pops all transient tokens - i.e. not blocks, literals, or parens."""
    while self._stack and self._stack[-1].is_transient:
      self._Pop()
diff --git a/tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py b/tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py
new file mode 100644
index 0000000000..9578009daa
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py
@@ -0,0 +1,754 @@
+#!/usr/bin/env python
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Methods for checking JS files for common style guide violations.
+
+These style guide violations should only apply to JavaScript and not to
+other Ecma scripting languages.
+"""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)',
+ 'jacobr@google.com (Jacob Richman)')
+
+import re
+
+from closure_linter import ecmalintrules
+from closure_linter import error_check
+from closure_linter import errors
+from closure_linter import javascripttokenizer
+from closure_linter import javascripttokens
+from closure_linter import requireprovidesorter
+from closure_linter import tokenutil
+from closure_linter.common import error
+from closure_linter.common import position
+
+# Shorthand
+Error = error.Error
+Position = position.Position
+Rule = error_check.Rule
+Type = javascripttokens.JavaScriptTokenType
+
+
+class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
+ """JavaScript lint rules that catch JavaScript specific style errors."""
+
  def __init__(self, namespaces_info):
    """Initializes a JavaScriptLintRules instance.

    Args:
      namespaces_info: Object used for goog.provide/goog.require checks
          (queried via GetClosurizedNamespace, IsExtraProvide, etc. in
          CheckToken), or None to disable those checks.
    """
    ecmalintrules.EcmaScriptLintRules.__init__(self)
    self._namespaces_info = namespaces_info
    # Maps declared private member name -> token where it was declared.
    self._declared_private_member_tokens = {}
    # Names of private members (identifiers ending in '_') seen declared.
    self._declared_private_members = set()
    # Names of private members seen being used.
    self._used_private_members = set()
    # A stack of dictionaries, one for each function scope entered. Each
    # dictionary is keyed by an identifier that defines a local variable and has
    # a token as its value.
    self._unused_local_variables_by_scope = []
+
+ def HandleMissingParameterDoc(self, token, param_name):
+ """Handle errors associated with a parameter missing a param tag."""
+ self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
+ 'Missing docs for parameter: "%s"' % param_name, token)
+
+ # pylint: disable=too-many-statements
+ def CheckToken(self, token, state):
+ """Checks a token, given the current parser_state, for warnings and errors.
+
+ Args:
+ token: The current token under consideration
+ state: parser_state object that indicates the current state in the page
+ """
+
+ # Call the base class's CheckToken function.
+ super(JavaScriptLintRules, self).CheckToken(token, state)
+
+ # Store some convenience variables
+ namespaces_info = self._namespaces_info
+
+ if error_check.ShouldCheck(Rule.UNUSED_LOCAL_VARIABLES):
+ self._CheckUnusedLocalVariables(token, state)
+
+ if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
+ # Find all assignments to private members.
+ if token.type == Type.SIMPLE_LVALUE:
+ identifier = token.string
+ if identifier.endswith('_') and not identifier.endswith('__'):
+ doc_comment = state.GetDocComment()
+ suppressed = doc_comment and (
+ 'underscore' in doc_comment.suppressions or
+ 'unusedPrivateMembers' in doc_comment.suppressions)
+ if not suppressed:
+ # Look for static members defined on a provided namespace.
+ if namespaces_info:
+ namespace = namespaces_info.GetClosurizedNamespace(identifier)
+ provided_namespaces = namespaces_info.GetProvidedNamespaces()
+ else:
+ namespace = None
+ provided_namespaces = set()
+
+ # Skip cases of this.something_.somethingElse_.
+ regex = re.compile(r'^this\.[a-zA-Z_]+$')
+ if namespace in provided_namespaces or regex.match(identifier):
+ variable = identifier.split('.')[-1]
+ self._declared_private_member_tokens[variable] = token
+ self._declared_private_members.add(variable)
+ elif not identifier.endswith('__'):
+ # Consider setting public members of private members to be a usage.
+ for piece in identifier.split('.'):
+ if piece.endswith('_'):
+ self._used_private_members.add(piece)
+
+ # Find all usages of private members.
+ if token.type == Type.IDENTIFIER:
+ for piece in token.string.split('.'):
+ if piece.endswith('_'):
+ self._used_private_members.add(piece)
+
+ if token.type == Type.DOC_FLAG:
+ flag = token.attached_object
+
+ if flag.flag_type == 'param' and flag.name_token is not None:
+ self._CheckForMissingSpaceBeforeToken(
+ token.attached_object.name_token)
+
+ if flag.type is not None and flag.name is not None:
+ if error_check.ShouldCheck(Rule.VARIABLE_ARG_MARKER):
+ # Check for variable arguments marker in type.
+ if flag.jstype.IsVarArgsType() and flag.name != 'var_args':
+ self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_NAME,
+ 'Variable length argument %s must be renamed '
+ 'to var_args.' % flag.name,
+ token)
+ elif not flag.jstype.IsVarArgsType() and flag.name == 'var_args':
+ self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_TYPE,
+ 'Variable length argument %s type must start '
+ 'with \'...\'.' % flag.name,
+ token)
+
+ if error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER):
+ # Check for optional marker in type.
+ if (flag.jstype.opt_arg and
+ not flag.name.startswith('opt_')):
+ self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX,
+ 'Optional parameter name %s must be prefixed '
+ 'with opt_.' % flag.name,
+ token)
+ elif (not flag.jstype.opt_arg and
+ flag.name.startswith('opt_')):
+ self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE,
+ 'Optional parameter %s type must end with =.' %
+ flag.name,
+ token)
+
+ if flag.flag_type in state.GetDocFlag().HAS_TYPE:
+ # Check for both missing type token and empty type braces '{}'
+ # Missing suppress types are reported separately and we allow enums,
+ # const, private, public and protected without types.
+ if (flag.flag_type not in state.GetDocFlag().CAN_OMIT_TYPE
+ and (not flag.jstype or flag.jstype.IsEmpty())):
+ self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
+ 'Missing type in %s tag' % token.string, token)
+
+ elif flag.name_token and flag.type_end_token and tokenutil.Compare(
+ flag.type_end_token, flag.name_token) > 0:
+ self._HandleError(
+ errors.OUT_OF_ORDER_JSDOC_TAG_TYPE,
+ 'Type should be immediately after %s tag' % token.string,
+ token)
+
+ elif token.type == Type.DOUBLE_QUOTE_STRING_START:
+ next_token = token.next
+ while next_token.type == Type.STRING_TEXT:
+ if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
+ next_token.string):
+ break
+ next_token = next_token.next
+ else:
+ self._HandleError(
+ errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
+ 'Single-quoted string preferred over double-quoted string.',
+ token,
+ position=Position.All(token.string))
+
+ elif token.type == Type.END_DOC_COMMENT:
+ doc_comment = state.GetDocComment()
+
+ # When @externs appears in a @fileoverview comment, it should trigger
+ # the same limited doc checks as a special filename like externs.js.
+ if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'):
+ self._SetLimitedDocChecks(True)
+
+ if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and
+ not self._is_html and
+ state.InTopLevel() and
+ not state.InNonScopeBlock()):
+
+ # Check if we're in a fileoverview or constructor JsDoc.
+ is_constructor = (
+ doc_comment.HasFlag('constructor') or
+ doc_comment.HasFlag('interface'))
+ # @fileoverview is an optional tag so if the dosctring is the first
+ # token in the file treat it as a file level docstring.
+ is_file_level_comment = (
+ doc_comment.HasFlag('fileoverview') or
+ not doc_comment.start_token.previous)
+
+ # If the comment is not a file overview, and it does not immediately
+ # precede some code, skip it.
+ # NOTE: The tokenutil methods are not used here because of their
+ # behavior at the top of a file.
+ next_token = token.next
+ if (not next_token or
+ (not is_file_level_comment and
+ next_token.type in Type.NON_CODE_TYPES)):
+ return
+
+ # Don't require extra blank lines around suppression of extra
+ # goog.require errors.
+ if (doc_comment.SuppressionOnly() and
+ next_token.type == Type.IDENTIFIER and
+ next_token.string in ['goog.provide', 'goog.require']):
+ return
+
+ # Find the start of this block (include comments above the block, unless
+ # this is a file overview).
+ block_start = doc_comment.start_token
+ if not is_file_level_comment:
+ token = block_start.previous
+ while token and token.type in Type.COMMENT_TYPES:
+ block_start = token
+ token = token.previous
+
+ # Count the number of blank lines before this block.
+ blank_lines = 0
+ token = block_start.previous
+ while token and token.type in [Type.WHITESPACE, Type.BLANK_LINE]:
+ if token.type == Type.BLANK_LINE:
+ # A blank line.
+ blank_lines += 1
+ elif token.type == Type.WHITESPACE and not token.line.strip():
+ # A line with only whitespace on it.
+ blank_lines += 1
+ token = token.previous
+
+ # Log errors.
+ error_message = False
+ expected_blank_lines = 0
+
+ # Only need blank line before file overview if it is not the beginning
+ # of the file, e.g. copyright is first.
+ if is_file_level_comment and blank_lines == 0 and block_start.previous:
+ error_message = 'Should have a blank line before a file overview.'
+ expected_blank_lines = 1
+ elif is_constructor and blank_lines != 3:
+ error_message = (
+ 'Should have 3 blank lines before a constructor/interface.')
+ expected_blank_lines = 3
+ elif (not is_file_level_comment and not is_constructor and
+ blank_lines != 2):
+ error_message = 'Should have 2 blank lines between top-level blocks.'
+ expected_blank_lines = 2
+
+ if error_message:
+ self._HandleError(
+ errors.WRONG_BLANK_LINE_COUNT, error_message,
+ block_start, position=Position.AtBeginning(),
+ fix_data=expected_blank_lines - blank_lines)
+
+ elif token.type == Type.END_BLOCK:
+ if state.InFunction() and state.IsFunctionClose():
+ is_immediately_called = (token.next and
+ token.next.type == Type.START_PAREN)
+
+ function = state.GetFunction()
+ if not self._limited_doc_checks:
+ if (function.has_return and function.doc and
+ not is_immediately_called and
+ not function.doc.HasFlag('return') and
+ not function.doc.InheritsDocumentation() and
+ not function.doc.HasFlag('constructor')):
+ # Check for proper documentation of return value.
+ self._HandleError(
+ errors.MISSING_RETURN_DOCUMENTATION,
+ 'Missing @return JsDoc in function with non-trivial return',
+ function.doc.end_token, position=Position.AtBeginning())
+ elif (not function.has_return and
+ not function.has_throw and
+ function.doc and
+ function.doc.HasFlag('return') and
+ not state.InInterfaceMethod()):
+ flag = function.doc.GetFlag('return')
+ valid_no_return_names = ['undefined', 'void', '*']
+ invalid_return = flag.jstype is None or not any(
+ sub_type.identifier in valid_no_return_names
+ for sub_type in flag.jstype.IterTypeGroup())
+
+ if invalid_return:
+ self._HandleError(
+ errors.UNNECESSARY_RETURN_DOCUMENTATION,
+ 'Found @return JsDoc on function that returns nothing',
+ flag.flag_token, position=Position.AtBeginning())
+
+ # b/4073735. Method in object literal definition of prototype can
+ # safely reference 'this'.
+ prototype_object_literal = False
+ block_start = None
+ previous_code = None
+ previous_previous_code = None
+
+ # Search for cases where prototype is defined as object literal.
+ # previous_previous_code
+ # | previous_code
+ # | | block_start
+ # | | |
+ # a.b.prototype = {
+ # c : function() {
+ # this.d = 1;
+ # }
+ # }
+
+ # If in object literal, find first token of block so to find previous
+ # tokens to check above condition.
+ if state.InObjectLiteral():
+ block_start = state.GetCurrentBlockStart()
+
+ # If an object literal then get previous token (code type). For above
+ # case it should be '='.
+ if block_start:
+ previous_code = tokenutil.SearchExcept(block_start,
+ Type.NON_CODE_TYPES,
+ reverse=True)
+
+ # If previous token to block is '=' then get its previous token.
+ if previous_code and previous_code.IsOperator('='):
+ previous_previous_code = tokenutil.SearchExcept(previous_code,
+ Type.NON_CODE_TYPES,
+ reverse=True)
+
+ # If variable/token before '=' ends with '.prototype' then its above
+ # case of prototype defined with object literal.
+ prototype_object_literal = (previous_previous_code and
+ previous_previous_code.string.endswith(
+ '.prototype'))
+
+ if (function.has_this and function.doc and
+ not function.doc.HasFlag('this') and
+ not function.is_constructor and
+ not function.is_interface and
+ '.prototype.' not in function.name and
+ not prototype_object_literal):
+ self._HandleError(
+ errors.MISSING_JSDOC_TAG_THIS,
+ 'Missing @this JsDoc in function referencing "this". ('
+ 'this usually means you are trying to reference "this" in '
+ 'a static function, or you have forgotten to mark a '
+ 'constructor with @constructor)',
+ function.doc.end_token, position=Position.AtBeginning())
+
+ elif token.type == Type.IDENTIFIER:
+ if token.string == 'goog.inherits' and not state.InFunction():
+ if state.GetLastNonSpaceToken().line_number == token.line_number:
+ self._HandleError(
+ errors.MISSING_LINE,
+ 'Missing newline between constructor and goog.inherits',
+ token,
+ position=Position.AtBeginning())
+
+ extra_space = state.GetLastNonSpaceToken().next
+ while extra_space != token:
+ if extra_space.type == Type.BLANK_LINE:
+ self._HandleError(
+ errors.EXTRA_LINE,
+ 'Extra line between constructor and goog.inherits',
+ extra_space)
+ extra_space = extra_space.next
+
+ # TODO(robbyw): Test the last function was a constructor.
+ # TODO(robbyw): Test correct @extends and @implements documentation.
+
+ elif (token.string == 'goog.provide' and
+ not state.InFunction() and
+ namespaces_info is not None):
+ namespace = tokenutil.GetStringAfterToken(token)
+
+ # Report extra goog.provide statement.
+ if not namespace or namespaces_info.IsExtraProvide(token):
+ if not namespace:
+ msg = 'Empty namespace in goog.provide'
+ else:
+ msg = 'Unnecessary goog.provide: ' + namespace
+
+ # Hint to user if this is a Test namespace.
+ if namespace.endswith('Test'):
+ msg += (' *Test namespaces must be mentioned in the '
+ 'goog.setTestOnly() call')
+
+ self._HandleError(
+ errors.EXTRA_GOOG_PROVIDE,
+ msg,
+ token, position=Position.AtBeginning())
+
+ if namespaces_info.IsLastProvide(token):
+ # Report missing provide statements after the last existing provide.
+ missing_provides = namespaces_info.GetMissingProvides()
+ if missing_provides:
+ self._ReportMissingProvides(
+ missing_provides,
+ tokenutil.GetLastTokenInSameLine(token).next,
+ False)
+
+ # If there are no require statements, missing requires should be
+ # reported after the last provide.
+ if not namespaces_info.GetRequiredNamespaces():
+ missing_requires, illegal_alias_statements = (
+ namespaces_info.GetMissingRequires())
+ if missing_requires:
+ self._ReportMissingRequires(
+ missing_requires,
+ tokenutil.GetLastTokenInSameLine(token).next,
+ True)
+ if illegal_alias_statements:
+ self._ReportIllegalAliasStatement(illegal_alias_statements)
+
+ elif (token.string == 'goog.require' and
+ not state.InFunction() and
+ namespaces_info is not None):
+ namespace = tokenutil.GetStringAfterToken(token)
+
+ # If there are no provide statements, missing provides should be
+ # reported before the first require.
+ if (namespaces_info.IsFirstRequire(token) and
+ not namespaces_info.GetProvidedNamespaces()):
+ missing_provides = namespaces_info.GetMissingProvides()
+ if missing_provides:
+ self._ReportMissingProvides(
+ missing_provides,
+ tokenutil.GetFirstTokenInSameLine(token),
+ True)
+
+ # Report extra goog.require statement.
+ if not namespace or namespaces_info.IsExtraRequire(token):
+ if not namespace:
+ msg = 'Empty namespace in goog.require'
+ else:
+ msg = 'Unnecessary goog.require: ' + namespace
+
+ self._HandleError(
+ errors.EXTRA_GOOG_REQUIRE,
+ msg,
+ token, position=Position.AtBeginning())
+
+ # Report missing goog.require statements.
+ if namespaces_info.IsLastRequire(token):
+ missing_requires, illegal_alias_statements = (
+ namespaces_info.GetMissingRequires())
+ if missing_requires:
+ self._ReportMissingRequires(
+ missing_requires,
+ tokenutil.GetLastTokenInSameLine(token).next,
+ False)
+ if illegal_alias_statements:
+ self._ReportIllegalAliasStatement(illegal_alias_statements)
+
+ elif token.type == Type.OPERATOR:
+ last_in_line = token.IsLastInLine()
+ # If the token is unary and appears to be used in a unary context
+ # it's ok. Otherwise, if it's at the end of the line or immediately
+ # before a comment, it's ok.
+ # Don't report an error before a start bracket - it will be reported
+ # by that token's space checks.
+ if (not token.metadata.IsUnaryOperator() and not last_in_line
+ and not token.next.IsComment()
+ and not token.next.IsOperator(',')
+ and not tokenutil.IsDot(token)
+ and token.next.type not in (Type.WHITESPACE, Type.END_PAREN,
+ Type.END_BRACKET, Type.SEMICOLON,
+ Type.START_BRACKET)):
+ self._HandleError(
+ errors.MISSING_SPACE,
+ 'Missing space after "%s"' % token.string,
+ token,
+ position=Position.AtEnd(token.string))
+ elif token.type == Type.WHITESPACE:
+ first_in_line = token.IsFirstInLine()
+ last_in_line = token.IsLastInLine()
+ # Check whitespace length if it's not the first token of the line and
+ # if it's not immediately before a comment.
+ if not last_in_line and not first_in_line and not token.next.IsComment():
+ # Ensure there is no space after opening parentheses.
+ if (token.previous.type in (Type.START_PAREN, Type.START_BRACKET,
+ Type.FUNCTION_NAME)
+ or token.next.type == Type.START_PARAMETERS):
+ self._HandleError(
+ errors.EXTRA_SPACE,
+ 'Extra space after "%s"' % token.previous.string,
+ token,
+ position=Position.All(token.string))
+ elif token.type == Type.SEMICOLON:
+ previous_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES,
+ reverse=True)
+ if not previous_token:
+ self._HandleError(
+ errors.REDUNDANT_SEMICOLON,
+ 'Semicolon without any statement',
+ token,
+ position=Position.AtEnd(token.string))
+ elif (previous_token.type == Type.KEYWORD and
+ previous_token.string not in ['break', 'continue', 'return']):
+ self._HandleError(
+ errors.REDUNDANT_SEMICOLON,
+ ('Semicolon after \'%s\' without any statement.'
+ ' Looks like an error.' % previous_token.string),
+ token,
+ position=Position.AtEnd(token.string))
+
  def _CheckUnusedLocalVariables(self, token, state):
    """Checks for unused local variables in function blocks.

    Records 'var' declarations in the innermost entry of
    self._unused_local_variables_by_scope, removes entries as usages are seen
    (via _MarkLocalVariableUsed / _MarkAliasUsed), and reports whatever is
    left in a scope when its function block closes.

    Args:
      token: The token to check.
      state: The state tracker.
    """
    # We don't use state.InFunction because that disregards scope functions.
    in_function = state.FunctionDepth() > 0
    if token.type == Type.SIMPLE_LVALUE or token.type == Type.IDENTIFIER:
      if in_function:
        identifier = token.string
        # Check whether the previous token was var.
        previous_code_token = tokenutil.CustomSearch(
            token,
            lambda t: t.type not in Type.NON_CODE_TYPES,
            reverse=True)
        if previous_code_token and previous_code_token.IsKeyword('var'):
          # Add local variable declaration to the top of the unused locals
          # stack.
          self._unused_local_variables_by_scope[-1][identifier] = token
        elif token.type == Type.IDENTIFIER:
          # This covers most cases where the variable is used as an identifier.
          self._MarkLocalVariableUsed(token.string)
        elif token.type == Type.SIMPLE_LVALUE and '.' in identifier:
          # This covers cases where a value is assigned to a property of the
          # variable.
          self._MarkLocalVariableUsed(token.string)
    elif token.type == Type.START_BLOCK:
      if in_function and state.IsFunctionOpen():
        # Push a new map onto the stack
        self._unused_local_variables_by_scope.append({})
    elif token.type == Type.END_BLOCK:
      if state.IsFunctionClose():
        # Pop the stack and report any remaining locals as unused.
        unused_local_variables = self._unused_local_variables_by_scope.pop()
        for unused_token in unused_local_variables.values():
          self._HandleError(
              errors.UNUSED_LOCAL_VARIABLE,
              'Unused local variable: %s.' % unused_token.string,
              unused_token)
    elif token.type == Type.DOC_FLAG:
      # Flags that use aliased symbols should be counted.
      flag = token.attached_object
      js_type = flag and flag.jstype
      if flag and flag.flag_type in state.GetDocFlag().HAS_TYPE and js_type:
        self._MarkAliasUsed(js_type)
+
+ def _MarkAliasUsed(self, js_type):
+ """Marks aliases in a type as used.
+
+ Recursively iterates over all subtypes in a jsdoc type annotation and
+ tracks usage of aliased symbols (which may be local variables).
+ Marks the local variable as used in the scope nearest to the current
+ scope that matches the given token.
+
+ Args:
+ js_type: The jsdoc type, a typeannotation.TypeAnnotation object.
+ """
+ if js_type.alias:
+ self._MarkLocalVariableUsed(js_type.identifier)
+ for sub_type in js_type.IterTypes():
+ self._MarkAliasUsed(sub_type)
+
+ def _MarkLocalVariableUsed(self, identifier):
+ """Marks the local variable as used in the relevant scope.
+
+ Marks the local variable in the scope nearest to the current scope that
+ matches the given identifier as used.
+
+ Args:
+ identifier: The identifier representing the potential usage of a local
+ variable.
+ """
+ identifier = identifier.split('.', 1)[0]
+ # Find the first instance of the identifier in the stack of function scopes
+ # and mark it used.
+ for unused_local_variables in reversed(
+ self._unused_local_variables_by_scope):
+ if identifier in unused_local_variables:
+ del unused_local_variables[identifier]
+ break
+
+ def _ReportMissingProvides(self, missing_provides, token, need_blank_line):
+ """Reports missing provide statements to the error handler.
+
+ Args:
+ missing_provides: A dictionary of string(key) and integer(value) where
+ each string(key) is a namespace that should be provided, but is not
+ and integer(value) is first line number where it's required.
+ token: The token where the error was detected (also where the new provides
+ will be inserted.
+ need_blank_line: Whether a blank line needs to be inserted after the new
+ provides are inserted. May be True, False, or None, where None
+ indicates that the insert location is unknown.
+ """
+
+ missing_provides_msg = 'Missing the following goog.provide statements:\n'
+ missing_provides_msg += '\n'.join(['goog.provide(\'%s\');' % x for x in
+ sorted(missing_provides)])
+ missing_provides_msg += '\n'
+
+ missing_provides_msg += '\nFirst line where provided: \n'
+ missing_provides_msg += '\n'.join(
+ [' %s : line %d' % (x, missing_provides[x]) for x in
+ sorted(missing_provides)])
+ missing_provides_msg += '\n'
+
+ self._HandleError(
+ errors.MISSING_GOOG_PROVIDE,
+ missing_provides_msg,
+ token, position=Position.AtBeginning(),
+ fix_data=(missing_provides.keys(), need_blank_line))
+
+ def _ReportMissingRequires(self, missing_requires, token, need_blank_line):
+ """Reports missing require statements to the error handler.
+
+ Args:
+ missing_requires: A dictionary of string(key) and integer(value) where
+ each string(key) is a namespace that should be required, but is not
+ and integer(value) is first line number where it's required.
+ token: The token where the error was detected (also where the new requires
+ will be inserted.
+ need_blank_line: Whether a blank line needs to be inserted before the new
+ requires are inserted. May be True, False, or None, where None
+ indicates that the insert location is unknown.
+ """
+
+ missing_requires_msg = 'Missing the following goog.require statements:\n'
+ missing_requires_msg += '\n'.join(['goog.require(\'%s\');' % x for x in
+ sorted(missing_requires)])
+ missing_requires_msg += '\n'
+
+ missing_requires_msg += '\nFirst line where required: \n'
+ missing_requires_msg += '\n'.join(
+ [' %s : line %d' % (x, missing_requires[x]) for x in
+ sorted(missing_requires)])
+ missing_requires_msg += '\n'
+
+ self._HandleError(
+ errors.MISSING_GOOG_REQUIRE,
+ missing_requires_msg,
+ token, position=Position.AtBeginning(),
+ fix_data=(missing_requires.keys(), need_blank_line))
+
+ def _ReportIllegalAliasStatement(self, illegal_alias_statements):
+ """Reports alias statements that would need a goog.require."""
+ for namespace, token in illegal_alias_statements.iteritems():
+ self._HandleError(
+ errors.ALIAS_STMT_NEEDS_GOOG_REQUIRE,
+ 'The alias definition would need the namespace \'%s\' which is not '
+ 'required through any other symbol.' % namespace,
+ token, position=Position.AtBeginning())
+
+ def Finalize(self, state):
+ """Perform all checks that need to occur after all lines are processed."""
+ # Call the base class's Finalize function.
+ super(JavaScriptLintRules, self).Finalize(state)
+
+ if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
+ # Report an error for any declared private member that was never used.
+ unused_private_members = (self._declared_private_members -
+ self._used_private_members)
+
+ for variable in unused_private_members:
+ token = self._declared_private_member_tokens[variable]
+ self._HandleError(errors.UNUSED_PRIVATE_MEMBER,
+ 'Unused private member: %s.' % token.string,
+ token)
+
+ # Clear state to prepare for the next file.
+ self._declared_private_member_tokens = {}
+ self._declared_private_members = set()
+ self._used_private_members = set()
+
+ namespaces_info = self._namespaces_info
+ if namespaces_info is not None:
+ # If there are no provide or require statements, missing provides and
+ # requires should be reported on line 1.
+ if (not namespaces_info.GetProvidedNamespaces() and
+ not namespaces_info.GetRequiredNamespaces()):
+ missing_provides = namespaces_info.GetMissingProvides()
+ if missing_provides:
+ self._ReportMissingProvides(
+ missing_provides, state.GetFirstToken(), None)
+
+ missing_requires, illegal_alias = namespaces_info.GetMissingRequires()
+ if missing_requires:
+ self._ReportMissingRequires(
+ missing_requires, state.GetFirstToken(), None)
+ if illegal_alias:
+ self._ReportIllegalAliasStatement(illegal_alias)
+
+ self._CheckSortedRequiresProvides(state.GetFirstToken())
+
+ def _CheckSortedRequiresProvides(self, token):
+ """Checks that all goog.require and goog.provide statements are sorted.
+
+ Note that this method needs to be run after missing statements are added to
+ preserve alphabetical order.
+
+ Args:
+ token: The first token in the token stream.
+ """
+ sorter = requireprovidesorter.RequireProvideSorter()
+ first_provide_token = sorter.CheckProvides(token)
+ if first_provide_token:
+ new_order = sorter.GetFixedProvideString(first_provide_token)
+ self._HandleError(
+ errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
+ 'goog.provide classes must be alphabetized. The correct code is:\n' +
+ new_order,
+ first_provide_token,
+ position=Position.AtBeginning(),
+ fix_data=first_provide_token)
+
+ first_require_token = sorter.CheckRequires(token)
+ if first_require_token:
+ new_order = sorter.GetFixedRequireString(first_require_token)
+ self._HandleError(
+ errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
+ 'goog.require classes must be alphabetized. The correct code is:\n' +
+ new_order,
+ first_require_token,
+ position=Position.AtBeginning(),
+ fix_data=first_require_token)
+
+ def GetLongLineExceptions(self):
+ """Gets a list of regexps for lines which can be longer than the limit.
+
+ Returns:
+ A list of regexps, used as matches (rather than searches).
+ """
+ return [
+ re.compile(r'(var .+\s*=\s*)?goog\.require\(.+\);?\s*$'),
+ re.compile(r'goog\.(provide|module|setTestOnly)\(.+\);?\s*$'),
+ re.compile(r'[\s/*]*@visibility\s*{.*}[\s*/]*$'),
+ ]
diff --git a/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py b/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py
new file mode 100644
index 0000000000..e0a42f66a8
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Parser for JavaScript files."""
+
+
+
+from closure_linter import javascripttokens
+from closure_linter import statetracker
+from closure_linter import tokenutil
+
+# Shorthand
+Type = javascripttokens.JavaScriptTokenType
+
+
class JsDocFlag(statetracker.DocFlag):
  """Javascript doc flag object.

  Attributes:
    flag_type: param, return, define, type, etc.
    flag_token: The flag token.
    type_start_token: The first token specifying the flag JS type,
      including braces.
    type_end_token: The last token specifying the flag JS type,
      including braces.
    type: The type spec string.
    jstype: The type spec, a TypeAnnotation instance.
    name_token: The token specifying the flag name.
    name: The flag name
    description_start_token: The first token in the description.
    description_end_token: The end token in the description.
    description: The description.
  """

  # Please keep these lists alphabetized.

  # Some projects use the following extensions to JsDoc.
  # TODO(robbyw): determine which of these, if any, should be illegal.
  EXTENDED_DOC = frozenset([
      'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link',
      'meaning', 'provideGoog', 'throws'])

  # Full set of accepted flags: the extensions above plus the base
  # EcmaScript set inherited from statetracker.DocFlag.
  LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC
+
+
class JavaScriptStateTracker(statetracker.StateTracker):
  """JavaScript state tracker.

  Inherits from the core EcmaScript StateTracker adding extra state tracking
  functionality needed for JavaScript.
  """

  def __init__(self):
    """Initializes a JavaScript token stream state tracker."""
    statetracker.StateTracker.__init__(self, JsDocFlag)

  def Reset(self):
    """Resets state to prepare for processing a new token stream."""
    # Number of goog.scope calls seen so far (goog.scope nesting depth).
    self._scope_depth = 0
    # Stack of START_BLOCK tokens for all currently open blocks.
    self._block_stack = []
    super(JavaScriptStateTracker, self).Reset()

  def InTopLevel(self):
    """Compute whether we are at the top level in the class.

    This function call is language specific. In some languages like
    JavaScript, a function is top level if it is not inside any parenthesis.
    In languages such as ActionScript, a function is top level if it is directly
    within a class.

    Returns:
      Whether we are at the top level in the class.
    """
    return self._scope_depth == self.ParenthesesDepth()

  def InFunction(self):
    """Returns true if the current token is within a function.

    This js-specific override ignores goog.scope functions.

    Returns:
      True if the current token is within a function.
    """
    return self._scope_depth != self.FunctionDepth()

  def InNonScopeBlock(self):
    """Compute whether we are nested within a non-goog.scope block.

    Returns:
      True if the token is not enclosed in a block that does not originate from
      a goog.scope statement. False otherwise.
    """
    return self._scope_depth != self.BlockDepth()

  def GetBlockType(self, token):
    """Determine the block type given a START_BLOCK token.

    Code blocks come after parameters, keywords like else, and closing parens.

    Args:
      token: The current token. Can be assumed to be type START_BLOCK
    Returns:
      Code block type for current token.
    """
    # NOTE(review): assumes a code token precedes every START_BLOCK;
    # last_code would be None for a '{' opening a file — confirm upstream.
    last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, reverse=True)
    if last_code.type in (Type.END_PARAMETERS, Type.END_PAREN,
                          Type.KEYWORD) and not last_code.IsKeyword('return'):
      return self.CODE
    else:
      # 'return {' and identifiers/operators before '{' mean object literal.
      return self.OBJECT_LITERAL

  def GetCurrentBlockStart(self):
    """Gets the start token of current block.

    Returns:
      Starting token of current block. None if not in block.
    """
    if self._block_stack:
      return self._block_stack[-1]
    else:
      return None

  def HandleToken(self, token, last_non_space_token):
    """Handles the given token and updates state.

    Args:
      token: The token to handle.
      last_non_space_token: The last non space token encountered
    """
    # These checks are independent ifs (not elifs): a token has exactly one
    # type, but each case updates a different piece of state.
    if token.type == Type.START_BLOCK:
      self._block_stack.append(token)
    if token.type == Type.IDENTIFIER and token.string == 'goog.scope':
      self._scope_depth += 1
    if token.type == Type.END_BLOCK:
      start_token = self._block_stack.pop()
      # Only decrement scope depth if this block was a goog.scope body.
      if tokenutil.GoogScopeOrNoneFromStartBlock(start_token):
        self._scope_depth -= 1
    super(JavaScriptStateTracker, self).HandleToken(token,
                                                    last_non_space_token)
diff --git a/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py b/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py
new file mode 100644
index 0000000000..76dabd2c70
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py
@@ -0,0 +1,278 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the javascriptstatetracker module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+
+import unittest as googletest
+
+from closure_linter import javascripttokens
+from closure_linter import testutil
+from closure_linter import tokenutil
+
+
+_FUNCTION_SCRIPT = """\
+var a = 3;
+
+function foo(aaa, bbb, ccc) {
+ var b = 4;
+}
+
+
+/**
+ * JSDoc comment.
+ */
+var bar = function(ddd, eee, fff) {
+
+};
+
+
+/**
+ * Verify that nested functions get their proper parameters recorded.
+ */
+var baz = function(ggg, hhh, iii) {
+ var qux = function(jjj, kkk, lll) {
+ };
+ // make sure that entering a new block does not change baz' parameters.
+ {};
+};
+
+"""
+
+
+class FunctionTest(googletest.TestCase):
+
+ def testFunctionParse(self):
+ functions, _ = testutil.ParseFunctionsAndComments(_FUNCTION_SCRIPT)
+ self.assertEquals(4, len(functions))
+
+ # First function
+ function = functions[0]
+ self.assertEquals(['aaa', 'bbb', 'ccc'], function.parameters)
+
+ start_token = function.start_token
+ end_token = function.end_token
+
+ self.assertEquals(
+ javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
+ function.start_token.type)
+
+ self.assertEquals('function', start_token.string)
+ self.assertEquals(3, start_token.line_number)
+ self.assertEquals(0, start_token.start_index)
+
+ self.assertEquals('}', end_token.string)
+ self.assertEquals(5, end_token.line_number)
+ self.assertEquals(0, end_token.start_index)
+
+ self.assertEquals('foo', function.name)
+
+ self.assertIsNone(function.doc)
+
+ # Second function
+ function = functions[1]
+ self.assertEquals(['ddd', 'eee', 'fff'], function.parameters)
+
+ start_token = function.start_token
+ end_token = function.end_token
+
+ self.assertEquals(
+ javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
+ function.start_token.type)
+
+ self.assertEquals('function', start_token.string)
+ self.assertEquals(11, start_token.line_number)
+ self.assertEquals(10, start_token.start_index)
+
+ self.assertEquals('}', end_token.string)
+ self.assertEquals(13, end_token.line_number)
+ self.assertEquals(0, end_token.start_index)
+
+ self.assertEquals('bar', function.name)
+
+ self.assertIsNotNone(function.doc)
+
+ # Check function JSDoc
+ doc = function.doc
+ doc_tokens = tokenutil.GetTokenRange(doc.start_token, doc.end_token)
+
+ comment_type = javascripttokens.JavaScriptTokenType.COMMENT
+ comment_tokens = filter(lambda t: t.type is comment_type, doc_tokens)
+
+ self.assertEquals('JSDoc comment.',
+ tokenutil.TokensToString(comment_tokens).strip())
+
+ # Third function
+ function = functions[2]
+ self.assertEquals(['ggg', 'hhh', 'iii'], function.parameters)
+
+ start_token = function.start_token
+ end_token = function.end_token
+
+ self.assertEquals(
+ javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
+ function.start_token.type)
+
+ self.assertEquals('function', start_token.string)
+ self.assertEquals(19, start_token.line_number)
+ self.assertEquals(10, start_token.start_index)
+
+ self.assertEquals('}', end_token.string)
+ self.assertEquals(24, end_token.line_number)
+ self.assertEquals(0, end_token.start_index)
+
+ self.assertEquals('baz', function.name)
+ self.assertIsNotNone(function.doc)
+
+ # Fourth function (inside third function)
+ function = functions[3]
+ self.assertEquals(['jjj', 'kkk', 'lll'], function.parameters)
+
+ start_token = function.start_token
+ end_token = function.end_token
+
+ self.assertEquals(
+ javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
+ function.start_token.type)
+
+ self.assertEquals('function', start_token.string)
+ self.assertEquals(20, start_token.line_number)
+ self.assertEquals(12, start_token.start_index)
+
+ self.assertEquals('}', end_token.string)
+ self.assertEquals(21, end_token.line_number)
+ self.assertEquals(2, end_token.start_index)
+
+ self.assertEquals('qux', function.name)
+ self.assertIsNone(function.doc)
+
+
+
+class CommentTest(googletest.TestCase):
+
+ def testGetDescription(self):
+ comment = self._ParseComment("""
+ /**
+ * Comment targeting goog.foo.
+ *
+ * This is the second line.
+ * @param {number} foo The count of foo.
+ */
+ target;""")
+
+ self.assertEqual(
+ 'Comment targeting goog.foo.\n\nThis is the second line.',
+ comment.description)
+
+ def testCommentGetTarget(self):
+ self.assertCommentTarget('goog.foo', """
+ /**
+ * Comment targeting goog.foo.
+ */
+ goog.foo = 6;
+ """)
+
+ self.assertCommentTarget('bar', """
+ /**
+ * Comment targeting bar.
+ */
+ var bar = "Karate!";
+ """)
+
+ self.assertCommentTarget('doThing', """
+ /**
+ * Comment targeting doThing.
+ */
+ function doThing() {};
+ """)
+
+ self.assertCommentTarget('this.targetProperty', """
+ goog.bar.Baz = function() {
+ /**
+ * Comment targeting targetProperty.
+ */
+ this.targetProperty = 3;
+ };
+ """)
+
+ self.assertCommentTarget('goog.bar.prop', """
+ /**
+ * Comment targeting goog.bar.prop.
+ */
+ goog.bar.prop;
+ """)
+
+ self.assertCommentTarget('goog.aaa.bbb', """
+ /**
+ * Comment targeting goog.aaa.bbb.
+ */
+ (goog.aaa.bbb)
+ """)
+
+ self.assertCommentTarget('theTarget', """
+ /**
+ * Comment targeting symbol preceded by newlines, whitespace,
+ * and parens -- things we ignore.
+ */
+ (theTarget)
+ """)
+
+ self.assertCommentTarget(None, """
+ /**
+ * @fileoverview File overview.
+ */
+ (notATarget)
+ """)
+
+ self.assertCommentTarget(None, """
+ /**
+ * Comment that doesn't find a target.
+ */
+ """)
+
+ self.assertCommentTarget('theTarget.is.split.across.lines', """
+ /**
+ * Comment that addresses a symbol split across lines.
+ */
+ (theTarget.is.split
+ .across.lines)
+ """)
+
+ self.assertCommentTarget('theTarget.is.split.across.lines', """
+ /**
+ * Comment that addresses a symbol split across lines.
+ */
+ (theTarget.is.split.
+ across.lines)
+ """)
+
+ def _ParseComment(self, script):
+ """Parse a script that contains one comment and return it."""
+ _, comments = testutil.ParseFunctionsAndComments(script)
+ self.assertEquals(1, len(comments))
+ return comments[0]
+
+ def assertCommentTarget(self, target, script):
+ comment = self._ParseComment(script)
+ self.assertEquals(target, comment.GetTargetIdentifier())
+
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py b/tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py
new file mode 100644
index 0000000000..2ee5b81ee1
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py
@@ -0,0 +1,463 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Regular expression based JavaScript parsing classes."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+import copy
+import re
+
+from closure_linter import javascripttokens
+from closure_linter.common import matcher
+from closure_linter.common import tokenizer
+
+# Shorthand
+Type = javascripttokens.JavaScriptTokenType
+Matcher = matcher.Matcher
+
+
+class JavaScriptModes(object):
+ """Enumeration of the different matcher modes used for JavaScript."""
+ TEXT_MODE = 'text'
+ SINGLE_QUOTE_STRING_MODE = 'single_quote_string'
+ DOUBLE_QUOTE_STRING_MODE = 'double_quote_string'
+ BLOCK_COMMENT_MODE = 'block_comment'
+ DOC_COMMENT_MODE = 'doc_comment'
+ DOC_COMMENT_LEX_SPACES_MODE = 'doc_comment_spaces'
+ LINE_COMMENT_MODE = 'line_comment'
+ PARAMETER_MODE = 'parameter'
+ FUNCTION_MODE = 'function'
+
+
+class JavaScriptTokenizer(tokenizer.Tokenizer):
+ """JavaScript tokenizer.
+
+ Convert JavaScript code in to an array of tokens.
+ """
+
+ # Useful patterns for JavaScript parsing.
+ IDENTIFIER_CHAR = r'A-Za-z0-9_$'
+
+ # Number patterns based on:
+ # http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html
+ MANTISSA = r"""
+ (\d+(?!\.)) | # Matches '10'
+ (\d+\.(?!\d)) | # Matches '10.'
+ (\d*\.\d+) # Matches '.5' or '10.5'
+ """
+ DECIMAL_LITERAL = r'(%s)([eE][-+]?\d+)?' % MANTISSA
+ HEX_LITERAL = r'0[xX][0-9a-fA-F]+'
+ NUMBER = re.compile(r"""
+ ((%s)|(%s))
+ """ % (HEX_LITERAL, DECIMAL_LITERAL), re.VERBOSE)
+
+ # Strings come in three parts - first we match the start of the string, then
+ # the contents, then the end. The contents consist of any character except a
+ # backslash or end of string, or a backslash followed by any character, or a
+ # backslash followed by end of line to support correct parsing of multi-line
+ # strings.
+ SINGLE_QUOTE = re.compile(r"'")
+ SINGLE_QUOTE_TEXT = re.compile(r"([^'\\]|\\(.|$))+")
+ DOUBLE_QUOTE = re.compile(r'"')
+ DOUBLE_QUOTE_TEXT = re.compile(r'([^"\\]|\\(.|$))+')
+
+ START_SINGLE_LINE_COMMENT = re.compile(r'//')
+ END_OF_LINE_SINGLE_LINE_COMMENT = re.compile(r'//$')
+
+ START_DOC_COMMENT = re.compile(r'/\*\*')
+ START_BLOCK_COMMENT = re.compile(r'/\*')
+ END_BLOCK_COMMENT = re.compile(r'\*/')
+ BLOCK_COMMENT_TEXT = re.compile(r'([^*]|\*(?!/))+')
+
+ # Comment text is anything that we are not going to parse into another special
+ # token like (inline) flags or end comments. Complicated regex to match
+ # most normal characters, and '*', '{', '}', and '@' when we are sure that
+ # it is safe. Expression [^*{\s]@ must come first, or the other options will
+ # match everything before @, and we won't match @'s that aren't part of flags
+ # like in email addresses in the @author tag.
+ DOC_COMMENT_TEXT = re.compile(r'([^*{}\s]@|[^*{}@]|\*(?!/))+')
+ DOC_COMMENT_NO_SPACES_TEXT = re.compile(r'([^*{}\s]@|[^*{}@\s]|\*(?!/))+')
+ # Match anything that is allowed in a type definition, except for tokens
+ # needed to parse it (and the lookahead assertion for "*/").
+ DOC_COMMENT_TYPE_TEXT = re.compile(r'([^*|!?=<>(){}:,\s]|\*(?!/))+')
+
+ # Match the prefix ' * ' that starts every line of jsdoc. Want to include
+ # spaces after the '*', but nothing else that occurs after a '*', and don't
+ # want to match the '*' in '*/'.
+ DOC_PREFIX = re.compile(r'\s*\*(\s+|(?!/))')
+
+ START_BLOCK = re.compile('{')
+ END_BLOCK = re.compile('}')
+
+ REGEX_CHARACTER_CLASS = r"""
+ \[ # Opening bracket
+ ([^\]\\]|\\.)* # Anything but a ] or \,
+ # or a backslash followed by anything
+ \] # Closing bracket
+ """
+ # We ensure the regex is followed by one of the above tokens to avoid
+ # incorrectly parsing something like x / y / z as x REGEX(/ y /) z
+ POST_REGEX_LIST = [
+ ';', ',', r'\.', r'\)', r'\]', '$', r'\/\/', r'\/\*', ':', '}']
+
+ REGEX = re.compile(r"""
+ / # opening slash
+ (?!\*) # not the start of a comment
+ (\\.|[^\[\/\\]|(%s))* # a backslash followed by anything,
+ # or anything but a / or [ or \,
+ # or a character class
+ / # closing slash
+ [gimsx]* # optional modifiers
+ (?=\s*(%s))
+ """ % (REGEX_CHARACTER_CLASS, '|'.join(POST_REGEX_LIST)),
+ re.VERBOSE)
+
+ ANYTHING = re.compile(r'.*')
+ PARAMETERS = re.compile(r'[^\)]+')
+ CLOSING_PAREN_WITH_SPACE = re.compile(r'\)\s*')
+
+ FUNCTION_DECLARATION = re.compile(r'\bfunction\b')
+
+ OPENING_PAREN = re.compile(r'\(')
+ CLOSING_PAREN = re.compile(r'\)')
+
+ OPENING_BRACKET = re.compile(r'\[')
+ CLOSING_BRACKET = re.compile(r'\]')
+
+ # We omit these JS keywords from the list:
+ # function - covered by FUNCTION_DECLARATION.
+ # delete, in, instanceof, new, typeof - included as operators.
+ # this - included in identifiers.
+ # null, undefined - not included, should go in some "special constant" list.
+ KEYWORD_LIST = [
+ 'break',
+ 'case',
+ 'catch',
+ 'continue',
+ 'default',
+ 'do',
+ 'else',
+ 'finally',
+ 'for',
+ 'if',
+ 'return',
+ 'switch',
+ 'throw',
+ 'try',
+ 'var',
+ 'while',
+ 'with',
+ ]
+
+ # List of regular expressions to match as operators. Some notes: for our
+ # purposes, the comma behaves similarly enough to a normal operator that we
+ # include it here. r'\bin\b' actually matches 'in' surrounded by boundary
+ # characters - this may not match some very esoteric uses of the in operator.
+ # Operators that are subsets of larger operators must come later in this list
+ # for proper matching, e.g., '>>' must come AFTER '>>>'.
+ OPERATOR_LIST = [
+ ',',
+ r'\+\+',
+ '===',
+ '!==',
+ '>>>=',
+ '>>>',
+ '==',
+ '>=',
+ '<=',
+ '!=',
+ '<<=',
+ '>>=',
+ '<<',
+ '>>',
+ '=>',
+ '>',
+ '<',
+ r'\+=',
+ r'\+',
+ '--',
+ r'\^=',
+ '-=',
+ '-',
+ '/=',
+ '/',
+ r'\*=',
+ r'\*',
+ '%=',
+ '%',
+ '&&',
+ r'\|\|',
+ '&=',
+ '&',
+ r'\|=',
+ r'\|',
+ '=',
+ '!',
+ ':',
+ r'\?',
+ r'\^',
+ r'\bdelete\b',
+ r'\bin\b',
+ r'\binstanceof\b',
+ r'\bnew\b',
+ r'\btypeof\b',
+ r'\bvoid\b',
+ r'\.',
+ ]
+ OPERATOR = re.compile('|'.join(OPERATOR_LIST))
+
+ WHITESPACE = re.compile(r'\s+')
+ SEMICOLON = re.compile(r';')
+ # Technically JavaScript identifiers can't contain '.', but we treat a set of
+ # nested identifiers as a single identifier, except for trailing dots.
+ NESTED_IDENTIFIER = r'[a-zA-Z_$]([%s]|\.[a-zA-Z_$])*' % IDENTIFIER_CHAR
+ IDENTIFIER = re.compile(NESTED_IDENTIFIER)
+
+ SIMPLE_LVALUE = re.compile(r"""
+ (?P<identifier>%s) # a valid identifier
+ (?=\s* # optional whitespace
+ \= # look ahead to equal sign
+      (?!=))             # not followed by another equal sign
+ """ % NESTED_IDENTIFIER, re.VERBOSE)
+
+ # A doc flag is a @ sign followed by non-space characters that appears at the
+ # beginning of the line, after whitespace, or after a '{'. The look-behind
+ # check is necessary to not match someone@google.com as a flag.
+ DOC_FLAG = re.compile(r'(^|(?<=\s))@(?P<name>[a-zA-Z]+)')
+ # To properly parse parameter names and complex doctypes containing
+ # whitespace, we need to tokenize whitespace into a token after certain
+ # doctags. All statetracker.HAS_TYPE that are not listed here must not contain
+ # any whitespace in their types.
+ DOC_FLAG_LEX_SPACES = re.compile(
+ r'(^|(?<=\s))@(?P<name>%s)\b' %
+ '|'.join([
+ 'const',
+ 'enum',
+ 'extends',
+ 'final',
+ 'implements',
+ 'param',
+ 'private',
+ 'protected',
+ 'public',
+ 'return',
+ 'type',
+ 'typedef'
+ ]))
+
+ DOC_INLINE_FLAG = re.compile(r'(?<={)@(?P<name>[a-zA-Z]+)')
+
+ DOC_TYPE_BLOCK_START = re.compile(r'[<(]')
+ DOC_TYPE_BLOCK_END = re.compile(r'[>)]')
+ DOC_TYPE_MODIFIERS = re.compile(r'[!?|,:=]')
+
+ # Star followed by non-slash, i.e a star that does not end a comment.
+ # This is used for TYPE_GROUP below.
+ SAFE_STAR = r'(\*(?!/))'
+
+ COMMON_DOC_MATCHERS = [
+ # Find the end of the comment.
+ Matcher(END_BLOCK_COMMENT, Type.END_DOC_COMMENT,
+ JavaScriptModes.TEXT_MODE),
+
+ # Tokenize documented flags like @private.
+ Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG),
+ Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG,
+ JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE),
+
+ # Encountering a doc flag should leave lex spaces mode.
+ Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE),
+
+ # Tokenize braces so we can find types.
+ Matcher(START_BLOCK, Type.DOC_START_BRACE),
+ Matcher(END_BLOCK, Type.DOC_END_BRACE),
+
+ # And some more to parse types.
+ Matcher(DOC_TYPE_BLOCK_START, Type.DOC_TYPE_START_BLOCK),
+ Matcher(DOC_TYPE_BLOCK_END, Type.DOC_TYPE_END_BLOCK),
+
+ Matcher(DOC_TYPE_MODIFIERS, Type.DOC_TYPE_MODIFIER),
+ Matcher(DOC_COMMENT_TYPE_TEXT, Type.COMMENT),
+
+ Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)]
+
+ # When text is not matched, it is given this default type based on mode.
+ # If unspecified in this map, the default default is Type.NORMAL.
+ JAVASCRIPT_DEFAULT_TYPES = {
+ JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT,
+ JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT
+ }
+
+ @classmethod
+ def BuildMatchers(cls):
+ """Builds the token matcher group.
+
+ The token matcher groups work as follows: it is a list of Matcher objects.
+ The matchers will be tried in this order, and the first to match will be
+ returned. Hence the order is important because the matchers that come first
+ overrule the matchers that come later.
+
+ Returns:
+ The completed token matcher group.
+ """
+ # Match a keyword string followed by a non-identifier character in order to
+ # not match something like doSomething as do + Something.
+ keyword = re.compile('(%s)((?=[^%s])|$)' % (
+ '|'.join(cls.KEYWORD_LIST), cls.IDENTIFIER_CHAR))
+ return {
+
+ # Matchers for basic text mode.
+ JavaScriptModes.TEXT_MODE: [
+ # Check a big group - strings, starting comments, and regexes - all
+ # of which could be intertwined. 'string with /regex/',
+ # /regex with 'string'/, /* comment with /regex/ and string */ (and
+ # so on)
+ Matcher(cls.START_DOC_COMMENT, Type.START_DOC_COMMENT,
+ JavaScriptModes.DOC_COMMENT_MODE),
+ Matcher(cls.START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
+ JavaScriptModes.BLOCK_COMMENT_MODE),
+ Matcher(cls.END_OF_LINE_SINGLE_LINE_COMMENT,
+ Type.START_SINGLE_LINE_COMMENT),
+ Matcher(cls.START_SINGLE_LINE_COMMENT,
+ Type.START_SINGLE_LINE_COMMENT,
+ JavaScriptModes.LINE_COMMENT_MODE),
+ Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
+ JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
+ Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
+ JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
+ Matcher(cls.REGEX, Type.REGEX),
+
+ # Next we check for start blocks appearing outside any of the items
+ # above.
+ Matcher(cls.START_BLOCK, Type.START_BLOCK),
+ Matcher(cls.END_BLOCK, Type.END_BLOCK),
+
+ # Then we search for function declarations.
+ Matcher(cls.FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
+ JavaScriptModes.FUNCTION_MODE),
+
+ # Next, we convert non-function related parens to tokens.
+ Matcher(cls.OPENING_PAREN, Type.START_PAREN),
+ Matcher(cls.CLOSING_PAREN, Type.END_PAREN),
+
+ # Next, we convert brackets to tokens.
+ Matcher(cls.OPENING_BRACKET, Type.START_BRACKET),
+ Matcher(cls.CLOSING_BRACKET, Type.END_BRACKET),
+
+ # Find numbers. This has to happen before operators because
+ # scientific notation numbers can have + and - in them.
+ Matcher(cls.NUMBER, Type.NUMBER),
+
+ # Find operators and simple assignments
+ Matcher(cls.SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
+ Matcher(cls.OPERATOR, Type.OPERATOR),
+
+ # Find key words and whitespace.
+ Matcher(keyword, Type.KEYWORD),
+ Matcher(cls.WHITESPACE, Type.WHITESPACE),
+
+ # Find identifiers.
+ Matcher(cls.IDENTIFIER, Type.IDENTIFIER),
+
+ # Finally, we convert semicolons to tokens.
+ Matcher(cls.SEMICOLON, Type.SEMICOLON)],
+
+ # Matchers for single quote strings.
+ JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
+ Matcher(cls.SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
+ Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
+ JavaScriptModes.TEXT_MODE)],
+
+ # Matchers for double quote strings.
+ JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
+ Matcher(cls.DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
+ Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
+ JavaScriptModes.TEXT_MODE)],
+
+ # Matchers for block comments.
+ JavaScriptModes.BLOCK_COMMENT_MODE: [
+ # First we check for exiting a block comment.
+ Matcher(cls.END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
+ JavaScriptModes.TEXT_MODE),
+
+          # Match non-comment-ending text.
+ Matcher(cls.BLOCK_COMMENT_TEXT, Type.COMMENT)],
+
+ # Matchers for doc comments.
+ JavaScriptModes.DOC_COMMENT_MODE: cls.COMMON_DOC_MATCHERS + [
+ Matcher(cls.DOC_COMMENT_TEXT, Type.COMMENT)],
+
+ JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: cls.COMMON_DOC_MATCHERS + [
+ Matcher(cls.WHITESPACE, Type.COMMENT),
+ Matcher(cls.DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
+
+ # Matchers for single line comments.
+ JavaScriptModes.LINE_COMMENT_MODE: [
+ # We greedy match until the end of the line in line comment mode.
+ Matcher(cls.ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
+
+ # Matchers for code after the function keyword.
+ JavaScriptModes.FUNCTION_MODE: [
+ # Must match open paren before anything else and move into parameter
+ # mode, otherwise everything inside the parameter list is parsed
+ # incorrectly.
+ Matcher(cls.OPENING_PAREN, Type.START_PARAMETERS,
+ JavaScriptModes.PARAMETER_MODE),
+ Matcher(cls.WHITESPACE, Type.WHITESPACE),
+ Matcher(cls.IDENTIFIER, Type.FUNCTION_NAME)],
+
+ # Matchers for function parameters
+ JavaScriptModes.PARAMETER_MODE: [
+ # When in function parameter mode, a closing paren is treated
+ # specially. Everything else is treated as lines of parameters.
+ Matcher(cls.CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
+ JavaScriptModes.TEXT_MODE),
+ Matcher(cls.PARAMETERS, Type.PARAMETERS,
+ JavaScriptModes.PARAMETER_MODE)]}
+
+ def __init__(self, parse_js_doc=True):
+ """Create a tokenizer object.
+
+ Args:
+ parse_js_doc: Whether to do detailed parsing of javascript doc comments,
+ or simply treat them as normal comments. Defaults to parsing JsDoc.
+ """
+ matchers = self.BuildMatchers()
+ if not parse_js_doc:
+ # Make a copy so the original doesn't get modified.
+ matchers = copy.deepcopy(matchers)
+ matchers[JavaScriptModes.DOC_COMMENT_MODE] = matchers[
+ JavaScriptModes.BLOCK_COMMENT_MODE]
+
+ tokenizer.Tokenizer.__init__(self, JavaScriptModes.TEXT_MODE, matchers,
+ self.JAVASCRIPT_DEFAULT_TYPES)
+
+ def _CreateToken(self, string, token_type, line, line_number, values=None):
+ """Creates a new JavaScriptToken object.
+
+ Args:
+ string: The string of input the token contains.
+ token_type: The type of token.
+ line: The text of the line this token is in.
+ line_number: The line number of the token.
+ values: A dict of named values within the token. For instance, a
+ function declaration may have a value called 'name' which captures the
+ name of the function.
+ """
+ return javascripttokens.JavaScriptToken(string, token_type, line,
+ line_number, values, line_number)
diff --git a/tools/closure_linter/build/lib/closure_linter/javascripttokens.py b/tools/closure_linter/build/lib/closure_linter/javascripttokens.py
new file mode 100644
index 0000000000..f5815d2bf8
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/javascripttokens.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Classes to represent JavaScript tokens."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+from closure_linter.common import tokens
+
+class JavaScriptTokenType(tokens.TokenType):
+ """Enumeration of JavaScript token types, and useful sets of token types."""
+ NUMBER = 'number'
+ START_SINGLE_LINE_COMMENT = '//'
+ START_BLOCK_COMMENT = '/*'
+ START_DOC_COMMENT = '/**'
+ END_BLOCK_COMMENT = '*/'
+ END_DOC_COMMENT = 'doc */'
+ COMMENT = 'comment'
+ SINGLE_QUOTE_STRING_START = "'string"
+ SINGLE_QUOTE_STRING_END = "string'"
+ DOUBLE_QUOTE_STRING_START = '"string'
+ DOUBLE_QUOTE_STRING_END = 'string"'
+ STRING_TEXT = 'string'
+ START_BLOCK = '{'
+ END_BLOCK = '}'
+ START_PAREN = '('
+ END_PAREN = ')'
+ START_BRACKET = '['
+ END_BRACKET = ']'
+ REGEX = '/regex/'
+ FUNCTION_DECLARATION = 'function(...)'
+ FUNCTION_NAME = 'function functionName(...)'
+ START_PARAMETERS = 'startparams('
+ PARAMETERS = 'pa,ra,ms'
+ END_PARAMETERS = ')endparams'
+ SEMICOLON = ';'
+ DOC_FLAG = '@flag'
+ DOC_INLINE_FLAG = '{@flag ...}'
+ DOC_START_BRACE = 'doc {'
+ DOC_END_BRACE = 'doc }'
+ DOC_PREFIX = 'comment prefix: * '
+ DOC_TYPE_START_BLOCK = 'Type <'
+ DOC_TYPE_END_BLOCK = 'Type >'
+ DOC_TYPE_MODIFIER = 'modifier'
+ SIMPLE_LVALUE = 'lvalue='
+ KEYWORD = 'keyword'
+ OPERATOR = 'operator'
+ IDENTIFIER = 'identifier'
+
+ STRING_TYPES = frozenset([
+ SINGLE_QUOTE_STRING_START, SINGLE_QUOTE_STRING_END,
+ DOUBLE_QUOTE_STRING_START, DOUBLE_QUOTE_STRING_END, STRING_TEXT])
+
+ COMMENT_TYPES = frozenset([
+ START_SINGLE_LINE_COMMENT, COMMENT,
+ START_BLOCK_COMMENT, START_DOC_COMMENT,
+ END_BLOCK_COMMENT, END_DOC_COMMENT,
+ DOC_START_BRACE, DOC_END_BRACE,
+ DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX,
+ DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER])
+
+ FLAG_DESCRIPTION_TYPES = frozenset([
+ DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE,
+ DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER])
+
+ FLAG_ENDING_TYPES = frozenset([DOC_FLAG, END_DOC_COMMENT])
+
+ NON_CODE_TYPES = COMMENT_TYPES | frozenset([
+ tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])
+
+ UNARY_OPERATORS = ['!', 'new', 'delete', 'typeof', 'void']
+
+ UNARY_OK_OPERATORS = ['--', '++', '-', '+'] + UNARY_OPERATORS
+
+ UNARY_POST_OPERATORS = ['--', '++']
+
+ # An expression ender is any token that can end an object - i.e. we could have
+ # x.y or [1, 2], or (10 + 9) or {a: 10}.
+ EXPRESSION_ENDER_TYPES = [tokens.TokenType.NORMAL, IDENTIFIER, NUMBER,
+ SIMPLE_LVALUE, END_BRACKET, END_PAREN, END_BLOCK,
+ SINGLE_QUOTE_STRING_END, DOUBLE_QUOTE_STRING_END]
+
+
+class JavaScriptToken(tokens.Token):
+ """JavaScript token subclass of Token, provides extra instance checks.
+
+ The following token types have data in attached_object:
+ - All JsDoc flags: a parser.JsDocFlag object.
+ """
+
+ def IsKeyword(self, keyword):
+ """Tests if this token is the given keyword.
+
+ Args:
+ keyword: The keyword to compare to.
+
+ Returns:
+ True if this token is a keyword token with the given name.
+ """
+ return self.type == JavaScriptTokenType.KEYWORD and self.string == keyword
+
+ def IsOperator(self, operator):
+ """Tests if this token is the given operator.
+
+ Args:
+ operator: The operator to compare to.
+
+ Returns:
+      True if this token is an operator token with the given name.
+ """
+ return self.type == JavaScriptTokenType.OPERATOR and self.string == operator
+
+ def IsAssignment(self):
+ """Tests if this token is an assignment operator.
+
+ Returns:
+ True if this token is an assignment operator.
+ """
+ return (self.type == JavaScriptTokenType.OPERATOR and
+ self.string.endswith('=') and
+ self.string not in ('==', '!=', '>=', '<=', '===', '!=='))
+
+ def IsComment(self):
+ """Tests if this token is any part of a comment.
+
+ Returns:
+ True if this token is any part of a comment.
+ """
+ return self.type in JavaScriptTokenType.COMMENT_TYPES
+
+ def IsCode(self):
+ """Tests if this token is code, as opposed to a comment or whitespace."""
+ return self.type not in JavaScriptTokenType.NON_CODE_TYPES
+
+ def __repr__(self):
+ return '<JavaScriptToken: %d, %s, "%s", %r, %r>' % (self.line_number,
+ self.type, self.string,
+ self.values,
+ self.metadata)
diff --git a/tools/closure_linter/build/lib/closure_linter/not_strict_test.py b/tools/closure_linter/build/lib/closure_linter/not_strict_test.py
new file mode 100644
index 0000000000..c92c13ee03
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/not_strict_test.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for gjslint --nostrict.
+
+Tests errors that can be thrown by gjslint when not in strict mode.
+"""
+
+
+
+import os
+import sys
+import unittest
+
+import gflags as flags
+import unittest as googletest
+
+from closure_linter import errors
+from closure_linter import runner
+from closure_linter.common import filetestcase
+
+_RESOURCE_PREFIX = 'closure_linter/testdata'
+
+flags.FLAGS.strict = False
+flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
+flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
+flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
+ 'limited_doc_checks.js')
+
+
+# List of files under testdata to test.
+# We need to list files explicitly since pyglib can't list directories.
+_TEST_FILES = [
+ 'not_strict.js'
+ ]
+
+
+class GJsLintTestSuite(unittest.TestSuite):
+ """Test suite to run a GJsLintTest for each of several files.
+
+ If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
+ testdata to test. Otherwise, _TEST_FILES is used.
+ """
+
+ def __init__(self, tests=()):
+ unittest.TestSuite.__init__(self, tests)
+
+ argv = sys.argv and sys.argv[1:] or []
+ if argv:
+ test_files = argv
+ else:
+ test_files = _TEST_FILES
+ for test_file in test_files:
+ resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
+ self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
+ runner.Run,
+ errors.ByName))
+
+if __name__ == '__main__':
+ # Don't let main parse args; it happens in the TestSuite.
+ googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
diff --git a/tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py b/tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py
new file mode 100644
index 0000000000..e7e08a13c2
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains logic for sorting goog.provide and goog.require statements.
+
+Closurized JavaScript files use goog.provide and goog.require statements at the
+top of the file to manage dependencies. These statements should be sorted
+alphabetically, however, it is common for them to be accompanied by inline
+comments or suppression annotations. In order to sort these statements without
+disrupting their comments and annotations, the association between statements
+and comments/annotations must be maintained while sorting.
+
+ RequireProvideSorter: Handles checking/fixing of provide/require statements.
+"""
+
+
+
+from closure_linter import javascripttokens
+from closure_linter import tokenutil
+
+# Shorthand
+Type = javascripttokens.JavaScriptTokenType
+
+
class RequireProvideSorter(object):
  """Checks for and fixes alphabetization of provide and require statements.

  When alphabetizing, comments on the same line or comments directly above a
  goog.provide or goog.require statement are associated with that statement and
  stay with the statement as it gets sorted.
  """

  def CheckProvides(self, token):
    """Checks alphabetization of goog.provide statements.

    Iterates over tokens in given token stream, identifies goog.provide tokens,
    and checks that they occur in alphabetical order by the object being
    provided.

    Args:
      token: A token in the token stream before any goog.provide tokens.

    Returns:
      The first provide token in the token stream.

      None is returned if all goog.provide statements are already sorted.
    """
    provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
    provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
    sorted_provide_strings = sorted(provide_strings)
    # Out of order iff the observed order differs from the sorted order.
    if provide_strings != sorted_provide_strings:
      return provide_tokens[0]
    return None

  def CheckRequires(self, token):
    """Checks alphabetization of goog.require statements.

    Iterates over tokens in given token stream, identifies goog.require tokens,
    and checks that they occur in alphabetical order by the dependency being
    required.

    Args:
      token: A token in the token stream before any goog.require tokens.

    Returns:
      The first require token in the token stream.

      None is returned if all goog.require statements are already sorted.
    """
    require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
    require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
    sorted_require_strings = sorted(require_strings)
    if require_strings != sorted_require_strings:
      return require_tokens[0]
    return None

  def FixProvides(self, token):
    """Sorts goog.provide statements in the given token stream alphabetically.

    Args:
      token: The first token in the token stream.
    """
    self._FixProvidesOrRequires(
        self._GetRequireOrProvideTokens(token, 'goog.provide'))

  def FixRequires(self, token):
    """Sorts goog.require statements in the given token stream alphabetically.

    Args:
      token: The first token in the token stream.
    """
    self._FixProvidesOrRequires(
        self._GetRequireOrProvideTokens(token, 'goog.require'))

  def _FixProvidesOrRequires(self, tokens):
    """Sorts goog.provide or goog.require statements.

    Args:
      tokens: A list of goog.provide or goog.require tokens in the order they
              appear in the token stream. i.e. the first token in this list must
              be the first goog.provide or goog.require token.
    """
    strings = self._GetRequireOrProvideTokenStrings(tokens)
    sorted_strings = sorted(strings)

    # Make a separate pass to remove any blank lines between goog.require/
    # goog.provide tokens.  Walks backwards from the last to the first token.
    first_token = tokens[0]
    last_token = tokens[-1]
    i = last_token
    while i != first_token and i is not None:
      if i.type is Type.BLANK_LINE:
        tokenutil.DeleteToken(i)
      i = i.previous

    # A map from required/provided object name to tokens that make up the line
    # it was on, including any comments immediately before it or after it on the
    # same line.
    tokens_map = self._GetTokensMap(tokens)

    # Iterate over the map removing all tokens.
    for name in tokens_map:
      tokens_to_delete = tokens_map[name]
      for i in tokens_to_delete:
        tokenutil.DeleteToken(i)

    # Save token to rest of file. Sorted token will be inserted before this.
    # NOTE(review): this reads .next on a token that was just deleted — it
    # assumes tokenutil.DeleteToken leaves the deleted token's own .next
    # pointer intact; confirm against tokenutil.
    rest_of_file = tokens_map[strings[-1]][-1].next

    # Re-add all tokens in the map in alphabetical order.
    insert_after = tokens[0].previous
    for string in sorted_strings:
      for i in tokens_map[string]:
        if rest_of_file:
          tokenutil.InsertTokenBefore(i, rest_of_file)
        else:
          tokenutil.InsertTokenAfter(i, insert_after)
          insert_after = i

  def _GetRequireOrProvideTokens(self, token, token_string):
    """Gets all goog.provide or goog.require tokens in the given token stream.

    Args:
      token: The first token in the token stream.
      token_string: One of 'goog.provide' or 'goog.require' to indicate which
        tokens to find.

    Returns:
      A list of goog.provide or goog.require tokens in the order they appear in
      the token stream.
    """
    tokens = []
    while token:
      if token.type == Type.IDENTIFIER:
        if token.string == token_string:
          tokens.append(token)
        elif token.string not in [
            'goog.provide', 'goog.require', 'goog.setTestOnly']:
          # These 3 identifiers are at the top of the file. So if any other
          # identifier is encountered, return.
          # TODO(user): Once it's decided what ordering goog.require
          # should use, add 'goog.module' to the list above and implement the
          # decision.
          break
      token = token.next

    return tokens

  def _GetRequireOrProvideTokenStrings(self, tokens):
    """Gets a list of strings corresponding to the given list of tokens.

    The string will be the next string in the token stream after each token in
    tokens. This is used to find the object being provided/required by a given
    goog.provide or goog.require token.

    Args:
      tokens: A list of goog.provide or goog.require tokens.

    Returns:
      A list of object names that are being provided or required by the given
      list of tokens. For example:

      ['object.a', 'object.c', 'object.b']
    """
    token_strings = []
    for token in tokens:
      # Skip tokens already removed from the stream (e.g. deleted blank
      # lines) so their names do not appear in the result.
      if not token.is_deleted:
        name = tokenutil.GetStringAfterToken(token)
        token_strings.append(name)
    return token_strings

  def _GetTokensMap(self, tokens):
    """Gets a map from object name to tokens associated with that object.

    Starting from the goog.provide/goog.require token, searches backwards in the
    token stream for any lines that start with a comment. These lines are
    associated with the goog.provide/goog.require token. Also associates any
    tokens on the same line as the goog.provide/goog.require token with that
    token.

    Args:
      tokens: A list of goog.provide or goog.require tokens.

    Returns:
      A dictionary that maps object names to the tokens associated with the
      goog.provide or goog.require of that object name. For example:

      {
        'object.a': [JavaScriptToken, JavaScriptToken, ...],
        'object.b': [...]
      }

      The list of tokens includes any comment lines above the goog.provide or
      goog.require statement and everything after the statement on the same
      line. For example, all of the following would be associated with
      'object.a':

      /** @suppress {extraRequire} */
      goog.require('object.a'); // Some comment.
    """
    tokens_map = {}
    for token in tokens:
      object_name = tokenutil.GetStringAfterToken(token)
      # If the previous line starts with a comment, presume that the comment
      # relates to the goog.require or goog.provide and keep them together when
      # sorting.
      first_token = token
      previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
      while (previous_first_token and
             previous_first_token.IsAnyType(Type.COMMENT_TYPES)):
        first_token = previous_first_token
        previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
            first_token)

      # Find the last token on the line.
      last_token = tokenutil.GetLastTokenInSameLine(token)

      all_tokens = self._GetTokenList(first_token, last_token)
      tokens_map[object_name] = all_tokens
    return tokens_map

  def _GetTokenList(self, first_token, last_token):
    """Gets a list of all tokens from first_token to last_token, inclusive.

    Args:
      first_token: The first token to get.
      last_token: The last token to get.

    Returns:
      A list of all tokens between first_token and last_token, including both
      first_token and last_token.

    Raises:
      Exception: If the token stream ends before last_token is reached.
    """
    token_list = []
    token = first_token
    while token != last_token:
      if not token:
        raise Exception('ran out of tokens')
      token_list.append(token)
      token = token.next
    token_list.append(last_token)

    return token_list

  def GetFixedRequireString(self, token):
    """Get fixed/sorted order of goog.require statements.

    Args:
      token: The first token in the token stream.

    Returns:
      A string for correct sorted order of goog.require.
    """
    return self._GetFixedRequireOrProvideString(
        self._GetRequireOrProvideTokens(token, 'goog.require'))

  def GetFixedProvideString(self, token):
    """Get fixed/sorted order of goog.provide statements.

    Args:
      token: The first token in the token stream.

    Returns:
      A string for correct sorted order of goog.provide.
    """
    return self._GetFixedRequireOrProvideString(
        self._GetRequireOrProvideTokens(token, 'goog.provide'))

  def _GetFixedRequireOrProvideString(self, tokens):
    """Sorts goog.provide or goog.require statements.

    Args:
      tokens: A list of goog.provide or goog.require tokens in the order they
              appear in the token stream. i.e. the first token in this list must
              be the first goog.provide or goog.require token.

    Returns:
      A string for sorted goog.require or goog.provide statements
    """

    # A map from required/provided object name to tokens that make up the line
    # it was on, including any comments immediately before it or after it on the
    # same line.
    tokens_map = self._GetTokensMap(tokens)
    sorted_strings = sorted(tokens_map.keys())

    # Concatenate the token text in sorted order, restoring line breaks at
    # each token that ended a line in the original stream.
    new_order = ''
    for string in sorted_strings:
      for i in tokens_map[string]:
        new_order += i.string
        if i.IsLastInLine():
          new_order += '\n'

    return new_order
diff --git a/tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py b/tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py
new file mode 100644
index 0000000000..fecb6d04da
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for RequireProvideSorter."""
+
+
+
+import unittest as googletest
+from closure_linter import javascripttokens
+from closure_linter import requireprovidesorter
+from closure_linter import testutil
+
+# pylint: disable=g-bad-name
+TokenType = javascripttokens.JavaScriptTokenType
+
+
class RequireProvideSorterTest(googletest.TestCase):
  """Tests for RequireProvideSorter."""

  def testGetFixedProvideString(self):
    """Tests that fixed string contains proper comments also."""
    input_lines = [
        'goog.provide(\'package.xyz\');',
        '/** @suppress {extraprovide} **/',
        'goog.provide(\'package.abcd\');'
    ]

    # The comment line must travel with the provide statement it precedes.
    expected_lines = [
        '/** @suppress {extraprovide} **/',
        'goog.provide(\'package.abcd\');',
        'goog.provide(\'package.xyz\');'
    ]

    token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)

    sorter = requireprovidesorter.RequireProvideSorter()
    fixed_provide_string = sorter.GetFixedProvideString(token)

    self.assertEquals(expected_lines, fixed_provide_string.splitlines())

  def testGetFixedRequireString(self):
    """Tests that fixed string contains proper comments also."""
    input_lines = [
        'goog.require(\'package.xyz\');',
        '/** This is needed for scope. **/',
        'goog.require(\'package.abcd\');'
    ]

    expected_lines = [
        '/** This is needed for scope. **/',
        'goog.require(\'package.abcd\');',
        'goog.require(\'package.xyz\');'
    ]

    token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)

    sorter = requireprovidesorter.RequireProvideSorter()
    fixed_require_string = sorter.GetFixedRequireString(token)

    self.assertEquals(expected_lines, fixed_require_string.splitlines())

  def testFixRequires_removeBlankLines(self):
    """Tests that blank lines are omitted in sorted goog.require statements."""
    input_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassB\');',
        '',
        'goog.require(\'package.subpackage.ClassA\');'
    ]
    expected_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassA\');',
        'goog.require(\'package.subpackage.ClassB\');'
    ]
    token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)

    sorter = requireprovidesorter.RequireProvideSorter()
    sorter.FixRequires(token)

    self.assertEquals(expected_lines, self._GetLines(token))

  # Lowercase prefix keeps unittest from collecting this helper as a test;
  # it is driven by testFixRequires_withTestOnly below.
  def fixRequiresTest_withTestOnly(self, position):
    """Regression-tests sorting even with a goog.setTestOnly statement.

    Args:
      position: The position in the list where to insert the goog.setTestOnly
                statement. Will be used to test all possible combinations for
                this test.
    """
    input_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassB\');',
        'goog.require(\'package.subpackage.ClassA\');'
    ]
    expected_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassA\');',
        'goog.require(\'package.subpackage.ClassB\');'
    ]
    input_lines.insert(position, 'goog.setTestOnly();')
    expected_lines.insert(position, 'goog.setTestOnly();')

    token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)

    sorter = requireprovidesorter.RequireProvideSorter()
    sorter.FixRequires(token)

    self.assertEquals(expected_lines, self._GetLines(token))

  def testFixRequires_withTestOnly(self):
    """Regression-tests sorting even after a goog.setTestOnly statement."""

    # goog.setTestOnly at first line.
    self.fixRequiresTest_withTestOnly(position=0)

    # goog.setTestOnly after goog.provide.
    self.fixRequiresTest_withTestOnly(position=1)

    # goog.setTestOnly before goog.require.
    self.fixRequiresTest_withTestOnly(position=2)

    # goog.setTestOnly after goog.require.
    self.fixRequiresTest_withTestOnly(position=4)

  def _GetLines(self, token):
    """Returns an array of lines based on the specified token stream."""
    lines = []
    line = ''
    while token:
      line += token.string
      if token.IsLastInLine():
        lines.append(line)
        line = ''
      token = token.next
    return lines
+
if __name__ == '__main__':
  # Run all test cases in this module under the unittest runner.
  googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/runner.py b/tools/closure_linter/build/lib/closure_linter/runner.py
new file mode 100644
index 0000000000..04e7fa4ac8
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/runner.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Main lint function. Tokenizes file, runs passes, and feeds to checker."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = 'nnaze@google.com (Nathan Naze)'
+
+import traceback
+
+import gflags as flags
+
+from closure_linter import checker
+from closure_linter import ecmalintrules
+from closure_linter import ecmametadatapass
+from closure_linter import error_check
+from closure_linter import errors
+from closure_linter import javascriptstatetracker
+from closure_linter import javascripttokenizer
+
+from closure_linter.common import error
+from closure_linter.common import htmlutil
+from closure_linter.common import tokens
+
+flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
+ 'List of files with relaxed documentation checks. Will not '
+ 'report errors for missing documentation, some missing '
+ 'descriptions, or methods whose @return tags don\'t have a '
+ 'matching return statement.')
+flags.DEFINE_boolean('error_trace', False,
+ 'Whether to show error exceptions.')
+flags.ADOPT_module_key_flags(checker)
+flags.ADOPT_module_key_flags(ecmalintrules)
+flags.ADOPT_module_key_flags(error_check)
+
+
def _GetLastNonWhiteSpaceToken(start_token):
  """Return the last token in the stream that is not whitespace.

  Args:
    start_token: First token of the stream; the full stream is iterated.

  Returns:
    The final token whose type is neither WHITESPACE nor BLANK_LINE, or
    None if every token is whitespace.
  """
  skippable = frozenset([
      tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])

  last_seen = None
  for current in start_token:
    if current.type not in skippable:
      last_seen = current

  return last_seen
+
+
def _IsHtml(filename):
  """Whether the filename has an HTML extension ('.html' or '.htm')."""
  return filename.endswith(('.html', '.htm'))
+
+
def _Tokenize(fileobj):
  """Tokenize a file.

  Args:
    fileobj: file-like object (or iterable lines) with the source.

  Returns:
    A pair (first_token, mode): the head of the token stream produced by
    the JavaScript tokenizer, and the tokenizer's ending mode.
  """
  js_tokenizer = javascripttokenizer.JavaScriptTokenizer()
  first_token = js_tokenizer.TokenizeFile(fileobj)
  return first_token, js_tokenizer.mode
+
+
def _IsLimitedDocCheck(filename, limited_doc_files):
  """Whether this is a limited-doc file.

  Args:
    filename: The filename.
    limited_doc_files: Iterable of strings. Suffixes of filenames that should
      be limited doc check.

  Returns:
    Whether the file should be limited check.
  """
  return any(filename.endswith(suffix) for suffix in limited_doc_files)
+
+
def Run(filename, error_handler, source=None):
  """Tokenize, run passes, and check the given file.

  Args:
    filename: The path of the file to check.
    error_handler: The error handler to report errors to.
    source: A file-like object with the file source. If omitted, the file will
      be read from the filename path.
  """
  # Track whether we opened the file ourselves so we only close in that case;
  # a caller-supplied source remains the caller's responsibility.
  file_opened_here = False
  if not source:
    try:
      source = open(filename)
      file_opened_here = True
    except IOError:
      error_handler.HandleFile(filename, None)
      error_handler.HandleError(
          error.Error(errors.FILE_NOT_FOUND, 'File not found'))
      error_handler.FinishFile()
      return

  try:
    if _IsHtml(filename):
      source_file = htmlutil.GetScriptLines(source)
    else:
      source_file = source

    token, tokenizer_mode = _Tokenize(source_file)
  finally:
    # Fix: previously the file opened above was never closed (handle leak).
    # NOTE(review): assumes TokenizeFile consumes the stream eagerly, so the
    # handle is no longer needed past this point — confirm in the tokenizer.
    if file_opened_here:
      source.close()

  error_handler.HandleFile(filename, token)

  # If we did not end in the basic mode, this is a failed parse.
  if tokenizer_mode is not javascripttokenizer.JavaScriptModes.TEXT_MODE:
    error_handler.HandleError(
        error.Error(errors.FILE_IN_BLOCK,
                    'File ended in mode "%s".' % tokenizer_mode,
                    _GetLastNonWhiteSpaceToken(token)))

  # Run the ECMA metadata pass; a parse failure yields the token to stop at.
  ecma_pass = ecmametadatapass.EcmaMetaDataPass()
  error_token = RunMetaDataPass(token, ecma_pass, error_handler, filename)

  is_limited_doc_check = (
      _IsLimitedDocCheck(filename, flags.FLAGS.limited_doc_files))

  _RunChecker(token, error_handler,
              is_limited_doc_check,
              is_html=_IsHtml(filename),
              stop_token=error_token)

  error_handler.FinishFile()
+
+
def RunMetaDataPass(start_token, metadata_pass, error_handler, filename=''):
  """Run a metadata pass over a token stream.

  Args:
    start_token: The first token in a token stream.
    metadata_pass: Metadata pass to run.
    error_handler: The error handler to report errors to.
    filename: Filename of the source.

  Returns:
    The token where the error occurred (if any), otherwise None.
  """

  try:
    metadata_pass.Process(start_token)
  # Fix: "except X as e" (Python 2.6+) replaces the Python-2-only
  # "except X, e" form, which is a syntax error on Python 3.
  except ecmametadatapass.ParseError as parse_err:
    if flags.FLAGS.error_trace:
      traceback.print_exc()
    error_token = parse_err.token
    error_msg = str(parse_err)
    error_handler.HandleError(
        error.Error(errors.FILE_DOES_NOT_PARSE,
                    ('Error parsing file at token "%s". Unable to '
                     'check the rest of file.'
                     '\nError "%s"' % (error_token, error_msg)), error_token))
    return error_token
  except Exception:  # pylint: disable=broad-except
    # Any other failure is an internal linter error; report it against the
    # whole file (no specific token) and implicitly return None.
    traceback.print_exc()
    error_handler.HandleError(
        error.Error(
            errors.FILE_DOES_NOT_PARSE,
            'Internal error in %s' % filename))
+
+
def _RunChecker(start_token, error_handler,
                limited_doc_checks, is_html,
                stop_token=None):
  """Run the JavaScript style checker over a token stream.

  Args:
    start_token: First token of the stream to check.
    error_handler: Receives any errors the checker finds.
    limited_doc_checks: Whether documentation checks are relaxed for this file.
    is_html: Whether the source was extracted from an HTML file.
    stop_token: Optional token at which checking stops (e.g. a parse error).
  """
  tracker = javascriptstatetracker.JavaScriptStateTracker()

  js_style_checker = checker.JavaScriptStyleChecker(
      state_tracker=tracker,
      error_handler=error_handler)

  js_style_checker.Check(start_token,
                         is_html=is_html,
                         limited_doc_checks=limited_doc_checks,
                         stop_token=stop_token)
diff --git a/tools/closure_linter/build/lib/closure_linter/runner_test.py b/tools/closure_linter/build/lib/closure_linter/runner_test.py
new file mode 100644
index 0000000000..da5857d309
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/runner_test.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the runner module."""
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import StringIO
+
+
+import mox
+
+
+import unittest as googletest
+
+from closure_linter import errors
+from closure_linter import runner
+from closure_linter.common import error
+from closure_linter.common import errorhandler
+from closure_linter.common import tokens
+
+
class LimitedDocTest(googletest.TestCase):
  """Tests for the _IsLimitedDocCheck suffix-matching helper."""

  def testIsLimitedDocCheck(self):
    """A file is limited-doc when its name ends with a configured suffix."""
    is_limited = runner._IsLimitedDocCheck

    self.assertTrue(is_limited('foo_test.js', ['_test.js']))
    self.assertFalse(is_limited('foo_bar.js', ['_test.js']))

    self.assertTrue(is_limited('foo_moo.js', ['moo.js', 'quack.js']))
    self.assertFalse(is_limited('foo_moo.js', ['woof.js', 'quack.js']))
+
+
class RunnerTest(googletest.TestCase):
  """End-to-end tests for runner.Run using mox mock error handlers."""

  def setUp(self):
    # Fresh mox instance per test; each test drives ReplayAll/VerifyAll itself.
    self.mox = mox.Mox()

  def testRunOnMissingFile(self):
    """Run() on a nonexistent path reports FILE_NOT_FOUND and finishes."""
    mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler)

    def ValidateError(err):
      # Expect a FILE_NOT_FOUND error with no associated token.
      return (isinstance(err, error.Error) and
              err.code is errors.FILE_NOT_FOUND and
              err.token is None)

    # Record the expected handler calls, in order.
    mock_error_handler.HandleFile('does_not_exist.js', None)
    mock_error_handler.HandleError(mox.Func(ValidateError))
    mock_error_handler.FinishFile()

    self.mox.ReplayAll()

    runner.Run('does_not_exist.js', mock_error_handler)

    self.mox.VerifyAll()

  def testBadTokenization(self):
    """A source with an unterminated comment reports FILE_IN_BLOCK."""
    mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler)

    def ValidateError(err):
      # Expect a FILE_IN_BLOCK error anchored at the final '}' token.
      return (isinstance(err, error.Error) and
              err.code is errors.FILE_IN_BLOCK and
              err.token.string == '}')

    mock_error_handler.HandleFile('foo.js', mox.IsA(tokens.Token))
    mock_error_handler.HandleError(mox.Func(ValidateError))
    mock_error_handler.HandleError(mox.IsA(error.Error))
    mock_error_handler.FinishFile()

    self.mox.ReplayAll()

    source = StringIO.StringIO(_BAD_TOKENIZATION_SCRIPT)
    runner.Run('foo.js', mock_error_handler, source)

    self.mox.VerifyAll()
+
+
# JavaScript fixture whose block comment is never closed, so the tokenizer
# cannot return to text mode by end of file (used by testBadTokenization).
_BAD_TOKENIZATION_SCRIPT = """
function foo () {
  var a = 3;
  var b = 2;
  return b + a; /* Comment not closed
}
"""
+
+
if __name__ == '__main__':
  # Run all test cases in this module under the unittest runner.
  googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/scopeutil.py b/tools/closure_linter/build/lib/closure_linter/scopeutil.py
new file mode 100644
index 0000000000..a7ca9b630a
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/scopeutil.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools to match goog.scope alias statements."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import itertools
+
+from closure_linter import ecmametadatapass
+from closure_linter import tokenutil
+from closure_linter.javascripttokens import JavaScriptTokenType
+
+
+
def IsGoogScopeBlock(context):
  """Whether the given context is a goog.scope block.

  This function only checks that the block is a function block inside
  a goog.scope() call.

  TODO(nnaze): Implement goog.scope checks that verify the call is
  in the root context and contains only a single function literal.

  Args:
    context: An EcmaContext of type block.

  Returns:
    Whether the context is a goog.scope block.
  """
  # Must be a block, and specifically the body of a parameterless
  # function literal.
  if context.type != ecmametadatapass.EcmaContext.BLOCK:
    return False
  if not _IsFunctionLiteralBlock(context):
    return False

  # The function must be contained by a group of form "goog.scope(...)":
  # the last code token preceding the group is the goog.scope identifier.
  parent = context.parent
  if not parent or parent.type is not ecmametadatapass.EcmaContext.GROUP:
    return False

  last_code_token = parent.start_token.metadata.last_code
  return bool(last_code_token and
              last_code_token.type is JavaScriptTokenType.IDENTIFIER and
              last_code_token.string == 'goog.scope')
+
+
def _IsFunctionLiteralBlock(block_context):
  """Check if a context is a function literal block (without parameters).

  Example function literal block: 'function() {}'

  Args:
    block_context: An EcmaContext of type block.

  Returns:
    Whether this context is a function literal block.
  """

  # Fix: filter on the token's *type*. The previous predicate compared the
  # token object itself against NON_CODE_TYPES (a set of type constants),
  # which never matched, so comments/whitespace were not skipped — matching
  # the "t.type not in ..." pattern used by _GetVarAssignmentTokens.
  previous_code_tokens_iter = itertools.ifilter(
      lambda token: token.type not in JavaScriptTokenType.NON_CODE_TYPES,
      reversed(block_context.start_token))

  # Ignore the current token
  next(previous_code_tokens_iter, None)

  # Grab the previous three tokens and put them in correct order.
  previous_code_tokens = list(itertools.islice(previous_code_tokens_iter, 3))
  previous_code_tokens.reverse()

  # There aren't three previous tokens.  ("!=" — identity comparison of ints
  # is not reliable.)
  if len(previous_code_tokens) != 3:
    return False

  # Check that the previous three code tokens are "function ()"
  previous_code_token_types = [token.type for token in previous_code_tokens]
  return previous_code_token_types == [
      JavaScriptTokenType.FUNCTION_DECLARATION,
      JavaScriptTokenType.START_PARAMETERS,
      JavaScriptTokenType.END_PARAMETERS]
+
+
def IsInClosurizedNamespace(symbol, closurized_namespaces):
  """Match a goog.scope alias.

  Args:
    symbol: An identifier like 'goog.events.Event'.
    closurized_namespaces: Iterable of valid Closurized namespaces (strings).

  Returns:
    True if symbol is an identifier in a Closurized namespace, otherwise False.
  """
  # A symbol belongs to a namespace only when it is a dotted child of it.
  return any(symbol.startswith(namespace + '.')
             for namespace in closurized_namespaces)
+
+
def _GetVarAssignmentTokens(context):
  """Returns the tokens from context if it is a var assignment.

  Args:
    context: An EcmaContext.

  Returns:
    If a var assignment, the tokens contained within it w/o the trailing
    semicolon.
  """
  if context.type != ecmametadatapass.EcmaContext.VAR:
    return

  # Get the tokens in this statement.
  if context.start_token and context.end_token:
    statement_tokens = tokenutil.GetTokenRange(context.start_token,
                                               context.end_token)
  else:
    # Without a complete token range there is nothing to analyze.
    return

  # And now just those tokens that are actually code.
  is_non_code_type = lambda t: t.type not in JavaScriptTokenType.NON_CODE_TYPES
  # NOTE(review): relies on Python 2's filter() returning a list; under
  # Python 3 the .pop() below would fail on a filter iterator.
  code_tokens = filter(is_non_code_type, statement_tokens)

  # Pop off the semicolon if present.
  if code_tokens and code_tokens[-1].IsType(JavaScriptTokenType.SEMICOLON):
    code_tokens.pop()

  # "var <lvalue> = <rhs>" needs at least four code tokens.
  if len(code_tokens) < 4:
    return

  if (code_tokens[0].IsKeyword('var') and
      code_tokens[1].IsType(JavaScriptTokenType.SIMPLE_LVALUE) and
      code_tokens[2].IsOperator('=')):
    return code_tokens
+
+
def MatchAlias(context):
  """Match an alias statement (some identifier assigned to a variable).

  Example alias: var MyClass = proj.longNamespace.MyClass.

  Args:
    context: An EcmaContext of type EcmaContext.VAR.

  Returns:
    If a valid alias, returns a tuple of alias and symbol, otherwise None.
  """
  code_tokens = _GetVarAssignmentTokens(context)
  if code_tokens is None:
    return

  # The right-hand side must consist solely of identifier/dot tokens,
  # e.g. "var Foo = bar.Foo;".
  if not all(tokenutil.IsIdentifierOrDot(t) for t in code_tokens[3:]):
    return

  alias_token = code_tokens[1]
  symbol_token = code_tokens[3]
  # Mark both tokens as an alias definition to not count them as usages.
  alias_token.metadata.is_alias_definition = True
  symbol_token.metadata.is_alias_definition = True
  return alias_token.string, tokenutil.GetIdentifierForToken(symbol_token)
+
+
def MatchModuleAlias(context):
  """Match an alias statement in a goog.module style import.

  Example alias: var MyClass = goog.require('proj.longNamespace.MyClass').

  Args:
    context: An EcmaContext.

  Returns:
    If a valid alias, returns a tuple of alias and symbol, otherwise None.
  """
  assignment_tokens = _GetVarAssignmentTokens(context)
  if assignment_tokens is None:
    return

  # The right-hand side must be a call to goog.require,
  # e.g. "var Foo = goog.require('bar.Foo');".
  rhs_token = assignment_tokens[3]
  if not (rhs_token.IsType(JavaScriptTokenType.IDENTIFIER) and
          rhs_token.string == 'goog.require'):
    return

  alias_token = assignment_tokens[1]
  required_symbol = tokenutil.GetStringAfterToken(rhs_token)
  if required_symbol:
    # Mark the alias definition so it is not counted as a usage.
    alias_token.metadata.is_alias_definition = True
    return alias_token.string, required_symbol
diff --git a/tools/closure_linter/build/lib/closure_linter/scopeutil_test.py b/tools/closure_linter/build/lib/closure_linter/scopeutil_test.py
new file mode 100644
index 0000000000..722a953900
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/scopeutil_test.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the scopeutil module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+
+import unittest as googletest
+
+from closure_linter import ecmametadatapass
+from closure_linter import scopeutil
+from closure_linter import testutil
+
+
+def _FindContexts(start_token):
+ """Depth first search of all contexts referenced by a token stream.
+
+ Includes contexts' parents, which might not be directly referenced
+ by any token in the stream.
+
+ Args:
+ start_token: First token in the token stream.
+
+ Yields:
+ All contexts referenced by this token stream.
+ """
+
+ seen_contexts = set()
+
+ # For each token, yield the context if we haven't seen it before.
+ for token in start_token:
+
+ token_context = token.metadata.context
+ contexts = [token_context]
+
+ # Also grab all the context's ancestors.
+ parent = token_context.parent
+ while parent:
+ contexts.append(parent)
+ parent = parent.parent
+
+ # Yield each of these contexts if we've not seen them.
+ for context in contexts:
+ if context not in seen_contexts:
+ yield context
+
+ seen_contexts.add(context)
+
+
+def _FindFirstContextOfType(token, context_type):
+  """Returns the first context of the given type, or None if none exists."""
+  for context in _FindContexts(token):
+    if context.type == context_type:
+      return context
+
+
+def _ParseAssignment(script):
+  """Tokenizes the script and returns its first EcmaContext.VAR context."""
+  start_token = testutil.TokenizeSourceAndRunEcmaPass(script)
+  statement = _FindFirstContextOfType(
+      start_token, ecmametadatapass.EcmaContext.VAR)
+  return statement
+
+
+class StatementTest(googletest.TestCase):
+
+ def assertAlias(self, expected_match, script):
+ statement = _ParseAssignment(script)
+ match = scopeutil.MatchAlias(statement)
+ self.assertEquals(expected_match, match)
+
+ def assertModuleAlias(self, expected_match, script):
+ statement = _ParseAssignment(script)
+ match = scopeutil.MatchModuleAlias(statement)
+ self.assertEquals(expected_match, match)
+
+ def testSimpleAliases(self):
+ self.assertAlias(
+ ('foo', 'goog.foo'),
+ 'var foo = goog.foo;')
+
+ self.assertAlias(
+ ('foo', 'goog.foo'),
+ 'var foo = goog.foo') # No semicolon
+
+ def testAliasWithComment(self):
+ self.assertAlias(
+ ('Component', 'goog.ui.Component'),
+ 'var Component = /* comment */ goog.ui.Component;')
+
+ def testMultilineAlias(self):
+ self.assertAlias(
+ ('Component', 'goog.ui.Component'),
+ 'var Component = \n goog.ui.\n Component;')
+
+ def testNonSymbolAliasVarStatements(self):
+ self.assertAlias(None, 'var foo = 3;')
+ self.assertAlias(None, 'var foo = function() {};')
+ self.assertAlias(None, 'var foo = bar ? baz : qux;')
+
+ def testModuleAlias(self):
+ self.assertModuleAlias(
+ ('foo', 'goog.foo'),
+ 'var foo = goog.require("goog.foo");')
+ self.assertModuleAlias(
+ None,
+ 'var foo = goog.require(notastring);')
+
+
+class ScopeBlockTest(googletest.TestCase):
+
+ @staticmethod
+ def _GetBlocks(source):
+ start_token = testutil.TokenizeSourceAndRunEcmaPass(source)
+ for context in _FindContexts(start_token):
+ if context.type is ecmametadatapass.EcmaContext.BLOCK:
+ yield context
+
+ def assertNoBlocks(self, script):
+ blocks = list(self._GetBlocks(script))
+ self.assertEquals([], blocks)
+
+ def testNotBlocks(self):
+ # Ensure these are not considered blocks.
+ self.assertNoBlocks('goog.scope(if{});')
+ self.assertNoBlocks('goog.scope(for{});')
+ self.assertNoBlocks('goog.scope(switch{});')
+ self.assertNoBlocks('goog.scope(function foo{});')
+
+ def testNonScopeBlocks(self):
+
+ blocks = list(self._GetBlocks('goog.scope(try{});'))
+ self.assertEquals(1, len(blocks))
+ self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+
+ blocks = list(self._GetBlocks('goog.scope(function(a,b){});'))
+ self.assertEquals(1, len(blocks))
+ self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+
+ blocks = list(self._GetBlocks('goog.scope(try{} catch(){});'))
+ # Two blocks: try and catch.
+ self.assertEquals(2, len(blocks))
+ self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+ self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+
+ blocks = list(self._GetBlocks('goog.scope(try{} catch(){} finally {});'))
+ self.assertEquals(3, len(blocks))
+ self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+ self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+ self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+
+
+class AliasTest(googletest.TestCase):
+
+ def setUp(self):
+ self.start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
+
+ def testMatchAliasStatement(self):
+ matches = set()
+ for context in _FindContexts(self.start_token):
+ match = scopeutil.MatchAlias(context)
+ if match:
+ matches.add(match)
+
+ self.assertEquals(
+ set([('bar', 'baz'),
+ ('foo', 'this.foo_'),
+ ('Component', 'goog.ui.Component'),
+ ('MyClass', 'myproject.foo.MyClass'),
+ ('NonClosurizedClass', 'aaa.bbb.NonClosurizedClass')]),
+ matches)
+
+ def testMatchAliasStatement_withClosurizedNamespaces(self):
+
+ closurized_namepaces = frozenset(['goog', 'myproject'])
+
+ matches = set()
+ for context in _FindContexts(self.start_token):
+ match = scopeutil.MatchAlias(context)
+ if match:
+ unused_alias, symbol = match
+ if scopeutil.IsInClosurizedNamespace(symbol, closurized_namepaces):
+ matches.add(match)
+
+ self.assertEquals(
+ set([('MyClass', 'myproject.foo.MyClass'),
+ ('Component', 'goog.ui.Component')]),
+ matches)
+
+_TEST_SCRIPT = """
+goog.scope(function() {
+ var Component = goog.ui.Component; // scope alias
+ var MyClass = myproject.foo.MyClass; // scope alias
+
+ // Scope alias of non-Closurized namespace.
+ var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
+
+ var foo = this.foo_; // non-scope object property alias
+ var bar = baz; // variable alias
+
+ var component = new Component();
+});
+
+"""
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/statetracker.py b/tools/closure_linter/build/lib/closure_linter/statetracker.py
new file mode 100644
index 0000000000..52e363972f
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/statetracker.py
@@ -0,0 +1,1294 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Light weight EcmaScript state tracker that reads tokens and tracks state."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+import re
+
+from closure_linter import javascripttokenizer
+from closure_linter import javascripttokens
+from closure_linter import tokenutil
+from closure_linter import typeannotation
+
+# Shorthand
+Type = javascripttokens.JavaScriptTokenType
+
+
+class DocFlag(object):
+  """Generic doc flag object.
+
+  Attributes:
+    flag_type: param, return, define, type, etc.
+    flag_token: The flag token.
+    type_start_token: The first token specifying the flag type,
+      including braces.
+    type_end_token: The last token specifying the flag type,
+      including braces.
+    type: The type spec string.
+    jstype: The type spec, a TypeAnnotation instance.
+    name_token: The token specifying the flag name.
+    name: The flag name
+    description_start_token: The first token in the description.
+    description_end_token: The end token in the description.
+    description: The description.
+  """
+
+  # Please keep these lists alphabetized.
+
+  # The list of standard jsdoc tags is from the Closure Compiler annotation
+  # set (NOTE(review): the upstream comment lost its reference URL).
+  STANDARD_DOC = frozenset([
+      'author',
+      'bug',
+      'classTemplate',
+      'consistentIdGenerator',
+      'const',
+      'constructor',
+      'define',
+      'deprecated',
+      'dict',
+      'enum',
+      'export',
+      'expose',
+      'extends',
+      'externs',
+      'fileoverview',
+      'idGenerator',
+      'implements',
+      'implicitCast',
+      'interface',
+      'lends',
+      'license',
+      'ngInject',  # This annotation is specific to AngularJS.
+      'noalias',
+      'nocompile',
+      'nosideeffects',
+      'override',
+      'owner',
+      'package',
+      'param',
+      'preserve',
+      'private',
+      'protected',
+      'public',
+      'return',
+      'see',
+      'stableIdGenerator',
+      'struct',
+      'supported',
+      'template',
+      'this',
+      'type',
+      'typedef',
+      'unrestricted',
+  ])
+
+  # Non-standard tags that are nevertheless accepted by this linter.
+  ANNOTATION = frozenset(['preserveTry', 'suppress'])
+
+  LEGAL_DOC = STANDARD_DOC | ANNOTATION
+
+  # Includes all Closure Compiler @suppress types.
+  # Not all of these annotations are interpreted by Closure Linter.
+  #
+  # Specific cases:
+  # - accessControls is supported by the compiler at the expression
+  #   and method level to suppress warnings about private/protected
+  #   access (method level applies to all references in the method).
+  #   The linter mimics the compiler behavior.
+  SUPPRESS_TYPES = frozenset([
+      'accessControls',
+      'ambiguousFunctionDecl',
+      'checkDebuggerStatement',
+      'checkRegExp',
+      'checkStructDictInheritance',
+      'checkTypes',
+      'checkVars',
+      'const',
+      'constantProperty',
+      'deprecated',
+      'duplicate',
+      'es5Strict',
+      'externsValidation',
+      'extraProvide',
+      'extraRequire',
+      'fileoverviewTags',
+      'globalThis',
+      'internetExplorerChecks',
+      'invalidCasts',
+      'missingProperties',
+      'missingProvide',
+      'missingRequire',
+      'missingReturn',
+      'nonStandardJsDocs',
+      'strictModuleDepCheck',
+      'suspiciousCode',
+      'tweakValidation',
+      'typeInvalidation',
+      'undefinedNames',
+      'undefinedVars',
+      'underscore',
+      'unknownDefines',
+      'unnecessaryCasts',
+      'unusedPrivateMembers',
+      'uselessCode',
+      'visibility',
+      'with',
+  ])
+
+  # Flags that are followed by a free-form description.
+  HAS_DESCRIPTION = frozenset([
+      'define',
+      'deprecated',
+      'desc',
+      'fileoverview',
+      'license',
+      'param',
+      'preserve',
+      'return',
+      'supported',
+  ])
+
+  # Docflags whose argument should be parsed using the typeannotation parser.
+  HAS_TYPE = frozenset([
+      'const',
+      'define',
+      'enum',
+      'extends',
+      'final',
+      'implements',
+      'mods',
+      'package',
+      'param',
+      'private',
+      'protected',
+      'public',
+      'return',
+      'suppress',
+      'type',
+      'typedef',
+  ])
+
+  # Docflags for which it's ok to omit the type (flag without an argument).
+  CAN_OMIT_TYPE = frozenset([
+      'const',
+      'enum',
+      'final',
+      'package',
+      'private',
+      'protected',
+      'public',
+      'suppress',  # We'll raise a separate INCORRECT_SUPPRESS_SYNTAX instead.
+  ])
+
+  # Docflags that only take a type as an argument and should not parse a
+  # following description.
+  TYPE_ONLY = frozenset([
+      'const',
+      'enum',
+      'extends',
+      'implements',
+      'package',
+      'suppress',
+      'type',
+  ])
+
+  HAS_NAME = frozenset(['param'])
+
+  # Matches a comment line that is blank apart from the '*' doc prefix.
+  EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
+  EMPTY_STRING = re.compile(r'^\s*$')
+
+  def __init__(self, flag_token, error_handler=None):
+    """Creates the DocFlag object and attaches it to the given start token.
+
+    Args:
+      flag_token: The starting token of the flag.
+      error_handler: An optional error handler for errors occurring while
+        parsing the doctype.
+    """
+    self.flag_token = flag_token
+    # '@param' -> 'param'; lstrip removes the leading '@' only.
+    self.flag_type = flag_token.string.strip().lstrip('@')
+
+    # Extract type, if applicable.
+    self.type = None
+    self.jstype = None
+    self.type_start_token = None
+    self.type_end_token = None
+    if self.flag_type in self.HAS_TYPE:
+      # Look for a braced type spec ('{...}') before the flag ends.
+      brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE],
+                                    Type.FLAG_ENDING_TYPES)
+      if brace:
+        end_token, contents = _GetMatchingEndBraceAndContents(brace)
+        self.type = contents
+        self.jstype = typeannotation.Parse(brace, end_token,
+                                           error_handler)
+        self.type_start_token = brace
+        self.type_end_token = end_token
+      elif (self.flag_type in self.TYPE_ONLY and
+            flag_token.next.type not in Type.FLAG_ENDING_TYPES and
+            flag_token.line_number == flag_token.next.line_number):
+        # b/10407058. If the flag is expected to be followed by a type then
+        # search for type in same line only. If no token after flag in same
+        # line then conclude that no type is specified.
+        self.type_start_token = flag_token.next
+        self.type_end_token, self.type = _GetEndTokenAndContents(
+            self.type_start_token)
+        if self.type is not None:
+          self.type = self.type.strip()
+          self.jstype = typeannotation.Parse(flag_token, self.type_end_token,
+                                             error_handler)
+
+    # Extract name, if applicable.
+    self.name_token = None
+    self.name = None
+    if self.flag_type in self.HAS_NAME:
+      # Handle bad case, name could be immediately after flag token.
+      self.name_token = _GetNextPartialIdentifierToken(flag_token)
+
+      # Handle good case, if found token is after type start, look for
+      # a identifier (substring to cover cases like [cnt] b/4197272) after
+      # type end, since types contain identifiers.
+      if (self.type and self.name_token and
+          tokenutil.Compare(self.name_token, self.type_start_token) > 0):
+        self.name_token = _GetNextPartialIdentifierToken(self.type_end_token)
+
+      if self.name_token:
+        self.name = self.name_token.string
+
+    # Extract description, if applicable.
+    self.description_start_token = None
+    self.description_end_token = None
+    self.description = None
+    if self.flag_type in self.HAS_DESCRIPTION:
+      # The description starts after whichever of name/type comes last.
+      search_start_token = flag_token
+      if self.name_token and self.type_end_token:
+        if tokenutil.Compare(self.type_end_token, self.name_token) > 0:
+          search_start_token = self.type_end_token
+        else:
+          search_start_token = self.name_token
+      elif self.name_token:
+        search_start_token = self.name_token
+      elif self.type:
+        search_start_token = self.type_end_token
+
+      interesting_token = tokenutil.Search(search_start_token,
+          Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
+      if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES:
+        self.description_start_token = interesting_token
+        self.description_end_token, self.description = (
+            _GetEndTokenAndContents(interesting_token))
+
+  def HasType(self):
+    """Returns whether this flag should have a type annotation."""
+    return self.flag_type in self.HAS_TYPE
+
+  def __repr__(self):
+    return '<Flag: %s, type:%s>' % (self.flag_type, repr(self.jstype))
+
+
+class DocComment(object):
+  """JavaScript doc comment object.
+
+  Attributes:
+    ordered_params: Ordered list of parameters documented.
+    start_token: The token that starts the doc comment.
+    end_token: The token that ends the doc comment.
+    suppressions: Map of suppression type to the token that added it.
+    invalidated: True once Invalidate() has been called (see Invalidate).
+  """
+  def __init__(self, start_token):
+    """Create the doc comment object.
+
+    Args:
+      start_token: The first token in the doc comment.
+    """
+    # Name-mangled: the flag list is private to this class; use AddFlag /
+    # GetDocFlags / GetFlag to access it.
+    self.__flags = []
+    self.start_token = start_token
+    self.end_token = None
+    self.suppressions = {}
+    self.invalidated = False
+
+  @property
+  def ordered_params(self):
+    """Gives the list of parameter names as a list of strings."""
+    params = []
+    for flag in self.__flags:
+      if flag.flag_type == 'param' and flag.name:
+        params.append(flag.name)
+    return params
+
+  def Invalidate(self):
+    """Indicate that the JSDoc is well-formed but we had problems parsing it.
+
+    This is a short-circuiting mechanism so that we don't emit false
+    positives about well-formed doc comments just because we don't support
+    hot new syntaxes.
+    """
+    self.invalidated = True
+
+  def IsInvalidated(self):
+    """Test whether Invalidate() has been called."""
+    return self.invalidated
+
+  def AddSuppression(self, token):
+    """Add a new error suppression flag.
+
+    Args:
+      token: The suppression flag token.
+    """
+    # attached_object is the DocFlag built in DocFlagPass; each identifier
+    # in its type annotation names one suppression type.
+    flag = token and token.attached_object
+    if flag and flag.jstype:
+      for suppression in flag.jstype.IterIdentifiers():
+        self.suppressions[suppression] = token
+
+  def SuppressionOnly(self):
+    """Returns whether this comment contains only suppression flags."""
+    # A comment with no flags at all does not count as suppression-only.
+    if not self.__flags:
+      return False
+
+    for flag in self.__flags:
+      if flag.flag_type != 'suppress':
+        return False
+
+    return True
+
+  def AddFlag(self, flag):
+    """Add a new document flag.
+
+    Args:
+      flag: DocFlag object.
+    """
+    self.__flags.append(flag)
+
+  def InheritsDocumentation(self):
+    """Test if the jsdoc implies documentation inheritance.
+
+    Returns:
+      True if documentation may be pulled off the superclass.
+    """
+    return self.HasFlag('inheritDoc') or self.HasFlag('override')
+
+  def HasFlag(self, flag_type):
+    """Test if the given flag has been set.
+
+    Args:
+      flag_type: The type of the flag to check.
+
+    Returns:
+      True if the flag is set.
+    """
+    for flag in self.__flags:
+      if flag.flag_type == flag_type:
+        return True
+    return False
+
+  def GetFlag(self, flag_type):
+    """Gets the last flag of the given type.
+
+    Args:
+      flag_type: The type of the flag to get.
+
+    Returns:
+      The last instance of the given flag type in this doc comment, or
+      None if no flag of that type is present.
+    """
+    for flag in reversed(self.__flags):
+      if flag.flag_type == flag_type:
+        return flag
+
+  def GetDocFlags(self):
+    """Return the doc flags for this comment."""
+    return list(self.__flags)
+
+  def _YieldDescriptionTokens(self):
+    """Yields the comment tokens that form the leading description."""
+    for token in self.start_token:
+
+      # Stop at the end of the comment or at the first doc flag.
+      if (token is self.end_token or
+          token.type is javascripttokens.JavaScriptTokenType.DOC_FLAG or
+          token.type not in javascripttokens.JavaScriptTokenType.COMMENT_TYPES):
+        return
+
+      if token.type not in [
+          javascripttokens.JavaScriptTokenType.START_DOC_COMMENT,
+          javascripttokens.JavaScriptTokenType.END_DOC_COMMENT,
+          javascripttokens.JavaScriptTokenType.DOC_PREFIX]:
+        yield token
+
+  @property
+  def description(self):
+    # The free-form text before the first doc flag.
+    return tokenutil.TokensToString(
+        self._YieldDescriptionTokens())
+
+  def GetTargetIdentifier(self):
+    """Returns the identifier (as a string) that this is a comment for.
+
+    Note that this uses method uses GetIdentifierForToken to get the full
+    identifier, even if broken up by whitespace, newlines, or comments,
+    and thus could be longer than GetTargetToken().string.
+
+    Returns:
+      The identifier for the token this comment is for.
+    """
+    token = self.GetTargetToken()
+    if token:
+      return tokenutil.GetIdentifierForToken(token)
+
+  def GetTargetToken(self):
+    """Get this comment's target token.
+
+    Returns:
+      The token that is the target of this comment, or None if there isn't one.
+    """
+
+    # File overviews describe the file, not a token.
+    if self.HasFlag('fileoverview'):
+      return
+
+    skip_types = frozenset([
+        Type.WHITESPACE,
+        Type.BLANK_LINE,
+        Type.START_PAREN])
+
+    target_types = frozenset([
+        Type.FUNCTION_NAME,
+        Type.IDENTIFIER,
+        Type.SIMPLE_LVALUE])
+
+    token = self.end_token.next
+    while token:
+      if token.type in target_types:
+        return token
+
+      # Handles the case of a comment on "var foo = ..."
+      if token.IsKeyword('var'):
+        next_code_token = tokenutil.CustomSearch(
+            token,
+            lambda t: t.type not in Type.NON_CODE_TYPES)
+
+        if (next_code_token and
+            next_code_token.IsType(Type.SIMPLE_LVALUE)):
+          return next_code_token
+
+        return
+
+      # Handles the case of a comment on "function foo () {}"
+      if token.type is Type.FUNCTION_DECLARATION:
+        next_code_token = tokenutil.CustomSearch(
+            token,
+            lambda t: t.type not in Type.NON_CODE_TYPES)
+
+        if next_code_token.IsType(Type.FUNCTION_NAME):
+          return next_code_token
+
+        return
+
+      # Skip types will end the search.
+      if token.type not in skip_types:
+        return
+
+      token = token.next
+
+  def CompareParameters(self, params):
+    """Computes the edit distance and list from the function params to the docs.
+
+    Uses the Levenshtein edit distance algorithm, with code modified from
+    http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python
+
+    Args:
+      params: The parameter list for the function declaration.
+
+    Returns:
+      The edit distance, the edit list.
+    """
+    source_len, target_len = len(self.ordered_params), len(params)
+    edit_lists = [[]]
+    distance = [[]]
+    for i in range(target_len+1):
+      edit_lists[0].append(['I'] * i)
+      distance[0].append(i)
+
+    for j in range(1, source_len+1):
+      edit_lists.append([['D'] * j])
+      distance.append([j])
+
+    for i in range(source_len):
+      for j in range(target_len):
+        cost = 1
+        if self.ordered_params[i] == params[j]:
+          cost = 0
+
+        deletion = distance[i][j+1] + 1
+        insertion = distance[i+1][j] + 1
+        substitution = distance[i][j] + cost
+
+        edit_list = None
+        best = None
+        if deletion <= insertion and deletion <= substitution:
+          # Deletion is best.
+          best = deletion
+          edit_list = list(edit_lists[i][j+1])
+          edit_list.append('D')
+
+        elif insertion <= substitution:
+          # Insertion is best.
+          best = insertion
+          edit_list = list(edit_lists[i+1][j])
+          edit_list.append('I')
+          edit_lists[i+1].append(edit_list)
+
+        else:
+          # Substitution is best.
+          best = substitution
+          edit_list = list(edit_lists[i][j])
+          if cost:
+            edit_list.append('S')
+          else:
+            edit_list.append('=')
+
+          edit_lists[i+1].append(edit_list)
+        distance[i+1].append(best)
+
+    return distance[source_len][target_len], edit_lists[source_len][target_len]
+
+  def __repr__(self):
+    """Returns a string representation of this object.
+
+    Returns:
+      A string representation of this object.
+    """
+    return '<DocComment: %s, %s>' % (
+        str(self.ordered_params), str(self.__flags))
+
+
+#
+# Helper methods used by DocFlag and DocComment to parse out flag information.
+#
+
+
+def _GetMatchingEndBraceAndContents(start_brace):
+  """Returns the matching end brace and contents between the two braces.
+
+  If any FLAG_ENDING_TYPE token is encountered before a matching end brace, then
+  that token is used as the matching ending token. Contents will have all
+  comment prefixes stripped out of them, and all comment prefixes in between the
+  start and end tokens will be split out into separate DOC_PREFIX tokens.
+
+  Args:
+    start_brace: The DOC_START_BRACE token immediately before desired contents.
+
+  Returns:
+    The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string
+    of the contents between the matching tokens, minus any comment prefixes.
+  """
+  open_count = 1
+  close_count = 0
+  contents = []
+
+  # We don't consider the start brace part of the type string.
+  token = start_brace.next
+  while open_count != close_count:
+    if token.type == Type.DOC_START_BRACE:
+      open_count += 1
+    elif token.type == Type.DOC_END_BRACE:
+      close_count += 1
+
+    if token.type != Type.DOC_PREFIX:
+      contents.append(token.string)
+
+    # Bail out on an unterminated brace: the ending token stands in for
+    # the missing close brace.
+    if token.type in Type.FLAG_ENDING_TYPES:
+      break
+    token = token.next
+
+  # Don't include the end token (end brace, end doc comment, etc.) in type.
+  # Its string was already appended above, hence dropping the last element.
+  token = token.previous
+  contents = contents[:-1]
+
+  return token, ''.join(contents)
+
+
+def _GetNextPartialIdentifierToken(start_token):
+  """Returns the first token having identifier as substring after a token.
+
+  Searches each token after the start to see if it contains an identifier.
+  If found, token is returned. If no identifier is found returns None.
+  Search is abandoned when a FLAG_ENDING_TYPE token is found.
+
+  Args:
+    start_token: The token to start searching after.
+
+  Returns:
+    The token found containing identifier, None otherwise.
+  """
+  token = start_token.next
+
+  while token and token.type not in Type.FLAG_ENDING_TYPES:
+    match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search(
+        token.string)
+    # NOTE(review): only COMMENT tokens qualify — flag names live inside the
+    # doc comment text; confirm that skipping non-comment matches is intended.
+    if match is not None and token.type == Type.COMMENT:
+      return token
+
+    token = token.next
+
+  return None
+
+
+def _GetEndTokenAndContents(start_token):
+ """Returns last content token and all contents before FLAG_ENDING_TYPE token.
+
+ Comment prefixes are split into DOC_PREFIX tokens and stripped from the
+ returned contents.
+
+ Args:
+ start_token: The token immediately before the first content token.
+
+ Returns:
+ The last content token and a string of all contents including start and
+ end tokens, with comment prefixes stripped.
+ """
+ iterator = start_token
+ last_line = iterator.line_number
+ last_token = None
+ contents = ''
+ doc_depth = 0
+ while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
+ if (iterator.IsFirstInLine() and
+ DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
+ # If we have a blank comment line, consider that an implicit
+ # ending of the description. This handles a case like:
+ #
+ # * @return {boolean} True
+ # *
+ # * Note: This is a sentence.
+ #
+ # The note is not part of the @return description, but there was
+ # no definitive ending token. Rather there was a line containing
+ # only a doc comment prefix or whitespace.
+ break
+
+ # b/2983692
+ # don't prematurely match against a @flag if inside a doc flag
+ # need to think about what is the correct behavior for unterminated
+ # inline doc flags
+ if (iterator.type == Type.DOC_START_BRACE and
+ iterator.next.type == Type.DOC_INLINE_FLAG):
+ doc_depth += 1
+ elif (iterator.type == Type.DOC_END_BRACE and
+ doc_depth > 0):
+ doc_depth -= 1
+
+ if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
+ contents += iterator.string
+ last_token = iterator
+
+ iterator = iterator.next
+ if iterator.line_number != last_line:
+ contents += '\n'
+ last_line = iterator.line_number
+
+ end_token = last_token
+ if DocFlag.EMPTY_STRING.match(contents):
+ contents = None
+ else:
+ # Strip trailing newline.
+ contents = contents[:-1]
+
+ return end_token, contents
+
+
+class Function(object):
+  """Data about a JavaScript function.
+
+  Attributes:
+    block_depth: Block depth the function began at.
+    doc: The DocComment associated with the function.
+    has_return: If the function has a return value.
+    has_throw: If the function contains a throw statement.
+    has_this: If the function references the 'this' object.
+    is_assigned: If the function is part of an assignment.
+    is_constructor: If the function is a constructor.
+    is_interface: If the function is documented as an interface.
+    name: The name of the function, whether given in the function keyword or
+        as the lvalue the function is assigned to.
+    start_token: First token of the function (the function' keyword token).
+    end_token: Last token of the function (the closing '}' token).
+    parameters: List of parameter names.
+  """
+
+  def __init__(self, block_depth, is_assigned, doc, name):
+    self.block_depth = block_depth
+    self.is_assigned = is_assigned
+    # None (falsy) rather than False when there is no doc comment; truthiness
+    # checks elsewhere still behave correctly.
+    self.is_constructor = doc and doc.HasFlag('constructor')
+    self.is_interface = doc and doc.HasFlag('interface')
+    self.has_return = False
+    self.has_throw = False
+    self.has_this = False
+    self.name = name
+    self.doc = doc
+    # start/end tokens and parameters are filled in by the tracker as the
+    # token stream is processed.
+    self.start_token = None
+    self.end_token = None
+    self.parameters = None
+
+
+class StateTracker(object):
+ """EcmaScript state tracker.
+
+ Tracks block depth, function names, etc. within an EcmaScript token stream.
+ """
+
+ OBJECT_LITERAL = 'o'
+ CODE = 'c'
+
+  def __init__(self, doc_flag=DocFlag):
+    """Initializes a JavaScript token stream state tracker.
+
+    Args:
+      doc_flag: An optional custom DocFlag used for validating
+          documentation flags.
+    """
+    self._doc_flag = doc_flag
+    # Reset() establishes all of the per-file mutable state.
+    self.Reset()
+
+  def Reset(self):
+    """Resets the state tracker to prepare for processing a new page."""
+    self._block_depth = 0
+    self._is_block_close = False
+    self._paren_depth = 0
+    # Stack of Function objects for the currently-open functions.
+    self._function_stack = []
+    self._functions_by_name = {}
+    self._last_comment = None
+    self._doc_comment = None
+    self._cumulative_params = None
+    # Block type (OBJECT_LITERAL / CODE) per open block; the top of this
+    # stack is consulted by InObjectLiteral().
+    self._block_types = []
+    self._last_non_space_token = None
+    self._last_line = None
+    self._first_token = None
+    # Identifiers that already have documentation attached
+    # (maintained by code outside this view).
+    self._documented_identifiers = set()
+    self._variables_in_scope = []
+
+ def DocFlagPass(self, start_token, error_handler):
+ """Parses doc flags.
+
+ This pass needs to be executed before the aliaspass and we don't want to do
+ a full-blown statetracker dry run for these.
+
+ Args:
+ start_token: The token at which to start iterating
+ error_handler: An error handler for error reporting.
+ """
+ if not start_token:
+ return
+ doc_flag_types = (Type.DOC_FLAG, Type.DOC_INLINE_FLAG)
+ for token in start_token:
+ if token.type in doc_flag_types:
+ token.attached_object = self._doc_flag(token, error_handler)
+
+ def InFunction(self):
+ """Returns true if the current token is within a function.
+
+ Returns:
+ True if the current token is within a function.
+ """
+ return bool(self._function_stack)
+
+ def InConstructor(self):
+ """Returns true if the current token is within a constructor.
+
+ Returns:
+ True if the current token is within a constructor.
+ """
+ return self.InFunction() and self._function_stack[-1].is_constructor
+
+  def InInterfaceMethod(self):
+    """Returns true if the current token is within an interface method.
+
+    Returns:
+      True if the current token is within an interface method.
+    """
+    if self.InFunction():
+      if self._function_stack[-1].is_interface:
+        return True
+      else:
+        # A Foo.prototype.bar method inherits interface-ness from the
+        # class function Foo, if we have already seen its declaration.
+        # NOTE(review): assumes name is always a string; a None name would
+        # raise AttributeError here — confirm upstream always sets it.
+        name = self._function_stack[-1].name
+        prototype_index = name.find('.prototype.')
+        if prototype_index != -1:
+          class_function_name = name[0:prototype_index]
+          if (class_function_name in self._functions_by_name and
+              self._functions_by_name[class_function_name].is_interface):
+            return True
+
+    return False
+
+ def InTopLevelFunction(self):
+ """Returns true if the current token is within a top level function.
+
+ Returns:
+ True if the current token is within a top level function.
+ """
+ return len(self._function_stack) == 1 and self.InTopLevel()
+
+ def InAssignedFunction(self):
+ """Returns true if the current token is within a function variable.
+
+ Returns:
+ True if if the current token is within a function variable
+ """
+ return self.InFunction() and self._function_stack[-1].is_assigned
+
+ def IsFunctionOpen(self):
+ """Returns true if the current token is a function block open.
+
+ Returns:
+ True if the current token is a function block open.
+ """
+ return (self._function_stack and
+ self._function_stack[-1].block_depth == self._block_depth - 1)
+
+ def IsFunctionClose(self):
+ """Returns true if the current token is a function block close.
+
+ Returns:
+ True if the current token is a function block close.
+ """
+ return (self._function_stack and
+ self._function_stack[-1].block_depth == self._block_depth)
+
+ def InBlock(self):
+ """Returns true if the current token is within a block.
+
+ Returns:
+ True if the current token is within a block.
+ """
+ return bool(self._block_depth)
+
+  def IsBlockClose(self):
+    """Returns true if the current token is a block close.
+
+    Returns:
+      True if the current token is a block close.
+    """
+    # Flag maintained by the token-handling pass (not shown in this view);
+    # initialized to False in Reset().
+    return self._is_block_close
+
+ def InObjectLiteral(self):
+ """Returns true if the current token is within an object literal.
+
+ Returns:
+ True if the current token is within an object literal.
+ """
+ return self._block_depth and self._block_types[-1] == self.OBJECT_LITERAL
+
+  def InObjectLiteralDescendant(self):
+    """Returns true if the current token has an object literal ancestor.
+
+    Returns:
+      True if the current token has an object literal ancestor.
+    """
+    # True if any enclosing block, not just the innermost, is a literal.
+    return self.OBJECT_LITERAL in self._block_types
+
+ def InParentheses(self):
+ """Returns true if the current token is within parentheses.
+
+ Returns:
+ True if the current token is within parentheses.
+ """
+ return bool(self._paren_depth)
+
+  def ParenthesesDepth(self):
+    """Returns the number of parens surrounding the token.
+
+    Returns:
+      The number of parenthesis surrounding the token.
+    """
+    # Counter maintained by the token-handling pass (not shown in this view).
+    return self._paren_depth
+
+  def BlockDepth(self):
+    """Returns the number of blocks in which the token is nested.
+
+    Returns:
+      The number of blocks in which the token is nested.
+    """
+    # Counter maintained by the token-handling pass (not shown in this view).
+    return self._block_depth
+
+  def FunctionDepth(self):
+    """Returns the number of functions in which the token is nested.
+
+    Returns:
+      The number of functions in which the token is nested.
+    """
+    # One stack entry per currently-open function.
+    return len(self._function_stack)
+
+ def InTopLevel(self):
+ """Whether we are at the top level in the class.
+
+ This function call is language specific. In some languages like
+ JavaScript, a function is top level if it is not inside any parenthesis.
+ In languages such as ActionScript, a function is top level if it is directly
+ within a class.
+ """
+ raise TypeError('Abstract method InTopLevel not implemented')
+
+ def GetBlockType(self, token):
+ """Determine the block type given a START_BLOCK token.
+
+ Code blocks come after parameters, keywords like else, and closing parens.
+
+ Args:
+ token: The current token. Can be assumed to be type START_BLOCK.
+ Returns:
+ Code block type for current token.
+ """
+ raise TypeError('Abstract method GetBlockType not implemented')
+
+ def GetParams(self):
+ """Returns the accumulated input params as an array.
+
+    In some EcmaScript languages, input params are specified like
+ (param:Type, param2:Type2, ...)
+ in other they are specified just as
+ (param, param2)
+ We handle both formats for specifying parameters here and leave
+ it to the compilers for each language to detect compile errors.
+ This allows more code to be reused between lint checkers for various
+ EcmaScript languages.
+
+ Returns:
+ The accumulated input params as an array.
+ """
+ params = []
+ if self._cumulative_params:
+ params = re.compile(r'\s+').sub('', self._cumulative_params).split(',')
+ # Strip out the type from parameters of the form name:Type.
+ params = map(lambda param: param.split(':')[0], params)
+
+ return params
+
+ def GetLastComment(self):
+ """Return the last plain comment that could be used as documentation.
+
+ Returns:
+ The last plain comment that could be used as documentation.
+ """
+ return self._last_comment
+
+ def GetDocComment(self):
+ """Return the most recent applicable documentation comment.
+
+ Returns:
+ The last applicable documentation comment.
+ """
+ return self._doc_comment
+
+ def HasDocComment(self, identifier):
+ """Returns whether the identifier has been documented yet.
+
+ Args:
+ identifier: The identifier.
+
+ Returns:
+ Whether the identifier has been documented yet.
+ """
+ return identifier in self._documented_identifiers
+
+ def InDocComment(self):
+ """Returns whether the current token is in a doc comment.
+
+ Returns:
+ Whether the current token is in a doc comment.
+ """
+ return self._doc_comment and self._doc_comment.end_token is None
+
+ def GetDocFlag(self):
+ """Returns the current documentation flags.
+
+ Returns:
+ The current documentation flags.
+ """
+ return self._doc_flag
+
+ def IsTypeToken(self, t):
+ if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT,
+ Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX):
+ f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT],
+ None, True)
+ if (f and f.attached_object.type_start_token is not None and
+ f.attached_object.type_end_token is not None):
+ return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
+ tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
+ return False
+
+ def GetFunction(self):
+ """Return the function the current code block is a part of.
+
+ Returns:
+ The current Function object.
+ """
+ if self._function_stack:
+ return self._function_stack[-1]
+
+ def GetBlockDepth(self):
+ """Return the block depth.
+
+ Returns:
+ The current block depth.
+ """
+ return self._block_depth
+
+ def GetLastNonSpaceToken(self):
+ """Return the last non whitespace token."""
+ return self._last_non_space_token
+
+ def GetLastLine(self):
+ """Return the last line."""
+ return self._last_line
+
+ def GetFirstToken(self):
+ """Return the very first token in the file."""
+ return self._first_token
+
+ def IsVariableInScope(self, token_string):
+ """Checks if string is variable in current scope.
+
+ For given string it checks whether the string is a defined variable
+ (including function param) in current state.
+
+ E.g. if variables defined (variables in current scope) is docs
+ then docs, docs.length etc will be considered as variable in current
+    scope. This will help in avoiding extra goog.require for variables.
+
+ Args:
+      token_string: String to check if it is a variable in current scope.
+
+ Returns:
+ true if given string is a variable in current scope.
+ """
+ for variable in self._variables_in_scope:
+ if (token_string == variable
+ or token_string.startswith(variable + '.')):
+ return True
+
+ return False
+
+ def HandleToken(self, token, last_non_space_token):
+ """Handles the given token and updates state.
+
+ Args:
+ token: The token to handle.
+      last_non_space_token: The last non-whitespace token encountered before this one.
+ """
+ self._is_block_close = False
+
+ if not self._first_token:
+ self._first_token = token
+
+ # Track block depth.
+ type = token.type
+ if type == Type.START_BLOCK:
+ self._block_depth += 1
+
+ # Subclasses need to handle block start very differently because
+ # whether a block is a CODE or OBJECT_LITERAL block varies significantly
+ # by language.
+ self._block_types.append(self.GetBlockType(token))
+
+ # When entering a function body, record its parameters.
+ if self.InFunction():
+ function = self._function_stack[-1]
+ if self._block_depth == function.block_depth + 1:
+ function.parameters = self.GetParams()
+
+ # Track block depth.
+ elif type == Type.END_BLOCK:
+ self._is_block_close = not self.InObjectLiteral()
+ self._block_depth -= 1
+ self._block_types.pop()
+
+ # Track parentheses depth.
+ elif type == Type.START_PAREN:
+ self._paren_depth += 1
+
+ # Track parentheses depth.
+ elif type == Type.END_PAREN:
+ self._paren_depth -= 1
+
+ elif type == Type.COMMENT:
+ self._last_comment = token.string
+
+ elif type == Type.START_DOC_COMMENT:
+ self._last_comment = None
+ self._doc_comment = DocComment(token)
+
+ elif type == Type.END_DOC_COMMENT:
+ self._doc_comment.end_token = token
+
+ elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
+ # Don't overwrite flags if they were already parsed in a previous pass.
+ if token.attached_object is None:
+ flag = self._doc_flag(token)
+ token.attached_object = flag
+ else:
+ flag = token.attached_object
+ self._doc_comment.AddFlag(flag)
+
+ if flag.flag_type == 'suppress':
+ self._doc_comment.AddSuppression(token)
+
+ elif type == Type.FUNCTION_DECLARATION:
+ last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
+ True)
+ doc = None
+ # Only top-level functions are eligible for documentation.
+ if self.InTopLevel():
+ doc = self._doc_comment
+
+ name = ''
+ is_assigned = last_code and (last_code.IsOperator('=') or
+ last_code.IsOperator('||') or last_code.IsOperator('&&') or
+ (last_code.IsOperator(':') and not self.InObjectLiteral()))
+ if is_assigned:
+ # TODO(robbyw): This breaks for x[2] = ...
+ # Must use loop to find full function name in the case of line-wrapped
+ # declarations (bug 1220601) like:
+ # my.function.foo.
+ # bar = function() ...
+ identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True)
+ while identifier and tokenutil.IsIdentifierOrDot(identifier):
+ name = identifier.string + name
+ # Traverse behind us, skipping whitespace and comments.
+ while True:
+ identifier = identifier.previous
+ if not identifier or not identifier.type in Type.NON_CODE_TYPES:
+ break
+
+ else:
+ next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
+ while next_token and next_token.IsType(Type.FUNCTION_NAME):
+ name += next_token.string
+ next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2)
+
+ function = Function(self._block_depth, is_assigned, doc, name)
+ function.start_token = token
+
+ self._function_stack.append(function)
+ self._functions_by_name[name] = function
+
+ # Add a delimiter in stack for scope variables to define start of
+ # function. This helps in popping variables of this function when
+ # function declaration ends.
+ self._variables_in_scope.append('')
+
+ elif type == Type.START_PARAMETERS:
+ self._cumulative_params = ''
+
+ elif type == Type.PARAMETERS:
+ self._cumulative_params += token.string
+ self._variables_in_scope.extend(self.GetParams())
+
+ elif type == Type.KEYWORD and token.string == 'return':
+ next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
+ if not next_token.IsType(Type.SEMICOLON):
+ function = self.GetFunction()
+ if function:
+ function.has_return = True
+
+ elif type == Type.KEYWORD and token.string == 'throw':
+ function = self.GetFunction()
+ if function:
+ function.has_throw = True
+
+ elif type == Type.KEYWORD and token.string == 'var':
+ function = self.GetFunction()
+ next_token = tokenutil.Search(token, [Type.IDENTIFIER,
+ Type.SIMPLE_LVALUE])
+
+ if next_token:
+ if next_token.type == Type.SIMPLE_LVALUE:
+ self._variables_in_scope.append(next_token.values['identifier'])
+ else:
+ self._variables_in_scope.append(next_token.string)
+
+ elif type == Type.SIMPLE_LVALUE:
+ identifier = token.values['identifier']
+ jsdoc = self.GetDocComment()
+ if jsdoc:
+ self._documented_identifiers.add(identifier)
+
+ self._HandleIdentifier(identifier, True)
+
+ elif type == Type.IDENTIFIER:
+ self._HandleIdentifier(token.string, False)
+
+ # Detect documented non-assignments.
+ next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
+ if next_token and next_token.IsType(Type.SEMICOLON):
+ if (self._last_non_space_token and
+ self._last_non_space_token.IsType(Type.END_DOC_COMMENT)):
+ self._documented_identifiers.add(token.string)
+
+ def _HandleIdentifier(self, identifier, is_assignment):
+ """Process the given identifier.
+
+ Currently checks if it references 'this' and annotates the function
+ accordingly.
+
+ Args:
+      identifier: The identifier to process.
+      is_assignment: Whether the identifier is being written to.
+ """
+ if identifier == 'this' or identifier.startswith('this.'):
+ function = self.GetFunction()
+ if function:
+ function.has_this = True
+
+ def HandleAfterToken(self, token):
+ """Handle updating state after a token has been checked.
+
+ This function should be used for destructive state changes such as
+ deleting a tracked object.
+
+ Args:
+ token: The token to handle.
+ """
+ type = token.type
+ if type == Type.SEMICOLON or type == Type.END_PAREN or (
+ type == Type.END_BRACKET and
+ self._last_non_space_token.type not in (
+ Type.SINGLE_QUOTE_STRING_END, Type.DOUBLE_QUOTE_STRING_END)):
+ # We end on any numeric array index, but keep going for string based
+ # array indices so that we pick up manually exported identifiers.
+ self._doc_comment = None
+ self._last_comment = None
+
+ elif type == Type.END_BLOCK:
+ self._doc_comment = None
+ self._last_comment = None
+
+ if self.InFunction() and self.IsFunctionClose():
+ # TODO(robbyw): Detect the function's name for better errors.
+ function = self._function_stack.pop()
+ function.end_token = token
+
+ # Pop all variables till delimiter ('') those were defined in the
+ # function being closed so make them out of scope.
+ while self._variables_in_scope and self._variables_in_scope[-1]:
+ self._variables_in_scope.pop()
+
+ # Pop delimiter
+ if self._variables_in_scope:
+ self._variables_in_scope.pop()
+
+ elif type == Type.END_PARAMETERS and self._doc_comment:
+ self._doc_comment = None
+ self._last_comment = None
+
+ if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE):
+ self._last_non_space_token = token
+
+ self._last_line = token.line
diff --git a/tools/closure_linter/build/lib/closure_linter/statetracker_test.py b/tools/closure_linter/build/lib/closure_linter/statetracker_test.py
new file mode 100644
index 0000000000..494dc642fc
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/statetracker_test.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the statetracker module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+
+
+import unittest as googletest
+
+from closure_linter import javascripttokens
+from closure_linter import statetracker
+from closure_linter import testutil
+
+
+class _FakeDocFlag(object):
+
+ def __repr__(self):
+ return '@%s %s' % (self.flag_type, self.name)
+
+
+class IdentifierTest(googletest.TestCase):
+
+ def testJustIdentifier(self):
+ a = javascripttokens.JavaScriptToken(
+ 'abc', javascripttokens.JavaScriptTokenType.IDENTIFIER, 'abc', 1)
+
+ st = statetracker.StateTracker()
+ st.HandleToken(a, None)
+
+
+class DocCommentTest(googletest.TestCase):
+
+ @staticmethod
+ def _MakeDocFlagFake(flag_type, name=None):
+ flag = _FakeDocFlag()
+ flag.flag_type = flag_type
+ flag.name = name
+ return flag
+
+ def testDocFlags(self):
+ comment = statetracker.DocComment(None)
+
+ a = self._MakeDocFlagFake('param', 'foo')
+ comment.AddFlag(a)
+
+ b = self._MakeDocFlagFake('param', '')
+ comment.AddFlag(b)
+
+ c = self._MakeDocFlagFake('param', 'bar')
+ comment.AddFlag(c)
+
+ self.assertEquals(
+ ['foo', 'bar'],
+ comment.ordered_params)
+
+ self.assertEquals(
+ [a, b, c],
+ comment.GetDocFlags())
+
+ def testInvalidate(self):
+ comment = statetracker.DocComment(None)
+
+ self.assertFalse(comment.invalidated)
+ self.assertFalse(comment.IsInvalidated())
+
+ comment.Invalidate()
+
+ self.assertTrue(comment.invalidated)
+ self.assertTrue(comment.IsInvalidated())
+
+ def testSuppressionOnly(self):
+ comment = statetracker.DocComment(None)
+
+ self.assertFalse(comment.SuppressionOnly())
+ comment.AddFlag(self._MakeDocFlagFake('suppress'))
+ self.assertTrue(comment.SuppressionOnly())
+ comment.AddFlag(self._MakeDocFlagFake('foo'))
+ self.assertFalse(comment.SuppressionOnly())
+
+ def testRepr(self):
+ comment = statetracker.DocComment(None)
+ comment.AddFlag(self._MakeDocFlagFake('param', 'foo'))
+ comment.AddFlag(self._MakeDocFlagFake('param', 'bar'))
+
+ self.assertEquals(
+ '<DocComment: [\'foo\', \'bar\'], [@param foo, @param bar]>',
+ repr(comment))
+
+ def testDocFlagParam(self):
+ comment = self._ParseComment("""
+ /**
+ * @param {string} [name] Name of customer.
+ */""")
+ flag = comment.GetFlag('param')
+ self.assertEquals('string', flag.type)
+ self.assertEquals('string', flag.jstype.ToString())
+ self.assertEquals('[name]', flag.name)
+
+ def _ParseComment(self, script):
+ """Parse a script that contains one comment and return it."""
+ _, comments = testutil.ParseFunctionsAndComments(script)
+ self.assertEquals(1, len(comments))
+ return comments[0]
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/strict_test.py b/tools/closure_linter/build/lib/closure_linter/strict_test.py
new file mode 100644
index 0000000000..2634456874
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/strict_test.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for gjslint --strict.
+
+Tests errors that can be thrown by gjslint when in strict mode.
+"""
+
+
+
+import unittest
+
+import gflags as flags
+import unittest as googletest
+
+from closure_linter import errors
+from closure_linter import runner
+from closure_linter.common import erroraccumulator
+
+flags.FLAGS.strict = True
+
+
+class StrictTest(unittest.TestCase):
+ """Tests scenarios where strict generates warnings."""
+
+ def testUnclosedString(self):
+ """Tests warnings are reported when nothing is disabled.
+
+ b/11450054.
+ """
+ original = [
+ 'bug = function() {',
+ ' (\'foo\'\');',
+ '};',
+ '',
+ ]
+
+ expected = [errors.FILE_DOES_NOT_PARSE, errors.MULTI_LINE_STRING,
+ errors.FILE_IN_BLOCK]
+ self._AssertErrors(original, expected)
+
+ def _AssertErrors(self, original, expected_errors):
+ """Asserts that the error fixer corrects original to expected."""
+
+    # Trap gjslint's output and parse it to get the messages added.
+ error_accumulator = erroraccumulator.ErrorAccumulator()
+ runner.Run('testing.js', error_accumulator, source=original)
+ error_nums = [e.code for e in error_accumulator.GetErrors()]
+
+ error_nums.sort()
+ expected_errors.sort()
+ self.assertListEqual(error_nums, expected_errors)
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/testutil.py b/tools/closure_linter/build/lib/closure_linter/testutil.py
new file mode 100644
index 0000000000..f7084ee37b
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/testutil.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions for testing gjslint components."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import StringIO
+
+from closure_linter import ecmametadatapass
+from closure_linter import javascriptstatetracker
+from closure_linter import javascripttokenizer
+
+
+def TokenizeSource(source):
+ """Convert a source into a string of tokens.
+
+ Args:
+ source: A source file as a string or file-like object (iterates lines).
+
+ Returns:
+ The first token of the resulting token stream.
+ """
+
+ if isinstance(source, basestring):
+ source = StringIO.StringIO(source)
+
+ tokenizer = javascripttokenizer.JavaScriptTokenizer()
+ return tokenizer.TokenizeFile(source)
+
+
+def TokenizeSourceAndRunEcmaPass(source):
+ """Tokenize a source and run the EcmaMetaDataPass on it.
+
+ Args:
+ source: A source file as a string or file-like object (iterates lines).
+
+ Returns:
+ The first token of the resulting token stream.
+ """
+ start_token = TokenizeSource(source)
+ ecma_pass = ecmametadatapass.EcmaMetaDataPass()
+ ecma_pass.Process(start_token)
+ return start_token
+
+
+def ParseFunctionsAndComments(source, error_handler=None):
+ """Run the tokenizer and tracker and return comments and functions found.
+
+ Args:
+ source: A source file as a string or file-like object (iterates lines).
+ error_handler: An error handler.
+
+ Returns:
+ The functions and comments as a tuple.
+ """
+ start_token = TokenizeSourceAndRunEcmaPass(source)
+
+ tracker = javascriptstatetracker.JavaScriptStateTracker()
+ if error_handler is not None:
+ tracker.DocFlagPass(start_token, error_handler)
+
+ functions = []
+ comments = []
+ for token in start_token:
+ tracker.HandleToken(token, tracker.GetLastNonSpaceToken())
+
+ function = tracker.GetFunction()
+ if function and function not in functions:
+ functions.append(function)
+
+ comment = tracker.GetDocComment()
+ if comment and comment not in comments:
+ comments.append(comment)
+
+ tracker.HandleAfterToken(token)
+
+ return functions, comments
diff --git a/tools/closure_linter/build/lib/closure_linter/tokenutil.py b/tools/closure_linter/build/lib/closure_linter/tokenutil.py
new file mode 100644
index 0000000000..11e3ccc68b
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/tokenutil.py
@@ -0,0 +1,697 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Token utility functions."""
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)')
+
+import copy
+import StringIO
+
+from closure_linter.common import tokens
+from closure_linter.javascripttokens import JavaScriptToken
+from closure_linter.javascripttokens import JavaScriptTokenType
+
+# Shorthand
+Type = tokens.TokenType
+
+
+def GetFirstTokenInSameLine(token):
+ """Returns the first token in the same line as token.
+
+ Args:
+ token: Any token in the line.
+
+ Returns:
+ The first token in the same line as token.
+ """
+ while not token.IsFirstInLine():
+ token = token.previous
+ return token
+
+
+def GetFirstTokenInPreviousLine(token):
+ """Returns the first token in the previous line as token.
+
+ Args:
+ token: Any token in the line.
+
+ Returns:
+ The first token in the previous line as token, or None if token is on the
+ first line.
+ """
+ first_in_line = GetFirstTokenInSameLine(token)
+ if first_in_line.previous:
+ return GetFirstTokenInSameLine(first_in_line.previous)
+
+ return None
+
+
+def GetLastTokenInSameLine(token):
+ """Returns the last token in the same line as token.
+
+ Args:
+ token: Any token in the line.
+
+ Returns:
+ The last token in the same line as token.
+ """
+ while not token.IsLastInLine():
+ token = token.next
+ return token
+
+
+def GetAllTokensInSameLine(token):
+ """Returns all tokens in the same line as the given token.
+
+ Args:
+ token: Any token in the line.
+
+ Returns:
+ All tokens on the same line as the given token.
+ """
+ first_token = GetFirstTokenInSameLine(token)
+ last_token = GetLastTokenInSameLine(token)
+
+ tokens_in_line = []
+ while first_token != last_token:
+ tokens_in_line.append(first_token)
+ first_token = first_token.next
+ tokens_in_line.append(last_token)
+
+ return tokens_in_line
+
+
+def CustomSearch(start_token, func, end_func=None, distance=None,
+ reverse=False):
+ """Returns the first token where func is True within distance of this token.
+
+ Args:
+ start_token: The token to start searching from
+ func: The function to call to test a token for applicability
+ end_func: The function to call to test a token to determine whether to abort
+ the search.
+ distance: The number of tokens to look through before failing search. Must
+ be positive. If unspecified, will search until the end of the token
+ chain
+ reverse: When true, search the tokens before this one instead of the tokens
+ after it
+
+ Returns:
+ The first token matching func within distance of this token, or None if no
+ such token is found.
+ """
+ token = start_token
+ if reverse:
+ while token and (distance is None or distance > 0):
+ previous = token.previous
+ if previous:
+ if func(previous):
+ return previous
+ if end_func and end_func(previous):
+ return None
+
+ token = previous
+ if distance is not None:
+ distance -= 1
+
+ else:
+ while token and (distance is None or distance > 0):
+ next_token = token.next
+ if next_token:
+ if func(next_token):
+ return next_token
+ if end_func and end_func(next_token):
+ return None
+
+ token = next_token
+ if distance is not None:
+ distance -= 1
+
+ return None
+
+
+def Search(start_token, token_types, distance=None, reverse=False):
+ """Returns the first token of type in token_types within distance.
+
+ Args:
+ start_token: The token to start searching from
+ token_types: The allowable types of the token being searched for
+ distance: The number of tokens to look through before failing search. Must
+ be positive. If unspecified, will search until the end of the token
+ chain
+ reverse: When true, search the tokens before this one instead of the tokens
+ after it
+
+ Returns:
+ The first token of any type in token_types within distance of this token, or
+ None if no such token is found.
+ """
+ return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
+ None, distance, reverse)
+
+
+def SearchExcept(start_token, token_types, distance=None, reverse=False):
+ """Returns the first token not of any type in token_types within distance.
+
+ Args:
+ start_token: The token to start searching from
+ token_types: The unallowable types of the token being searched for
+ distance: The number of tokens to look through before failing search. Must
+ be positive. If unspecified, will search until the end of the token
+ chain
+ reverse: When true, search the tokens before this one instead of the tokens
+ after it
+
+ Returns:
+ The first token of any type in token_types within distance of this token, or
+ None if no such token is found.
+ """
+ return CustomSearch(start_token,
+ lambda token: not token.IsAnyType(token_types),
+ None, distance, reverse)
+
+
+def SearchUntil(start_token, token_types, end_types, distance=None,
+ reverse=False):
+ """Returns the first token of type in token_types before a token of end_type.
+
+ Args:
+ start_token: The token to start searching from.
+ token_types: The allowable types of the token being searched for.
+ end_types: Types of tokens to abort search if we find.
+ distance: The number of tokens to look through before failing search. Must
+ be positive. If unspecified, will search until the end of the token
+ chain
+ reverse: When true, search the tokens before this one instead of the tokens
+ after it
+
+ Returns:
+ The first token of any type in token_types within distance of this token
+ before any tokens of type in end_type, or None if no such token is found.
+ """
+ return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
+ lambda token: token.IsAnyType(end_types),
+ distance, reverse)
+
+
+def DeleteToken(token):
+ """Deletes the given token from the linked list.
+
+ Args:
+ token: The token to delete
+ """
+ # When deleting a token, we do not update the deleted token itself to make
+ # sure the previous and next pointers are still pointing to tokens which are
+ # not deleted. Also it is very hard to keep track of all previously deleted
+ # tokens to update them when their pointers become invalid. So we add this
+ # flag that any token linked list iteration logic can skip deleted node safely
+ # when its current token is deleted.
+ token.is_deleted = True
+ if token.previous:
+ token.previous.next = token.next
+
+ if token.next:
+ token.next.previous = token.previous
+
+ following_token = token.next
+ while following_token and following_token.metadata.last_code == token:
+ following_token.metadata.last_code = token.metadata.last_code
+ following_token = following_token.next
+
+
+def DeleteTokens(token, token_count):
+ """Deletes the given number of tokens starting with the given token.
+
+ Args:
+ token: The token to start deleting at.
+ token_count: The total number of tokens to delete.
+ """
+ for i in xrange(1, token_count):
+ DeleteToken(token.next)
+ DeleteToken(token)
+
+
+def InsertTokenBefore(new_token, token):
+ """Insert new_token before token.
+
+ Args:
+ new_token: A token to be added to the stream
+ token: A token already in the stream
+ """
+ new_token.next = token
+ new_token.previous = token.previous
+
+ new_token.metadata = copy.copy(token.metadata)
+
+ if new_token.IsCode():
+ old_last_code = token.metadata.last_code
+ following_token = token
+ while (following_token and
+ following_token.metadata.last_code == old_last_code):
+ following_token.metadata.last_code = new_token
+ following_token = following_token.next
+
+ token.previous = new_token
+ if new_token.previous:
+ new_token.previous.next = new_token
+
+ if new_token.start_index is None:
+ if new_token.line_number == token.line_number:
+ new_token.start_index = token.start_index
+ else:
+ previous_token = new_token.previous
+ if previous_token:
+ new_token.start_index = (previous_token.start_index +
+ len(previous_token.string))
+ else:
+ new_token.start_index = 0
+
+ iterator = new_token.next
+ while iterator and iterator.line_number == new_token.line_number:
+ iterator.start_index += len(new_token.string)
+ iterator = iterator.next
+
+
+def InsertTokenAfter(new_token, token):
+ """Insert new_token after token.
+
+ Args:
+ new_token: A token to be added to the stream
+ token: A token already in the stream
+ """
+ new_token.previous = token
+ new_token.next = token.next
+
+ new_token.metadata = copy.copy(token.metadata)
+
+ if token.IsCode():
+ new_token.metadata.last_code = token
+
+ if new_token.IsCode():
+ following_token = token.next
+ while following_token and following_token.metadata.last_code == token:
+ following_token.metadata.last_code = new_token
+ following_token = following_token.next
+
+ token.next = new_token
+ if new_token.next:
+ new_token.next.previous = new_token
+
+ if new_token.start_index is None:
+ if new_token.line_number == token.line_number:
+ new_token.start_index = token.start_index + len(token.string)
+ else:
+ new_token.start_index = 0
+
+ iterator = new_token.next
+ while iterator and iterator.line_number == new_token.line_number:
+ iterator.start_index += len(new_token.string)
+ iterator = iterator.next
+
+
+def InsertTokensAfter(new_tokens, token):
+ """Insert multiple tokens after token.
+
+ Args:
+ new_tokens: An array of tokens to be added to the stream
+ token: A token already in the stream
+ """
+ # TODO(user): It would be nicer to have InsertTokenAfter defer to here
+ # instead of vice-versa.
+ current_token = token
+ for new_token in new_tokens:
+ InsertTokenAfter(new_token, current_token)
+ current_token = new_token
+
+
+def InsertSpaceTokenAfter(token):
+ """Inserts a space token after the given token.
+
+ Args:
+ token: The token to insert a space token after
+
+ Returns:
+ A single space token
+ """
+ space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
+ token.line_number)
+ InsertTokenAfter(space_token, token)
+
+
+def InsertBlankLineAfter(token):
+ """Inserts a blank line after the given token.
+
+ Args:
+ token: The token to insert a blank line after
+
+ Returns:
+ A single space token
+ """
+ blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
+ token.line_number + 1)
+ InsertLineAfter(token, [blank_token])
+
+
+def InsertLineAfter(token, new_tokens):
+ """Inserts a new line consisting of new_tokens after the given token.
+
+ Args:
+ token: The token to insert after.
+ new_tokens: The tokens that will make up the new line.
+ """
+ insert_location = token
+ for new_token in new_tokens:
+ InsertTokenAfter(new_token, insert_location)
+ insert_location = new_token
+
+ # Update all subsequent line numbers.
+ next_token = new_tokens[-1].next
+ while next_token:
+ next_token.line_number += 1
+ next_token = next_token.next
+
+
+def SplitToken(token, position):
+  """Splits the token into two tokens at position.
+
+  Args:
+    token: The token to split
+    position: The position to split at. Will be the beginning of second token.
+
+  Returns:
+    The new second token.
+  """
+  # Truncate the original token; the tail becomes the new token's string.
+  new_string = token.string[position:]
+  token.string = token.string[:position]
+
+  # The new token shares the original's type, line text and line number, and
+  # is linked into the stream directly after the (now truncated) original.
+  new_token = JavaScriptToken(new_string, token.type, token.line,
+                              token.line_number)
+  InsertTokenAfter(new_token, token)
+
+  return new_token
+
+
+def Compare(token1, token2):
+  """Compares two tokens and determines their relative order.
+
+  Tokens are ordered first by line_number, then by start_index within the
+  line, matching their order in the token stream.
+
+  Args:
+    token1: The first token to compare.
+    token2: The second token to compare.
+
+  Returns:
+    A negative integer, zero, or a positive integer as the first token is
+    before, equal, or after the second in the token stream.
+  """
+  if token2.line_number != token1.line_number:
+    return token1.line_number - token2.line_number
+  else:
+    return token1.start_index - token2.start_index
+
+
+def GoogScopeOrNoneFromStartBlock(token):
+  """Determines if the given START_BLOCK is part of a goog.scope statement.
+
+  Args:
+    token: A token of type START_BLOCK.
+
+  Returns:
+    The goog.scope function call token, or None if such call doesn't exist.
+    (Falls off the end, returning None implicitly, when no match is found.)
+  """
+  if token.type != JavaScriptTokenType.START_BLOCK:
+    return None
+
+  # Search for a goog.scope statement, which will be 5 tokens before the
+  # block. Illustration of the tokens found prior to the start block:
+  # goog.scope(function() {
+  #      5    4    3   21 ^
+
+  # Walk back exactly 5 tokens, stopping early (at None) if the stream ends.
+  maybe_goog_scope = token
+  for unused_i in xrange(5):
+    maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and
+                        maybe_goog_scope.previous else None)
+  if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
+    return maybe_goog_scope
+
+
+def GetTokenRange(start_token, end_token):
+  """Returns a list of tokens between the two given, inclusive.
+
+  Args:
+    start_token: Start token in the range.
+    end_token: End token in the range.
+
+  Returns:
+    A list of tokens, in order, from start_token to end_token (including start
+    and end). Returns None if the tokens do not describe a valid range.
+  """
+
+  token_range = []
+  token = start_token
+
+  # Walk forward until end_token is reached; if the stream ends first, the
+  # loop exits and None is returned implicitly.
+  while token:
+    token_range.append(token)
+
+    if token == end_token:
+      return token_range
+
+    token = token.next
+
+
+def TokensToString(token_iterable):
+  """Convert a number of tokens into a string.
+
+  Newlines will be inserted whenever the line_number of two neighboring
+  strings differ.
+
+  Args:
+    token_iterable: The tokens to turn to a string.
+
+  Returns:
+    A string representation of the given tokens.
+  """
+
+  buf = StringIO.StringIO()
+  token_list = list(token_iterable)
+  if not token_list:
+    return ''
+
+  line_number = token_list[0].line_number
+
+  for token in token_list:
+
+    # Emit one newline for every line skipped between neighboring tokens.
+    while line_number < token.line_number:
+      line_number += 1
+      buf.write('\n')
+
+    # Tokens out of line order (earlier line than the last one written) get
+    # a single separating newline.
+    if line_number > token.line_number:
+      line_number = token.line_number
+      buf.write('\n')
+
+    buf.write(token.string)
+
+  return buf.getvalue()
+
+
+def GetPreviousCodeToken(token):
+  """Returns the code token before the specified token.
+
+  Args:
+    token: A token.
+
+  Returns:
+    The code token before the specified token or None if no such token
+    exists.
+  """
+
+  # Search backwards for the first token whose type is not a comment or
+  # whitespace type.
+  return CustomSearch(
+      token,
+      lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
+      reverse=True)
+
+
+def GetNextCodeToken(token):
+  """Returns the next code token after the specified token.
+
+  Args:
+    token: A token.
+
+  Returns:
+    The next code token after the specified token or None if no such token
+    exists.
+  """
+
+  # Same predicate as GetPreviousCodeToken, searching forward instead.
+  return CustomSearch(
+      token,
+      lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
+      reverse=False)
+
+
+def GetIdentifierStart(token):
+  """Returns the first token in an identifier.
+
+  Given a token which is part of an identifier, returns the token at the start
+  of the identifier.
+
+  Args:
+    token: A token which is part of an identifier.
+
+  Returns:
+    The token at the start of the identifier or None if the identifier was not
+    of the form 'a.b.c' (e.g. "['a']['b'].c").
+  """
+
+  # Walk backwards over any chain of identifier and '.' code tokens.
+  start_token = token
+  previous_code_token = GetPreviousCodeToken(token)
+
+  while (previous_code_token and (
+      previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or
+      IsDot(previous_code_token))):
+    start_token = previous_code_token
+    previous_code_token = GetPreviousCodeToken(previous_code_token)
+
+  # Landing on a '.' means the identifier did not start with a plain
+  # identifier token (e.g. bracket access), so there is no valid start.
+  if IsDot(start_token):
+    return None
+
+  return start_token
+
+
+def GetIdentifierForToken(token):
+  """Get the symbol specified by a token.
+
+  Given a token, this function additionally concatenates any parts of an
+  identifying symbol being identified that are split by whitespace or a
+  newline.
+
+  The function will return None if the token is not the first token of an
+  identifier.
+
+  Args:
+    token: The first token of a symbol.
+
+  Returns:
+    The whole symbol, as a string.
+  """
+
+  # Search backward to determine if this token is the first token of the
+  # identifier. If it is not the first token, return None to signal that this
+  # token should be ignored.
+  prev_token = token.previous
+  while prev_token:
+    if (prev_token.IsType(JavaScriptTokenType.IDENTIFIER) or
+        IsDot(prev_token)):
+      return None
+
+    # Whitespace and comments are transparent: keep looking further back.
+    if (prev_token.IsType(tokens.TokenType.WHITESPACE) or
+        prev_token.IsAnyType(JavaScriptTokenType.COMMENT_TYPES)):
+      prev_token = prev_token.previous
+    else:
+      break
+
+  # A "function foo()" declaration.
+  if token.type is JavaScriptTokenType.FUNCTION_NAME:
+    return token.string
+
+  # A "var foo" declaration (if the previous token is 'var')
+  previous_code_token = GetPreviousCodeToken(token)
+
+  if previous_code_token and previous_code_token.IsKeyword('var'):
+    return token.string
+
+  # Otherwise, this is potentially a namespaced (goog.foo.bar) identifier that
+  # could span multiple lines or be broken up by whitespace.  We need
+  # to concatenate.
+  identifier_types = set([
+      JavaScriptTokenType.IDENTIFIER,
+      JavaScriptTokenType.SIMPLE_LVALUE
+  ])
+
+  assert token.type in identifier_types
+
+  # Start with the first token
+  symbol_tokens = [token]
+
+  if token.next:
+    # NOTE(review): iterating a token appears to yield the subsequent tokens
+    # in the stream -- presumably Token implements __iter__; confirm in the
+    # tokens module.
+    for t in token.next:
+      last_symbol_token = symbol_tokens[-1]
+
+      # A dot is part of the previous symbol.
+      if IsDot(t):
+        symbol_tokens.append(t)
+        continue
+
+      # An identifier is part of the previous symbol if the previous one was a
+      # dot.
+      if t.type in identifier_types:
+        if IsDot(last_symbol_token):
+          symbol_tokens.append(t)
+          continue
+        else:
+          break
+
+      # Skip any whitespace
+      if t.type in JavaScriptTokenType.NON_CODE_TYPES:
+        continue
+
+      # This is the end of the identifier. Stop iterating.
+      break
+
+  if symbol_tokens:
+    return ''.join([t.string for t in symbol_tokens])
+
+
+def GetStringAfterToken(token):
+  """Get string after token.
+
+  Args:
+    token: Search will be done after this token.
+
+  Returns:
+    String if found after token else None (empty string will also
+    return None).
+
+  Search until end of string as in case of empty string Type.STRING_TEXT is not
+  present/found and don't want to return next string.
+  E.g.
+  a = '';
+  b = 'test';
+  When searching for string after 'a' if search is not limited by end of string
+  then it will return 'test' which is not desirable as there is a empty string
+  before that.
+
+  This will return None for cases where string is empty or no string found
+  as in both cases there is no Type.STRING_TEXT.
+  """
+  # Stop the search at a string-end token so an empty string ('') does not
+  # cause the following string's contents to be returned instead.
+  string_token = SearchUntil(token, JavaScriptTokenType.STRING_TEXT,
+                             [JavaScriptTokenType.SINGLE_QUOTE_STRING_END,
+                              JavaScriptTokenType.DOUBLE_QUOTE_STRING_END])
+  if string_token:
+    return string_token.string
+  else:
+    return None
+
+
+def IsDot(token):
+  """Whether the token represents a "dot" operator (foo.bar)."""
+  return token.type is JavaScriptTokenType.OPERATOR and token.string == '.'
+
+
+def IsIdentifierOrDot(token):
+  """Whether the token is either an identifier (incl. lvalue) or a '.'."""
+  return (token.type in [JavaScriptTokenType.IDENTIFIER,
+                         JavaScriptTokenType.SIMPLE_LVALUE] or
+          IsDot(token))
diff --git a/tools/closure_linter/build/lib/closure_linter/tokenutil_test.py b/tools/closure_linter/build/lib/closure_linter/tokenutil_test.py
new file mode 100644
index 0000000000..c7d3854776
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/tokenutil_test.py
@@ -0,0 +1,297 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the tokenutil module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import unittest as googletest
+
+from closure_linter import ecmametadatapass
+from closure_linter import javascripttokens
+from closure_linter import testutil
+from closure_linter import tokenutil
+
+
+class FakeToken(object):
+  """Minimal token stand-in; tests attach attributes (next, string, ...)."""
+  pass
+
+
+class TokenUtilTest(googletest.TestCase):
+  """Tests for the helper functions in tokenutil."""
+
+  def testGetTokenRange(self):
+
+    a = FakeToken()
+    b = FakeToken()
+    c = FakeToken()
+    d = FakeToken()
+    e = FakeToken()
+
+    a.next = b
+    b.next = c
+    c.next = d
+
+    self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d))
+
+    # This is an error as e does not come after a in the token chain.
+    self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e))
+
+  def testTokensToString(self):
+
+    a = FakeToken()
+    b = FakeToken()
+    c = FakeToken()
+    d = FakeToken()
+    e = FakeToken()
+
+    a.string = 'aaa'
+    b.string = 'bbb'
+    c.string = 'ccc'
+    d.string = 'ddd'
+    e.string = 'eee'
+
+    a.line_number = 5
+    b.line_number = 6
+    c.line_number = 6
+    d.line_number = 10
+    e.line_number = 11
+
+    # Line-number gaps (6 -> 10) become runs of newlines in the output.
+    self.assertEquals(
+        'aaa\nbbbccc\n\n\n\nddd\neee',
+        tokenutil.TokensToString([a, b, c, d, e]))
+
+    self.assertEquals(
+        'ddd\neee\naaa\nbbbccc',
+        tokenutil.TokensToString([d, e, a, b, c]),
+        'Neighboring tokens not in line_number order should have a newline '
+        'between them.')
+
+  def testGetPreviousCodeToken(self):
+
+    tokens = testutil.TokenizeSource("""
+start1. // comment
+    /* another comment */
+    end1
+""")
+
+    def _GetTokenStartingWith(token_starts_with):
+      for t in tokens:
+        if t.string.startswith(token_starts_with):
+          return t
+
+    self.assertEquals(
+        None,
+        tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1')))
+
+    self.assertEquals(
+        '.',
+        tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string)
+
+    self.assertEquals(
+        'start1',
+        tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('.')).string)
+
+  def testGetNextCodeToken(self):
+
+    tokens = testutil.TokenizeSource("""
+start1. // comment
+    /* another comment */
+    end1
+""")
+
+    def _GetTokenStartingWith(token_starts_with):
+      for t in tokens:
+        if t.string.startswith(token_starts_with):
+          return t
+
+    self.assertEquals(
+        '.',
+        tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string)
+
+    self.assertEquals(
+        'end1',
+        tokenutil.GetNextCodeToken(_GetTokenStartingWith('.')).string)
+
+    self.assertEquals(
+        None,
+        tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1')))
+
+  def testGetIdentifierStart(self):
+
+    tokens = testutil.TokenizeSource("""
+start1 . // comment
+    prototype. /* another comment */
+    end1
+
+['edge'][case].prototype.
+    end2 = function() {}
+""")
+
+    def _GetTokenStartingWith(token_starts_with):
+      for t in tokens:
+        if t.string.startswith(token_starts_with):
+          return t
+
+    self.assertEquals(
+        'start1',
+        tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string)
+
+    self.assertEquals(
+        'start1',
+        tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string)
+
+    # Bracket access means there is no 'a.b.c'-style start token.
+    self.assertEquals(
+        None,
+        tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2')))
+
+  def testInsertTokenBefore(self):
+
+    self.AssertInsertTokenAfterBefore(False)
+
+  def testInsertTokenAfter(self):
+
+    self.AssertInsertTokenAfterBefore(True)
+
+  def AssertInsertTokenAfterBefore(self, after):
+    """Shared assertions for InsertTokenAfter/InsertTokenBefore.
+
+    Args:
+      after: If True exercise InsertTokenAfter, else InsertTokenBefore.
+        Both should produce the identical resulting stream.
+    """
+
+    new_token = javascripttokens.JavaScriptToken(
+        'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1)
+
+    existing_token1 = javascripttokens.JavaScriptToken(
+        'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1)
+    existing_token1.start_index = 0
+    existing_token1.metadata = ecmametadatapass.EcmaMetaData()
+
+    existing_token2 = javascripttokens.JavaScriptToken(
+        ' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1)
+    existing_token2.start_index = 3
+    existing_token2.metadata = ecmametadatapass.EcmaMetaData()
+    existing_token2.metadata.last_code = existing_token1
+
+    existing_token1.next = existing_token2
+    existing_token2.previous = existing_token1
+
+    if after:
+      tokenutil.InsertTokenAfter(new_token, existing_token1)
+    else:
+      tokenutil.InsertTokenBefore(new_token, existing_token2)
+
+    # Linked-list pointers are rewired around the new token.
+    self.assertEquals(existing_token1, new_token.previous)
+    self.assertEquals(existing_token2, new_token.next)
+
+    self.assertEquals(new_token, existing_token1.next)
+    self.assertEquals(new_token, existing_token2.previous)
+
+    # last_code metadata is updated to reflect the inserted code token.
+    self.assertEquals(existing_token1, new_token.metadata.last_code)
+    self.assertEquals(new_token, existing_token2.metadata.last_code)
+
+    # start_index of following tokens shifts by len(new_token.string).
+    self.assertEquals(0, existing_token1.start_index)
+    self.assertEquals(3, new_token.start_index)
+    self.assertEquals(4, existing_token2.start_index)
+
+  def testGetIdentifierForToken(self):
+
+    tokens = testutil.TokenizeSource("""
+start1.abc.def.prototype.
+  onContinuedLine
+
+(start2.abc.def
+  .hij.klm
+  .nop)
+
+start3.abc.def
+   .hij = function() {};
+
+// An absurd multi-liner.
+start4.abc.def.
+   hij.
+       klm = function() {};
+
+start5 . aaa . bbb . ccc
+  shouldntBePartOfThePreviousSymbol
+
+start6.abc.def ghi.shouldntBePartOfThePreviousSymbol
+
+var start7 = 42;
+
+function start8() {
+
+}
+
+start9.abc. // why is there a comment here?
+  def /* another comment */
+  shouldntBePart
+
+start10.abc // why is there a comment here?
+  .def /* another comment */
+  shouldntBePart
+
+start11.abc. middle1.shouldNotBeIdentifier
+""")
+
+    def _GetTokenStartingWith(token_starts_with):
+      for t in tokens:
+        if t.string.startswith(token_starts_with):
+          return t
+
+    self.assertEquals(
+        'start1.abc.def.prototype.onContinuedLine',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1')))
+
+    self.assertEquals(
+        'start2.abc.def.hij.klm.nop',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2')))
+
+    self.assertEquals(
+        'start3.abc.def.hij',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3')))
+
+    self.assertEquals(
+        'start4.abc.def.hij.klm',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4')))
+
+    self.assertEquals(
+        'start5.aaa.bbb.ccc',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5')))
+
+    self.assertEquals(
+        'start6.abc.def',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6')))
+
+    self.assertEquals(
+        'start7',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7')))
+
+    self.assertEquals(
+        'start8',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8')))
+
+    self.assertEquals(
+        'start9.abc.def',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9')))
+
+    self.assertEquals(
+        'start10.abc.def',
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10')))
+
+    # middle1 is not the first token of its identifier, so no symbol.
+    self.assertIsNone(
+        tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1')))
+
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/build/lib/closure_linter/typeannotation.py b/tools/closure_linter/build/lib/closure_linter/typeannotation.py
new file mode 100644
index 0000000000..00604c13a7
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/typeannotation.py
@@ -0,0 +1,401 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""Closure typeannotation parsing and utilities."""
+
+
+
+from closure_linter import errors
+from closure_linter import javascripttokens
+from closure_linter.common import error
+
+# Shorthand
+TYPE = javascripttokens.JavaScriptTokenType
+
+
+class TypeAnnotation(object):
+  """Represents a structured view of a closure type annotation.
+
+  Attributes:
+    identifier: The name of the type.
+    key_type: The name part before a colon.
+    sub_types: The list of sub_types used e.g. for Array.<…>
+    or_null: The '?' annotation
+    not_null: The '!' annotation
+    type_group: If this is a grouping (a|b), but does not include function(a).
+    return_type: The return type of a function definition.
+    alias: The actual type set by closurizednamespaceinfo if the identifier
+        uses an alias to shorten the name.
+    tokens: An ordered list of tokens used for this type. May contain
+        TypeAnnotation instances for sub_types, key_type or return_type.
+  """
+
+  # Sentinel stored in type_group for groups the parser created implicitly
+  # (e.g. for a top-level 'a|b' without surrounding parentheses).
+  IMPLICIT_TYPE_GROUP = 2
+
+  # Sentinel return value of GetNullability when nullability is undecidable.
+  NULLABILITY_UNKNOWN = 2
+
+  # Frequently used known non-nullable types.
+  NON_NULLABLE = frozenset([
+      'boolean', 'function', 'number', 'string', 'undefined'])
+  # Frequently used known nullable types.
+  NULLABLE_TYPE_WHITELIST = frozenset([
+      'Array', 'Document', 'Element', 'Function', 'Node', 'NodeList',
+      'Object'])
+
+  def __init__(self):
+    self.identifier = ''
+    self.sub_types = []
+    self.or_null = False
+    self.not_null = False
+    self.type_group = False
+    self.alias = None
+    self.key_type = None
+    self.record_type = False
+    self.opt_arg = False
+    self.return_type = None
+    self.tokens = []
+
+  def IsFunction(self):
+    """Determines whether this is a function definition."""
+    return self.identifier == 'function'
+
+  def IsConstructor(self):
+    """Determines whether this is a function definition for a constructor."""
+    # A constructor is written function(new:Type, ...): the first sub_type's
+    # key_type is the identifier 'new'.
+    key_type = self.sub_types and self.sub_types[0].key_type
+    return self.IsFunction() and key_type.identifier == 'new'
+
+  def IsRecordType(self):
+    """Returns True if this type (or any subtype) is a record type."""
+    return (self.record_type or
+            bool([t for t in self.sub_types if t.IsRecordType()]))
+
+  def IsVarArgsType(self):
+    """Determines if the type is a var_args type, i.e. starts with '...'."""
+    return self.identifier.startswith('...') or (
+        self.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP and
+        self.sub_types[0].identifier.startswith('...'))
+
+  def IsEmpty(self):
+    """Returns True if the type is empty."""
+    return not self.tokens
+
+  def IsUnknownType(self):
+    """Returns True if this is the unknown type {?}."""
+    return (self.or_null
+            and not self.identifier
+            and not self.sub_types
+            and not self.return_type)
+
+  def Append(self, item):
+    """Adds a sub_type to this type and finalizes it.
+
+    Args:
+      item: The TypeAnnotation item to append.
+    """
+    # item is a TypeAnnotation instance, so pylint: disable=protected-access
+    self.sub_types.append(item._Finalize(self))
+
+  def __repr__(self):
+    """Reconstructs the type definition."""
+    append = ''
+    if self.sub_types:
+      # Type groups are separated with '|', everything else with ','.
+      separator = (',' if not self.type_group else '|')
+      if self.identifier == 'function':
+        surround = '(%s)'
+      else:
+        # Pick brackets based on record/group/implicit-group status.
+        surround = {False: '{%s}' if self.record_type else '<%s>',
+                    True: '(%s)',
+                    self.IMPLICIT_TYPE_GROUP: '%s'}[self.type_group]
+      append = surround % separator.join([repr(t) for t in self.sub_types])
+    if self.return_type:
+      append += ':%s' % repr(self.return_type)
+    append += '=' if self.opt_arg else ''
+    prefix = '' + ('?' if self.or_null else '') + ('!' if self.not_null else '')
+    keyword = '%s:' % repr(self.key_type) if self.key_type else ''
+    return keyword + prefix + '%s' % (self.alias or self.identifier) + append
+
+  def ToString(self):
+    """Concats the type's tokens to form a string again."""
+    ret = []
+    for token in self.tokens:
+      # tokens may hold raw tokens or nested TypeAnnotation instances.
+      if not isinstance(token, TypeAnnotation):
+        ret.append(token.string)
+      else:
+        ret.append(token.ToString())
+    return ''.join(ret)
+
+  def Dump(self, indent=''):
+    """Dumps this type's structure for debugging purposes."""
+    result = []
+    for t in self.tokens:
+      if isinstance(t, TypeAnnotation):
+        result.append(indent + str(t) + ' =>\n' + t.Dump(indent + '  '))
+      else:
+        result.append(indent + str(t))
+    return '\n'.join(result)
+
+  def IterIdentifiers(self):
+    """Iterates over all identifiers in this type and its subtypes."""
+    if self.identifier:
+      yield self.identifier
+    for subtype in self.IterTypes():
+      for identifier in subtype.IterIdentifiers():
+        yield identifier
+
+  def IterTypeGroup(self):
+    """Iterates over all types in the type group including self.
+
+    Yields:
+      If this is a implicit or manual type-group: all sub_types.
+      Otherwise: self
+      E.g. for @type {Foo.<Bar>} this will yield only Foo.<Bar>,
+      for @type {Foo|(Bar|Sample)} this will yield Foo, Bar and Sample.
+
+    """
+    if self.type_group:
+      for sub_type in self.sub_types:
+        # Recurse so nested groups are flattened into a single iteration.
+        for sub_type in sub_type.IterTypeGroup():
+          yield sub_type
+    else:
+      yield self
+
+  def IterTypes(self):
+    """Iterates over each subtype as well as return and key types."""
+    if self.return_type:
+      yield self.return_type
+
+    if self.key_type:
+      yield self.key_type
+
+    for sub_type in self.sub_types:
+      yield sub_type
+
+  def GetNullability(self, modifiers=True):
+    """Computes whether the type may be null.
+
+    Args:
+      modifiers: Whether the modifiers ? and ! should be considered in the
+                 evaluation.
+    Returns:
+      True if the type allows null, False if the type is strictly non nullable
+      and NULLABILITY_UNKNOWN if the nullability cannot be determined.
+    """
+
+    # Explicitly marked nullable types or 'null' are nullable.
+    if (modifiers and self.or_null) or self.identifier == 'null':
+      return True
+
+    # Explicitly marked non-nullable types or non-nullable base types:
+    if ((modifiers and self.not_null) or self.record_type
+        or self.identifier in self.NON_NULLABLE):
+      return False
+
+    # A type group is nullable if any of its elements are nullable.
+    if self.type_group:
+      maybe_nullable = False
+      for sub_type in self.sub_types:
+        nullability = sub_type.GetNullability()
+        if nullability == self.NULLABILITY_UNKNOWN:
+          maybe_nullable = nullability
+        elif nullability:
+          return True
+      return maybe_nullable
+
+    # Whitelisted types are nullable.
+    if self.identifier.rstrip('.') in self.NULLABLE_TYPE_WHITELIST:
+      return True
+
+    # All other types are unknown (most should be nullable, but
+    # enums are not and typedefs might not be).
+    return self.NULLABILITY_UNKNOWN
+
+  def WillAlwaysBeNullable(self):
+    """Computes whether the ! flag is illegal for this type.
+
+    This is the case if this type or any of the subtypes is marked as
+    explicitly nullable.
+
+    Returns:
+      True if the ! flag would be illegal.
+    """
+    if self.or_null or self.identifier == 'null':
+      return True
+
+    if self.type_group:
+      return bool([t for t in self.sub_types if t.WillAlwaysBeNullable()])
+
+    return False
+
+  def _Finalize(self, parent):
+    """Fixes some parsing issues once the TypeAnnotation is complete."""
+
+    # Normalize functions whose definition ended up in the key type because
+    # they defined a return type after a colon.
+    if self.key_type and self.key_type.identifier == 'function':
+      current = self.key_type
+      current.return_type = self
+      self.key_type = None
+      # opt_arg never refers to the return type but to the function itself.
+      current.opt_arg = self.opt_arg
+      self.opt_arg = False
+      return current
+
+    # If a typedef just specified the key, it will not end up in the key type.
+    if parent.record_type and not self.key_type:
+      current = TypeAnnotation()
+      current.key_type = self
+      current.tokens.append(self)
+      return current
+    return self
+
+  def FirstToken(self):
+    """Returns the first token used in this type or any of its subtypes."""
+    first = self.tokens[0]
+    return first.FirstToken() if isinstance(first, TypeAnnotation) else first
+
+
+def Parse(token, token_end, error_handler):
+  """Parses a type annotation and returns a TypeAnnotation object.
+
+  Parsing starts at token.next and stops before token_end.
+
+  Args:
+    token: The token preceding the annotation's first token.
+    token_end: The token at which parsing stops (exclusive).
+    error_handler: Receives parse errors via HandleError; may be None.
+
+  Returns:
+    The parsed TypeAnnotation object.
+  """
+  return TypeAnnotationParser(error_handler).Parse(token.next, token_end)
+
+
+class TypeAnnotationParser(object):
+  """A parser for type annotations constructing the TypeAnnotation object."""
+
+  def __init__(self, error_handler):
+    # Stack of open (unclosed) TypeAnnotation containers.
+    self._stack = []
+    # Receives error.Error objects; may be None to suppress reporting.
+    self._error_handler = error_handler
+    # Ensures 'Too many closing items' is reported at most once.
+    self._closing_error = False
+
+  def Parse(self, token, token_end):
+    """Parses a type annotation and returns a TypeAnnotation object.
+
+    Args:
+      token: First token of the annotation.
+      token_end: Token at which parsing stops (exclusive).
+
+    Returns:
+      The parsed TypeAnnotation object.
+    """
+    root = TypeAnnotation()
+    self._stack.append(root)
+    current = TypeAnnotation()
+    root.tokens.append(current)
+
+    while token and token != token_end:
+      if token.type in (TYPE.DOC_TYPE_START_BLOCK, TYPE.DOC_START_BRACE):
+        if token.string == '(':
+          # '(' opens either a type group or a function parameter list.
+          if (current.identifier and
+              current.identifier not in ['function', '...']):
+            self.Error(token,
+                       'Invalid identifier for (): "%s"' % current.identifier)
+          current.type_group = current.identifier != 'function'
+        elif token.string == '{':
+          current.record_type = True
+        current.tokens.append(token)
+        self._stack.append(current)
+        current = TypeAnnotation()
+        self._stack[-1].tokens.append(current)
+
+      elif token.type in (TYPE.DOC_TYPE_END_BLOCK, TYPE.DOC_END_BRACE):
+        prev = self._stack.pop()
+        prev.Append(current)
+        current = prev
+
+        # If an implicit type group was created, close it as well.
+        if prev.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP:
+          prev = self._stack.pop()
+          prev.Append(current)
+          current = prev
+        current.tokens.append(token)
+
+      elif token.type == TYPE.DOC_TYPE_MODIFIER:
+        if token.string == '!':
+          current.tokens.append(token)
+          current.not_null = True
+        elif token.string == '?':
+          current.tokens.append(token)
+          current.or_null = True
+        elif token.string == ':':
+          # 'key:value' -- the type parsed so far becomes the key.
+          current.tokens.append(token)
+          prev = current
+          current = TypeAnnotation()
+          prev.tokens.append(current)
+          current.key_type = prev
+        elif token.string == '=':
+          # For implicit type groups the '=' refers to the parent.
+          try:
+            if self._stack[-1].type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP:
+              self._stack[-1].tokens.append(token)
+              self._stack[-1].opt_arg = True
+            else:
+              current.tokens.append(token)
+              current.opt_arg = True
+          except IndexError:
+            self.ClosingError(token)
+        elif token.string == '|':
+          # If a type group has explicitly been opened do a normal append.
+          # Otherwise we have to open the type group and move the current
+          # type into it, before appending
+          if not self._stack[-1].type_group:
+            type_group = TypeAnnotation()
+            if current.key_type and current.key_type.identifier != 'function':
+              type_group.key_type = current.key_type
+              current.key_type = None
+            type_group.type_group = TypeAnnotation.IMPLICIT_TYPE_GROUP
+            # Fix the token order
+            prev = self._stack[-1].tokens.pop()
+            self._stack[-1].tokens.append(type_group)
+            type_group.tokens.append(prev)
+            self._stack.append(type_group)
+          self._stack[-1].tokens.append(token)
+          self.Append(current, error_token=token)
+          current = TypeAnnotation()
+          self._stack[-1].tokens.append(current)
+        elif token.string == ',':
+          self.Append(current, error_token=token)
+          current = TypeAnnotation()
+          self._stack[-1].tokens.append(token)
+          self._stack[-1].tokens.append(current)
+        else:
+          current.tokens.append(token)
+          self.Error(token, 'Invalid token')
+
+      elif token.type == TYPE.COMMENT:
+        # Identifiers may be split across comment boundaries; re-join them.
+        current.tokens.append(token)
+        current.identifier += token.string.strip()
+
+      elif token.type in [TYPE.DOC_PREFIX, TYPE.WHITESPACE]:
+        current.tokens.append(token)
+
+      else:
+        current.tokens.append(token)
+        self.Error(token, 'Unexpected token')
+
+      token = token.next
+
+    self.Append(current, error_token=token)
+    try:
+      ret = self._stack.pop()
+    except IndexError:
+      self.ClosingError(token)
+      # The type is screwed up, but let's return something.
+      return current
+
+    if self._stack and (len(self._stack) != 1 or
+                        ret.type_group != TypeAnnotation.IMPLICIT_TYPE_GROUP):
+      self.Error(token, 'Too many opening items.')
+
+    # Unwrap single-element containers; otherwise return the group itself.
+    return ret if len(ret.sub_types) > 1 else ret.sub_types[0]
+
+  def Append(self, type_obj, error_token):
+    """Appends a new TypeAnnotation object to the current parent."""
+    if self._stack:
+      self._stack[-1].Append(type_obj)
+    else:
+      self.ClosingError(error_token)
+
+  def ClosingError(self, token):
+    """Reports an error about too many closing items, but only once."""
+    if not self._closing_error:
+      self._closing_error = True
+      self.Error(token, 'Too many closing items.')
+
+  def Error(self, token, message):
+    """Calls the error_handler to post an error message."""
+    if self._error_handler:
+      self._error_handler.HandleError(error.Error(
+          errors.JSDOC_DOES_NOT_PARSE,
+          'Error parsing jsdoc type at token "%s" (column: %d): %s' %
+          (token.string, token.start_index, message), token))
diff --git a/tools/closure_linter/build/lib/closure_linter/typeannotation_test.py b/tools/closure_linter/build/lib/closure_linter/typeannotation_test.py
new file mode 100644
index 0000000000..da9dfa369f
--- /dev/null
+++ b/tools/closure_linter/build/lib/closure_linter/typeannotation_test.py
@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+"""Unit tests for the typeannotation module."""
+
+
+
+
+import unittest as googletest
+
+from closure_linter import testutil
+from closure_linter.common import erroraccumulator
+
+CRAZY_TYPE = ('Array.<!function(new:X,{a:null},...(c|d)):'
+ 'function(...(Object.<string>))>')
+
+
+class TypeErrorException(Exception):
+ """Exception for TypeErrors."""
+
+ def __init__(self, errors):
+ super(TypeErrorException, self).__init__()
+ self.errors = errors
+
+
+class TypeParserTest(googletest.TestCase):
+ """Tests for typeannotation parsing."""
+
+ def _ParseComment(self, script):
+ """Parse a script that contains one comment and return it."""
+ accumulator = erroraccumulator.ErrorAccumulator()
+ _, comments = testutil.ParseFunctionsAndComments(script, accumulator)
+ if accumulator.GetErrors():
+ raise TypeErrorException(accumulator.GetErrors())
+ self.assertEquals(1, len(comments))
+ return comments[0]
+
+ def _ParseType(self, type_str):
+ """Creates a comment to parse and returns the parsed type."""
+ comment = self._ParseComment('/** @type {%s} **/' % type_str)
+ return comment.GetDocFlags()[0].jstype
+
+ def assertProperReconstruction(self, type_str, matching_str=None):
+ """Parses the type and asserts that its repr matches the type.
+
+ If matching_str is specified, it will assert that the repr matches this
+ string instead.
+
+ Args:
+ type_str: The type string to parse.
+ matching_str: A string the __repr__ of the parsed type should match.
+ Returns:
+ The parsed js_type.
+ """
+ parsed_type = self._ParseType(type_str)
+ # Use listEqual assertion to more easily identify the difference
+ self.assertListEqual(list(matching_str or type_str),
+ list(repr(parsed_type)))
+ self.assertEquals(matching_str or type_str, repr(parsed_type))
+
+ # Newlines will be inserted by the file writer.
+ self.assertEquals(type_str.replace('\n', ''), parsed_type.ToString())
+ return parsed_type
+
+ def assertNullable(self, type_str, nullable=True):
+ parsed_type = self.assertProperReconstruction(type_str)
+ self.assertEquals(nullable, parsed_type.GetNullability(),
+ '"%s" should %sbe nullable' %
+ (type_str, 'not ' if nullable else ''))
+
+ def assertNotNullable(self, type_str):
+ return self.assertNullable(type_str, nullable=False)
+
+ def testReconstruction(self):
+ self.assertProperReconstruction('*')
+ self.assertProperReconstruction('number')
+ self.assertProperReconstruction('(((number)))')
+ self.assertProperReconstruction('!number')
+ self.assertProperReconstruction('?!number')
+ self.assertProperReconstruction('number=')
+ self.assertProperReconstruction('number=!?', '?!number=')
+ self.assertProperReconstruction('number|?string')
+ self.assertProperReconstruction('(number|string)')
+ self.assertProperReconstruction('?(number|string)')
+ self.assertProperReconstruction('Object.<number,string>')
+ self.assertProperReconstruction('function(new:Object)')
+ self.assertProperReconstruction('function(new:Object):number')
+ self.assertProperReconstruction('function(new:Object,Element):number')
+ self.assertProperReconstruction('function(this:T,...)')
+ self.assertProperReconstruction('{a:?number}')
+ self.assertProperReconstruction('{a:?number,b:(number|string)}')
+ self.assertProperReconstruction('{c:{nested_element:*}|undefined}')
+ self.assertProperReconstruction('{handleEvent:function(?):?}')
+ self.assertProperReconstruction('function():?|null')
+ self.assertProperReconstruction('null|function():?|bar')
+
+ def testOptargs(self):
+ self.assertProperReconstruction('number=')
+ self.assertProperReconstruction('number|string=')
+ self.assertProperReconstruction('(number|string)=')
+ self.assertProperReconstruction('(number|string=)')
+ self.assertProperReconstruction('(number=|string)')
+ self.assertProperReconstruction('function(...):number=')
+
+ def testIndepth(self):
+ # Do a deeper check of the crazy identifier
+ crazy = self.assertProperReconstruction(CRAZY_TYPE)
+ self.assertEquals('Array.', crazy.identifier)
+ self.assertEquals(1, len(crazy.sub_types))
+ func1 = crazy.sub_types[0]
+ func2 = func1.return_type
+ self.assertEquals('function', func1.identifier)
+ self.assertEquals('function', func2.identifier)
+ self.assertEquals(3, len(func1.sub_types))
+ self.assertEquals(1, len(func2.sub_types))
+ self.assertEquals('Object.', func2.sub_types[0].sub_types[0].identifier)
+
+ def testIterIdentifiers(self):
+ nested_identifiers = self._ParseType('(a|{b:(c|function(new:d):e)})')
+ for identifier in ('a', 'b', 'c', 'd', 'e'):
+ self.assertIn(identifier, nested_identifiers.IterIdentifiers())
+
+ def testIsEmpty(self):
+ self.assertTrue(self._ParseType('').IsEmpty())
+ self.assertFalse(self._ParseType('?').IsEmpty())
+ self.assertFalse(self._ParseType('!').IsEmpty())
+ self.assertFalse(self._ParseType('<?>').IsEmpty())
+
+ def testIsConstructor(self):
+ self.assertFalse(self._ParseType('').IsConstructor())
+ self.assertFalse(self._ParseType('Array.<number>').IsConstructor())
+ self.assertTrue(self._ParseType('function(new:T)').IsConstructor())
+
+ def testIsVarArgsType(self):
+ self.assertTrue(self._ParseType('...number').IsVarArgsType())
+ self.assertTrue(self._ParseType('...Object|Array').IsVarArgsType())
+ self.assertTrue(self._ParseType('...(Object|Array)').IsVarArgsType())
+ self.assertFalse(self._ParseType('Object|...Array').IsVarArgsType())
+ self.assertFalse(self._ParseType('(...Object|Array)').IsVarArgsType())
+
+ def testIsUnknownType(self):
+ self.assertTrue(self._ParseType('?').IsUnknownType())
+ self.assertTrue(self._ParseType('Foo.<?>').sub_types[0].IsUnknownType())
+ self.assertFalse(self._ParseType('?|!').IsUnknownType())
+ self.assertTrue(self._ParseType('?|!').sub_types[0].IsUnknownType())
+ self.assertFalse(self._ParseType('!').IsUnknownType())
+
+ long_type = 'function():?|{handleEvent:function(?=):?,sample:?}|?='
+ record = self._ParseType(long_type)
+ # First check that there's not just one type with 3 return types, but three
+ # top-level types.
+ self.assertEquals(3, len(record.sub_types))
+
+ # Now extract all unknown type instances and verify that they really are.
+ handle_event, sample = record.sub_types[1].sub_types
+ for i, sub_type in enumerate([
+ record.sub_types[0].return_type,
+ handle_event.return_type,
+ handle_event.sub_types[0],
+ sample,
+ record.sub_types[2]]):
+ self.assertTrue(sub_type.IsUnknownType(),
+ 'Type %d should be the unknown type: %s\n%s' % (
+ i, sub_type.tokens, record.Dump()))
+
+ def testTypedefNames(self):
+ easy = self._ParseType('{a}')
+ self.assertTrue(easy.record_type)
+
+ easy = self.assertProperReconstruction('{a}', '{a:}').sub_types[0]
+ self.assertEquals('a', easy.key_type.identifier)
+ self.assertEquals('', easy.identifier)
+
+ easy = self.assertProperReconstruction('{a:b}').sub_types[0]
+ self.assertEquals('a', easy.key_type.identifier)
+ self.assertEquals('b', easy.identifier)
+
+ def assertTypeError(self, type_str):
+ """Asserts that parsing the given type raises a linter error."""
+ self.assertRaises(TypeErrorException, self._ParseType, type_str)
+
+ def testParseBadTypes(self):
+ """Tests that several errors in types don't break the parser."""
+ self.assertTypeError('<')
+ self.assertTypeError('>')
+ self.assertTypeError('Foo.<Bar')
+ self.assertTypeError('Foo.Bar>=')
+ self.assertTypeError('Foo.<Bar>>=')
+ self.assertTypeError('(')
+ self.assertTypeError(')')
+ self.assertTypeError('Foo.<Bar)>')
+ self._ParseType(':')
+ self._ParseType(':foo')
+ self.assertTypeError(':)foo')
+ self.assertTypeError('(a|{b:(c|function(new:d):e')
+
+ def testNullable(self):
+ self.assertNullable('null')
+ self.assertNullable('Object')
+ self.assertNullable('?string')
+ self.assertNullable('?number')
+
+ self.assertNotNullable('string')
+ self.assertNotNullable('number')
+ self.assertNotNullable('boolean')
+ self.assertNotNullable('function(Object)')
+ self.assertNotNullable('function(Object):Object')
+ self.assertNotNullable('function(?Object):?Object')
+ self.assertNotNullable('!Object')
+
+ self.assertNotNullable('boolean|string')
+ self.assertNotNullable('(boolean|string)')
+
+ self.assertNullable('(boolean|string|null)')
+ self.assertNullable('(?boolean)')
+ self.assertNullable('?(boolean)')
+
+ self.assertNullable('(boolean|Object)')
+ self.assertNotNullable('(boolean|(string|{a:}))')
+
+ def testSpaces(self):
+ """Tests that spaces don't change the outcome."""
+ type_str = (' A < b | ( c | ? ! d e f ) > | '
+ 'function ( x : . . . ) : { y : z = } ')
+ two_spaces = type_str.replace(' ', ' ')
+ no_spaces = type_str.replace(' ', '')
+ newlines = type_str.replace(' ', '\n * ')
+ self.assertProperReconstruction(no_spaces)
+ self.assertProperReconstruction(type_str, no_spaces)
+ self.assertProperReconstruction(two_spaces, no_spaces)
+ self.assertProperReconstruction(newlines, no_spaces)
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/closure_linter.egg-info/PKG-INFO b/tools/closure_linter/closure_linter.egg-info/PKG-INFO
index 918e2433f9..8055c15c21 100644
--- a/tools/closure_linter/closure_linter.egg-info/PKG-INFO
+++ b/tools/closure_linter/closure_linter.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.0
Name: closure-linter
-Version: 2.2.6
+Version: 2.3.17
Summary: Closure Linter
Home-page: http://code.google.com/p/closure-linter
Author: The Closure Linter Authors
diff --git a/tools/closure_linter/closure_linter.egg-info/SOURCES.txt b/tools/closure_linter/closure_linter.egg-info/SOURCES.txt
index b64d829f7e..a193cdfeb9 100644
--- a/tools/closure_linter/closure_linter.egg-info/SOURCES.txt
+++ b/tools/closure_linter/closure_linter.egg-info/SOURCES.txt
@@ -1,12 +1,20 @@
README
setup.py
closure_linter/__init__.py
+closure_linter/aliaspass.py
+closure_linter/aliaspass_test.py
closure_linter/checker.py
closure_linter/checkerbase.py
+closure_linter/closurizednamespacesinfo.py
+closure_linter/closurizednamespacesinfo_test.py
closure_linter/ecmalintrules.py
closure_linter/ecmametadatapass.py
+closure_linter/error_check.py
closure_linter/error_fixer.py
+closure_linter/error_fixer_test.py
+closure_linter/errorrecord.py
closure_linter/errorrules.py
+closure_linter/errorrules_test.py
closure_linter/errors.py
closure_linter/fixjsstyle.py
closure_linter/fixjsstyle_test.py
@@ -18,8 +26,21 @@ closure_linter/javascriptstatetracker.py
closure_linter/javascriptstatetracker_test.py
closure_linter/javascripttokenizer.py
closure_linter/javascripttokens.py
+closure_linter/not_strict_test.py
+closure_linter/requireprovidesorter.py
+closure_linter/requireprovidesorter_test.py
+closure_linter/runner.py
+closure_linter/runner_test.py
+closure_linter/scopeutil.py
+closure_linter/scopeutil_test.py
closure_linter/statetracker.py
+closure_linter/statetracker_test.py
+closure_linter/strict_test.py
+closure_linter/testutil.py
closure_linter/tokenutil.py
+closure_linter/tokenutil_test.py
+closure_linter/typeannotation.py
+closure_linter/typeannotation_test.py
closure_linter.egg-info/PKG-INFO
closure_linter.egg-info/SOURCES.txt
closure_linter.egg-info/dependency_links.txt
@@ -30,7 +51,7 @@ closure_linter/common/__init__.py
closure_linter/common/error.py
closure_linter/common/erroraccumulator.py
closure_linter/common/errorhandler.py
-closure_linter/common/errorprinter.py
+closure_linter/common/erroroutput.py
closure_linter/common/filetestcase.py
closure_linter/common/htmlutil.py
closure_linter/common/lintrunner.py
@@ -38,4 +59,5 @@ closure_linter/common/matcher.py
closure_linter/common/position.py
closure_linter/common/simplefileflags.py
closure_linter/common/tokenizer.py
-closure_linter/common/tokens.py \ No newline at end of file
+closure_linter/common/tokens.py
+closure_linter/common/tokens_test.py \ No newline at end of file
diff --git a/tools/closure_linter/closure_linter/__init__.py b/tools/closure_linter/closure_linter/__init__.py
index 4265cc3e6c..1798c8cfff 100755
--- a/tools/closure_linter/closure_linter/__init__.py
+++ b/tools/closure_linter/closure_linter/__init__.py
@@ -1 +1,16 @@
#!/usr/bin/env python
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Package indicator for gjslint."""
diff --git a/tools/closure_linter/closure_linter/aliaspass.py b/tools/closure_linter/closure_linter/aliaspass.py
new file mode 100644
index 0000000000..bb37bfa07b
--- /dev/null
+++ b/tools/closure_linter/closure_linter/aliaspass.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Pass that scans for goog.scope aliases and lint/usage errors."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+from closure_linter import ecmametadatapass
+from closure_linter import errors
+from closure_linter import javascripttokens
+from closure_linter import scopeutil
+from closure_linter import tokenutil
+from closure_linter.common import error
+
+
+# TODO(nnaze): Create a Pass interface and move this class, EcmaMetaDataPass,
+# and related classes onto it.
+
+
+def _GetAliasForIdentifier(identifier, alias_map):
+ """Returns the aliased_symbol name for an identifier.
+
+ Example usage:
+ >>> alias_map = {'MyClass': 'goog.foo.MyClass'}
+ >>> _GetAliasForIdentifier('MyClass.prototype.action', alias_map)
+ 'goog.foo.MyClass.prototype.action'
+
+ >>> _GetAliasForIdentifier('MyClass.prototype.action', {})
+ None
+
+ Args:
+ identifier: The identifier.
+ alias_map: A dictionary mapping a symbol to an alias.
+
+ Returns:
+ The aliased symbol name or None if not found.
+ """
+ ns = identifier.split('.', 1)[0]
+ aliased_symbol = alias_map.get(ns)
+ if aliased_symbol:
+ return aliased_symbol + identifier[len(ns):]
+
+
+def _SetTypeAlias(js_type, alias_map):
+ """Updates the alias for identifiers in a type.
+
+ Args:
+ js_type: A typeannotation.TypeAnnotation instance.
+ alias_map: A dictionary mapping a symbol to an alias.
+ """
+ aliased_symbol = _GetAliasForIdentifier(js_type.identifier, alias_map)
+ if aliased_symbol:
+ js_type.alias = aliased_symbol
+ for sub_type in js_type.IterTypes():
+ _SetTypeAlias(sub_type, alias_map)
+
+
+class AliasPass(object):
+ """Pass to identify goog.scope() usages.
+
+ Identifies goog.scope() usages and finds lint/usage errors. Notes any
+ aliases of symbols in Closurized namespaces (that is, reassignments
+ such as "var MyClass = goog.foo.MyClass;") and annotates identifiers
+ when they're using an alias (so they may be expanded to the full symbol
+ later -- that "MyClass.prototype.action" refers to
+ "goog.foo.MyClass.prototype.action" when expanded.).
+ """
+
+ def __init__(self, closurized_namespaces=None, error_handler=None):
+ """Creates a new pass.
+
+ Args:
+ closurized_namespaces: A set of Closurized namespaces (e.g. 'goog').
+ error_handler: An error handler to report lint errors to.
+ """
+
+ self._error_handler = error_handler
+
+ # If we have namespaces, freeze the set.
+ if closurized_namespaces:
+ closurized_namespaces = frozenset(closurized_namespaces)
+
+ self._closurized_namespaces = closurized_namespaces
+
+ def Process(self, start_token):
+ """Runs the pass on a token stream.
+
+ Args:
+ start_token: The first token in the stream.
+ """
+
+ if start_token is None:
+ return
+
+ # TODO(nnaze): Add more goog.scope usage checks.
+ self._CheckGoogScopeCalls(start_token)
+
+ # If we have closurized namespaces, identify aliased identifiers.
+ if self._closurized_namespaces:
+ context = start_token.metadata.context
+ root_context = context.GetRoot()
+ self._ProcessRootContext(root_context)
+
+ def _CheckGoogScopeCalls(self, start_token):
+ """Check goog.scope calls for lint/usage errors."""
+
+ def IsScopeToken(token):
+ return (token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and
+ token.string == 'goog.scope')
+
+ # Find all the goog.scope tokens in the file
+ scope_tokens = [t for t in start_token if IsScopeToken(t)]
+
+ for token in scope_tokens:
+ scope_context = token.metadata.context
+
+ if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and
+ scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT):
+ self._MaybeReportError(
+ error.Error(errors.INVALID_USE_OF_GOOG_SCOPE,
+ 'goog.scope call not in global scope', token))
+
+ # There should be only one goog.scope reference. Register errors for
+ # every instance after the first.
+ for token in scope_tokens[1:]:
+ self._MaybeReportError(
+ error.Error(errors.EXTRA_GOOG_SCOPE_USAGE,
+ 'More than one goog.scope call in file.', token))
+
+ def _MaybeReportError(self, err):
+ """Report an error to the handler (if registered)."""
+ if self._error_handler:
+ self._error_handler.HandleError(err)
+
+ @classmethod
+ def _YieldAllContexts(cls, context):
+ """Yields all contexts that are contained by the given context."""
+ yield context
+ for child_context in context.children:
+ for descendent_child in cls._YieldAllContexts(child_context):
+ yield descendent_child
+
+ @staticmethod
+ def _IsTokenInParentBlock(token, parent_block):
+ """Determines whether the given token is contained by the given block.
+
+ Args:
+ token: A token
+ parent_block: An EcmaContext.
+
+ Returns:
+ Whether the token is in a context that is or is a child of the given
+ parent_block context.
+ """
+ context = token.metadata.context
+
+ while context:
+ if context is parent_block:
+ return True
+ context = context.parent
+
+ return False
+
+ def _ProcessRootContext(self, root_context):
+ """Processes all goog.scope blocks under the root context."""
+
+ assert root_context.type is ecmametadatapass.EcmaContext.ROOT
+
+ # Process aliases in statements in the root scope for goog.module-style
+ # aliases.
+ global_alias_map = {}
+ for context in root_context.children:
+ if context.type == ecmametadatapass.EcmaContext.STATEMENT:
+ for statement_child in context.children:
+ if statement_child.type == ecmametadatapass.EcmaContext.VAR:
+ match = scopeutil.MatchModuleAlias(statement_child)
+ if match:
+ # goog.require aliases cannot use further aliases, the symbol is
+ # the second part of match, directly.
+ symbol = match[1]
+ if scopeutil.IsInClosurizedNamespace(symbol,
+ self._closurized_namespaces):
+ global_alias_map[match[0]] = symbol
+
+ # Process each block to find aliases.
+ for context in root_context.children:
+ self._ProcessBlock(context, global_alias_map)
+
+ def _ProcessBlock(self, context, global_alias_map):
+ """Scans a goog.scope block to find aliases and mark alias tokens."""
+ alias_map = global_alias_map.copy()
+
+ # Iterate over every token in the context. Each token points to one
+ # context, but multiple tokens may point to the same context. We only want
+ # to check each context once, so keep track of those we've seen.
+ seen_contexts = set()
+ token = context.start_token
+ while token and self._IsTokenInParentBlock(token, context):
+ token_context = token.metadata.context if token.metadata else None
+
+ # Check to see if this token is an alias.
+ if token_context and token_context not in seen_contexts:
+ seen_contexts.add(token_context)
+
+ # If this is an alias statement in the goog.scope block.
+ if (token_context.type == ecmametadatapass.EcmaContext.VAR and
+ scopeutil.IsGoogScopeBlock(token_context.parent.parent)):
+ match = scopeutil.MatchAlias(token_context)
+
+ # If this is an alias, remember it in the map.
+ if match:
+ alias, symbol = match
+ symbol = _GetAliasForIdentifier(symbol, alias_map) or symbol
+ if scopeutil.IsInClosurizedNamespace(symbol,
+ self._closurized_namespaces):
+ alias_map[alias] = symbol
+
+ # If this token is an identifier that matches an alias,
+ # mark the token as an alias to the original symbol.
+ if (token.type is javascripttokens.JavaScriptTokenType.SIMPLE_LVALUE or
+ token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER):
+ identifier = tokenutil.GetIdentifierForToken(token)
+ if identifier:
+ aliased_symbol = _GetAliasForIdentifier(identifier, alias_map)
+ if aliased_symbol:
+ token.metadata.aliased_symbol = aliased_symbol
+
+ elif token.type == javascripttokens.JavaScriptTokenType.DOC_FLAG:
+ flag = token.attached_object
+ if flag and flag.HasType() and flag.jstype:
+ _SetTypeAlias(flag.jstype, alias_map)
+
+ token = token.next # Get next token
diff --git a/tools/closure_linter/closure_linter/aliaspass_test.py b/tools/closure_linter/closure_linter/aliaspass_test.py
new file mode 100755
index 0000000000..7042e53487
--- /dev/null
+++ b/tools/closure_linter/closure_linter/aliaspass_test.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the aliaspass module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import unittest as googletest
+
+from closure_linter import aliaspass
+from closure_linter import errors
+from closure_linter import javascriptstatetracker
+from closure_linter import testutil
+from closure_linter.common import erroraccumulator
+
+
+def _GetTokenByLineAndString(start_token, string, line_number):
+ for token in start_token:
+ if token.line_number == line_number and token.string == string:
+ return token
+
+
+class AliasPassTest(googletest.TestCase):
+
+ def testInvalidGoogScopeCall(self):
+ start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCOPE_SCRIPT)
+
+ error_accumulator = erroraccumulator.ErrorAccumulator()
+ alias_pass = aliaspass.AliasPass(
+ error_handler=error_accumulator)
+ alias_pass.Process(start_token)
+
+ alias_errors = error_accumulator.GetErrors()
+ self.assertEquals(1, len(alias_errors))
+
+ alias_error = alias_errors[0]
+
+ self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, alias_error.code)
+ self.assertEquals('goog.scope', alias_error.token.string)
+
+ def testAliasedIdentifiers(self):
+ start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
+ alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
+ alias_pass.Process(start_token)
+
+ alias_token = _GetTokenByLineAndString(start_token, 'Event', 4)
+ self.assertTrue(alias_token.metadata.is_alias_definition)
+
+ my_class_token = _GetTokenByLineAndString(start_token, 'myClass', 9)
+ self.assertIsNone(my_class_token.metadata.aliased_symbol)
+
+ component_token = _GetTokenByLineAndString(start_token, 'Component', 17)
+ self.assertEquals('goog.ui.Component',
+ component_token.metadata.aliased_symbol)
+
+ event_token = _GetTokenByLineAndString(start_token, 'Event.Something', 17)
+ self.assertEquals('goog.events.Event.Something',
+ event_token.metadata.aliased_symbol)
+
+ non_closurized_token = _GetTokenByLineAndString(
+ start_token, 'NonClosurizedClass', 18)
+ self.assertIsNone(non_closurized_token.metadata.aliased_symbol)
+
+ long_start_token = _GetTokenByLineAndString(start_token, 'Event', 24)
+ self.assertEquals('goog.events.Event.MultilineIdentifier.someMethod',
+ long_start_token.metadata.aliased_symbol)
+
+ def testAliasedDoctypes(self):
+ """Tests that aliases are correctly expanded within type annotations."""
+ start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
+ tracker = javascriptstatetracker.JavaScriptStateTracker()
+ tracker.DocFlagPass(start_token, error_handler=None)
+
+ alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
+ alias_pass.Process(start_token)
+
+ flag_token = _GetTokenByLineAndString(start_token, '@type', 22)
+ self.assertEquals(
+ 'goog.events.Event.<goog.ui.Component,Array<myproject.foo.MyClass>>',
+ repr(flag_token.attached_object.jstype))
+
+ def testModuleAlias(self):
+ start_token = testutil.TokenizeSourceAndRunEcmaPass("""
+goog.module('goog.test');
+var Alias = goog.require('goog.Alias');
+Alias.use();
+""")
+ alias_pass = aliaspass.AliasPass(set(['goog']))
+ alias_pass.Process(start_token)
+ alias_token = _GetTokenByLineAndString(start_token, 'Alias', 3)
+ self.assertTrue(alias_token.metadata.is_alias_definition)
+
+ def testMultipleGoogScopeCalls(self):
+ start_token = testutil.TokenizeSourceAndRunEcmaPass(
+ _TEST_MULTIPLE_SCOPE_SCRIPT)
+
+ error_accumulator = erroraccumulator.ErrorAccumulator()
+
+ alias_pass = aliaspass.AliasPass(
+ set(['goog', 'myproject']),
+ error_handler=error_accumulator)
+ alias_pass.Process(start_token)
+
+ alias_errors = error_accumulator.GetErrors()
+
+ self.assertEquals(3, len(alias_errors))
+
+ error = alias_errors[0]
+ self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, error.code)
+ self.assertEquals(7, error.token.line_number)
+
+ error = alias_errors[1]
+ self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
+ self.assertEquals(7, error.token.line_number)
+
+ error = alias_errors[2]
+ self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
+ self.assertEquals(11, error.token.line_number)
+
+
+_TEST_ALIAS_SCRIPT = """
+goog.scope(function() {
+var events = goog.events; // scope alias
+var Event = events.
+ Event; // nested multiline scope alias
+
+// This should not be registered as an aliased identifier because
+// it appears before the alias.
+var myClass = new MyClass();
+
+var Component = goog.ui.Component; // scope alias
+var MyClass = myproject.foo.MyClass; // scope alias
+
+// Scope alias of non-Closurized namespace.
+var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
+
+var component = new Component(Event.Something);
+var nonClosurized = NonClosurizedClass();
+
+/**
+ * A created namespace with a really long identifier.
+ * @type {events.Event.<Component,Array<MyClass>}
+ */
+Event.
+ MultilineIdentifier.
+ someMethod = function() {};
+});
+"""
+
+_TEST_SCOPE_SCRIPT = """
+function foo () {
+ // This goog.scope call is invalid.
+ goog.scope(function() {
+
+ });
+}
+"""
+
+_TEST_MULTIPLE_SCOPE_SCRIPT = """
+goog.scope(function() {
+ // do nothing
+});
+
+function foo() {
+ var test = goog.scope; // We should not see goog.scope mentioned.
+}
+
+// This goog.scope invalid. There can be only one.
+goog.scope(function() {
+
+});
+"""
+
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/closure_linter/checker.py b/tools/closure_linter/closure_linter/checker.py
index 4cdac931ff..1c984173b0 100755
--- a/tools/closure_linter/closure_linter/checker.py
+++ b/tools/closure_linter/closure_linter/checker.py
@@ -21,62 +21,88 @@ __author__ = ('robbyw@google.com (Robert Walker)',
import gflags as flags
+from closure_linter import aliaspass
from closure_linter import checkerbase
-from closure_linter import ecmametadatapass
-from closure_linter import errors
+from closure_linter import closurizednamespacesinfo
from closure_linter import javascriptlintrules
-from closure_linter import javascriptstatetracker
-from closure_linter.common import errorprinter
-from closure_linter.common import lintrunner
-flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
- 'List of files with relaxed documentation checks. Will not '
- 'report errors for missing documentation, some missing '
- 'descriptions, or methods whose @return tags don\'t have a '
- 'matching return statement.')
+
+flags.DEFINE_list('closurized_namespaces', '',
+ 'Namespace prefixes, used for testing of'
+ 'goog.provide/require')
+flags.DEFINE_list('ignored_extra_namespaces', '',
+ 'Fully qualified namespaces that should be not be reported '
+ 'as extra by the linter.')
class JavaScriptStyleChecker(checkerbase.CheckerBase):
"""Checker that applies JavaScriptLintRules."""
- def __init__(self, error_handler):
+ def __init__(self, state_tracker, error_handler):
"""Initialize an JavaScriptStyleChecker object.
Args:
- error_handler: Error handler to pass all errors to
+ state_tracker: State tracker.
+ error_handler: Error handler to pass all errors to.
"""
+ self._namespaces_info = None
+ self._alias_pass = None
+ if flags.FLAGS.closurized_namespaces:
+ self._namespaces_info = (
+ closurizednamespacesinfo.ClosurizedNamespacesInfo(
+ flags.FLAGS.closurized_namespaces,
+ flags.FLAGS.ignored_extra_namespaces))
+
+ self._alias_pass = aliaspass.AliasPass(
+ flags.FLAGS.closurized_namespaces, error_handler)
+
checkerbase.CheckerBase.__init__(
self,
error_handler=error_handler,
- lint_rules=javascriptlintrules.JavaScriptLintRules(),
- state_tracker=javascriptstatetracker.JavaScriptStateTracker(
- closurized_namespaces=flags.FLAGS.closurized_namespaces),
- metadata_pass=ecmametadatapass.EcmaMetaDataPass(),
- limited_doc_files=flags.FLAGS.limited_doc_files)
+ lint_rules=javascriptlintrules.JavaScriptLintRules(
+ self._namespaces_info),
+ state_tracker=state_tracker)
+ def Check(self, start_token, limited_doc_checks=False, is_html=False,
+ stop_token=None):
+ """Checks a token stream for lint warnings/errors.
-class GJsLintRunner(lintrunner.LintRunner):
- """Wrapper class to run GJsLint."""
-
- def Run(self, filenames, error_handler=None):
- """Run GJsLint on the given filenames.
+ Adds a separate pass for computing dependency information based on
+ goog.require and goog.provide statements prior to the main linting pass.
Args:
- filenames: The filenames to check
- error_handler: An optional ErrorHandler object, an ErrorPrinter is used if
- none is specified.
-
- Returns:
- error_count, file_count: The number of errors and the number of files that
- contain errors.
+ start_token: The first token in the token stream.
+ limited_doc_checks: Whether to perform limited checks.
+ is_html: Whether this token stream is HTML.
+ stop_token: If given, checks should stop at this token.
"""
- if not error_handler:
- error_handler = errorprinter.ErrorPrinter(errors.NEW_ERRORS)
+ self._lint_rules.Initialize(self, limited_doc_checks, is_html)
+
+ self._state_tracker.DocFlagPass(start_token, self._error_handler)
- checker = JavaScriptStyleChecker(error_handler)
+ if self._alias_pass:
+ self._alias_pass.Process(start_token)
- # Check the list of files.
- for filename in filenames:
- checker.Check(filename)
+ # To maximize the amount of errors that get reported before a parse error
+ # is displayed, don't run the dependency pass if a parse error exists.
+ if self._namespaces_info:
+ self._namespaces_info.Reset()
+ self._ExecutePass(start_token, self._DependencyPass, stop_token)
- return error_handler
+ self._ExecutePass(start_token, self._LintPass, stop_token)
+
+ # If we have a stop_token, we didn't end up reading the whole file and,
+ # thus, don't call Finalize to do end-of-file checks.
+ if not stop_token:
+ self._lint_rules.Finalize(self._state_tracker)
+
+ def _DependencyPass(self, token):
+ """Processes an individual token for dependency information.
+
+ Used to encapsulate the logic needed to process an individual token so that
+ it can be passed to _ExecutePass.
+
+ Args:
+ token: The token to process.
+ """
+ self._namespaces_info.ProcessToken(token, self._state_tracker)
diff --git a/tools/closure_linter/closure_linter/checkerbase.py b/tools/closure_linter/closure_linter/checkerbase.py
index 123cb72860..6679ded05b 100755
--- a/tools/closure_linter/closure_linter/checkerbase.py
+++ b/tools/closure_linter/closure_linter/checkerbase.py
@@ -16,26 +16,16 @@
"""Base classes for writing checkers that operate on tokens."""
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
-import traceback
-
-import gflags as flags
-from closure_linter import ecmametadatapass
from closure_linter import errorrules
-from closure_linter import errors
-from closure_linter import javascripttokenizer
from closure_linter.common import error
-from closure_linter.common import htmlutil
-FLAGS = flags.FLAGS
-flags.DEFINE_boolean('debug_tokens', False,
- 'Whether to print all tokens for debugging.')
-
-flags.DEFINE_boolean('error_trace', False,
- 'Whether to show error exceptions.')
class LintRulesBase(object):
"""Base class for all classes defining the lint rules for a language."""
@@ -61,6 +51,14 @@ class LintRulesBase(object):
if errorrules.ShouldReportError(code):
self.__checker.HandleError(code, message, token, position, fix_data)
+ def _SetLimitedDocChecks(self, limited_doc_checks):
+ """Sets whether doc checking is relaxed for this file.
+
+ Args:
+ limited_doc_checks: Whether doc checking is relaxed for this file.
+ """
+ self._limited_doc_checks = limited_doc_checks
+
def CheckToken(self, token, parser_state):
"""Checks a token, given the current parser_state, for warnings and errors.
@@ -73,12 +71,11 @@ class LintRulesBase(object):
"""
raise TypeError('Abstract method CheckToken not implemented')
- def Finalize(self, parser_state, tokenizer_mode):
+ def Finalize(self, parser_state):
"""Perform all checks that need to occur after all lines are processed.
Args:
parser_state: State of the parser after parsing all tokens
- tokenizer_mode: Mode of the tokenizer after parsing the entire page
Raises:
TypeError: If not overridden.
@@ -89,8 +86,7 @@ class LintRulesBase(object):
class CheckerBase(object):
"""This class handles checking a LintRules object against a file."""
- def __init__(self, error_handler, lint_rules, state_tracker,
- limited_doc_files=None, metadata_pass=None):
+ def __init__(self, error_handler, lint_rules, state_tracker):
"""Initialize a checker object.
Args:
@@ -98,17 +94,13 @@ class CheckerBase(object):
lint_rules: LintRules object defining lint errors given a token
and state_tracker object.
state_tracker: Object that tracks the current state in the token stream.
- limited_doc_files: List of filenames that are not required to have
- documentation comments.
- metadata_pass: Object that builds metadata about the token stream.
+
"""
- self.__error_handler = error_handler
- self.__lint_rules = lint_rules
- self.__state_tracker = state_tracker
- self.__metadata_pass = metadata_pass
- self.__limited_doc_files = limited_doc_files
- self.__tokenizer = javascripttokenizer.JavaScriptTokenizer()
- self.__has_errors = False
+ self._error_handler = error_handler
+ self._lint_rules = lint_rules
+ self._state_tracker = state_tracker
+
+ self._has_errors = False
def HandleError(self, code, message, token, position=None,
fix_data=None):
@@ -122,8 +114,8 @@ class CheckerBase(object):
position: The position of the error, defaults to None.
fix_data: Metadata used for fixing the error.
"""
- self.__has_errors = True
- self.__error_handler.HandleError(
+ self._has_errors = True
+ self._error_handler.HandleError(
error.Error(code, message, token, position, fix_data))
def HasErrors(self):
@@ -132,106 +124,69 @@ class CheckerBase(object):
Returns:
True if the style checker has found any errors.
"""
- return self.__has_errors
+ return self._has_errors
+
+ def Check(self, start_token, limited_doc_checks=False, is_html=False,
+ stop_token=None):
+ """Checks a token stream, reporting errors to the error reporter.
+
+ Args:
+ start_token: First token in token stream.
+ limited_doc_checks: Whether doc checking is relaxed for this file.
+ is_html: Whether the file being checked is an HTML file with extracted
+ contents.
+ stop_token: If given, check should stop at this token.
+ """
- def Check(self, filename):
- """Checks the file, printing warnings and errors as they are found.
+ self._lint_rules.Initialize(self, limited_doc_checks, is_html)
+ self._ExecutePass(start_token, self._LintPass, stop_token=stop_token)
+ self._lint_rules.Finalize(self._state_tracker)
+
+ def _LintPass(self, token):
+ """Checks an individual token for lint warnings/errors.
+
+ Used to encapsulate the logic needed to check an individual token so that it
+ can be passed to _ExecutePass.
Args:
- filename: The name of the file to check.
+ token: The token to check.
"""
- try:
- f = open(filename)
- except IOError:
- self.__error_handler.HandleFile(filename, None)
- self.HandleError(errors.FILE_NOT_FOUND, 'File not found', None)
- self.__error_handler.FinishFile()
- return
-
- try:
- if filename.endswith('.html') or filename.endswith('.htm'):
- self.CheckLines(filename, htmlutil.GetScriptLines(f), True)
- else:
- self.CheckLines(filename, f, False)
- finally:
- f.close()
-
- def CheckLines(self, filename, lines_iter, is_html):
- """Checks a file, given as an iterable of lines, for warnings and errors.
+ self._lint_rules.CheckToken(token, self._state_tracker)
+
+ def _ExecutePass(self, token, pass_function, stop_token=None):
+ """Calls the given function for every token in the given token stream.
+
+    As each token is passed to the given function, state is kept up to date.
+    Errors raised while processing a token are allowed to bubble up so callers
+    can see the full stack trace. If a stop_token is given, the pass will
+    proceed as normal until that token is reached and then end without
+    processing it.
Args:
- filename: The name of the file to check.
- lines_iter: An iterator that yields one line of the file at a time.
- is_html: Whether the file being checked is an HTML file with extracted
- contents.
+ token: The first token in the token stream.
+ pass_function: The function to call for each token in the token stream.
+ stop_token: The last token to check (if given).
- Returns:
- A boolean indicating whether the full file could be checked or if checking
- failed prematurely.
+ Raises:
+ Exception: If any error occurred while calling the given function.
"""
- limited_doc_checks = False
- if self.__limited_doc_files:
- for limited_doc_filename in self.__limited_doc_files:
- if filename.endswith(limited_doc_filename):
- limited_doc_checks = True
- break
-
- state_tracker = self.__state_tracker
- lint_rules = self.__lint_rules
- state_tracker.Reset()
- lint_rules.Initialize(self, limited_doc_checks, is_html)
-
- token = self.__tokenizer.TokenizeFile(lines_iter)
-
- parse_error = None
- if self.__metadata_pass:
- try:
- self.__metadata_pass.Reset()
- self.__metadata_pass.Process(token)
- except ecmametadatapass.ParseError, caught_parse_error:
- if FLAGS.error_trace:
- traceback.print_exc()
- parse_error = caught_parse_error
- except Exception:
- print 'Internal error in %s' % filename
- traceback.print_exc()
- return False
-
- self.__error_handler.HandleFile(filename, token)
+ self._state_tracker.Reset()
while token:
- if FLAGS.debug_tokens:
- print token
-
- if parse_error and parse_error.token == token:
- # Report any parse errors from above once we find the token.
- message = ('Error parsing file at token "%s". Unable to '
- 'check the rest of file.' % token.string)
- self.HandleError(errors.FILE_DOES_NOT_PARSE, message, token)
- self.__error_handler.FinishFile()
- return False
-
- if FLAGS.error_trace:
- state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
- else:
- try:
- state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
- except:
- self.HandleError(errors.FILE_DOES_NOT_PARSE,
- ('Error parsing file at token "%s". Unable to '
- 'check the rest of file.' % token.string),
- token)
- self.__error_handler.FinishFile()
- return False
-
- # Check the token for style guide violations.
- lint_rules.CheckToken(token, state_tracker)
-
- state_tracker.HandleAfterToken(token)
-
- # Move to the next token.
- token = token.next
+ # When we are looking at a token and decided to delete the whole line, we
+ # will delete all of them in the "HandleToken()" below. So the current
+ # token and subsequent ones may already be deleted here. The way we
+ # delete a token does not wipe out the previous and next pointers of the
+ # deleted token. So we need to check the token itself to make sure it is
+ # not deleted.
+ if not token.is_deleted:
+ # End the pass at the stop token
+ if stop_token and token is stop_token:
+ return
+
+ self._state_tracker.HandleToken(
+ token, self._state_tracker.GetLastNonSpaceToken())
+ pass_function(token)
+ self._state_tracker.HandleAfterToken(token)
- lint_rules.Finalize(state_tracker, self.__tokenizer.mode)
- self.__error_handler.FinishFile()
- return True
+ token = token.next
diff --git a/tools/closure_linter/closure_linter/closurizednamespacesinfo.py b/tools/closure_linter/closure_linter/closurizednamespacesinfo.py
new file mode 100755
index 0000000000..e7cbfd3318
--- /dev/null
+++ b/tools/closure_linter/closure_linter/closurizednamespacesinfo.py
@@ -0,0 +1,578 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Logic for computing dependency information for closurized JavaScript files.
+
+Closurized JavaScript files express dependencies using goog.require and
+goog.provide statements. In order for the linter to detect when a statement is
+missing or unnecessary, all identifiers in the JavaScript file must first be
+processed to determine if they constitute the creation or usage of a dependency.
+"""
+
+
+
+import re
+
+from closure_linter import javascripttokens
+from closure_linter import tokenutil
+
+# pylint: disable=g-bad-name
+TokenType = javascripttokens.JavaScriptTokenType
+
+DEFAULT_EXTRA_NAMESPACES = [
+ 'goog.testing.asserts',
+ 'goog.testing.jsunit',
+]
+
+
+class UsedNamespace(object):
+ """A type for information about a used namespace."""
+
+ def __init__(self, namespace, identifier, token, alias_definition):
+ """Initializes the instance.
+
+ Args:
+ namespace: the namespace of an identifier used in the file
+ identifier: the complete identifier
+ token: the token that uses the namespace
+      alias_definition: a boolean stating whether the namespace is only used
+          for an alias definition and should not be required.
+ """
+ self.namespace = namespace
+ self.identifier = identifier
+ self.token = token
+ self.alias_definition = alias_definition
+
+ def GetLine(self):
+ return self.token.line_number
+
+ def __repr__(self):
+ return 'UsedNamespace(%s)' % ', '.join(
+ ['%s=%s' % (k, repr(v)) for k, v in self.__dict__.iteritems()])
+
+
+class ClosurizedNamespacesInfo(object):
+ """Dependency information for closurized JavaScript files.
+
+ Processes token streams for dependency creation or usage and provides logic
+ for determining if a given require or provide statement is unnecessary or if
+ there are missing require or provide statements.
+ """
+
+ def __init__(self, closurized_namespaces, ignored_extra_namespaces):
+    """Initializes an instance of the ClosurizedNamespacesInfo class.
+
+ Args:
+ closurized_namespaces: A list of namespace prefixes that should be
+ processed for dependency information. Non-matching namespaces are
+ ignored.
+ ignored_extra_namespaces: A list of namespaces that should not be reported
+ as extra regardless of whether they are actually used.
+ """
+ self._closurized_namespaces = closurized_namespaces
+ self._ignored_extra_namespaces = (ignored_extra_namespaces +
+ DEFAULT_EXTRA_NAMESPACES)
+ self.Reset()
+
+ def Reset(self):
+ """Resets the internal state to prepare for processing a new file."""
+
+ # A list of goog.provide tokens in the order they appeared in the file.
+ self._provide_tokens = []
+
+ # A list of goog.require tokens in the order they appeared in the file.
+ self._require_tokens = []
+
+ # Namespaces that are already goog.provided.
+ self._provided_namespaces = []
+
+ # Namespaces that are already goog.required.
+ self._required_namespaces = []
+
+ # Note that created_namespaces and used_namespaces contain both namespaces
+ # and identifiers because there are many existing cases where a method or
+ # constant is provided directly instead of its namespace. Ideally, these
+ # two lists would only have to contain namespaces.
+
+ # A list of tuples where the first element is the namespace of an identifier
+ # created in the file, the second is the identifier itself and the third is
+ # the line number where it's created.
+ self._created_namespaces = []
+
+ # A list of UsedNamespace instances.
+ self._used_namespaces = []
+
+ # A list of seemingly-unnecessary namespaces that are goog.required() and
+ # annotated with @suppress {extraRequire}.
+ self._suppressed_requires = []
+
+ # A list of goog.provide tokens which are duplicates.
+ self._duplicate_provide_tokens = []
+
+ # A list of goog.require tokens which are duplicates.
+ self._duplicate_require_tokens = []
+
+ # Whether this file is in a goog.scope. Someday, we may add support
+ # for checking scopified namespaces, but for now let's just fail
+ # in a more reasonable way.
+ self._scopified_file = False
+
+ # TODO(user): Handle the case where there are 2 different requires
+ # that can satisfy the same dependency, but only one is necessary.
+
+ def GetProvidedNamespaces(self):
+ """Returns the namespaces which are already provided by this file.
+
+ Returns:
+      A set of strings where each string is a 'namespace' corresponding to an
+      existing goog.provide statement in the file being checked.
+ """
+ return set(self._provided_namespaces)
+
+ def GetRequiredNamespaces(self):
+ """Returns the namespaces which are already required by this file.
+
+ Returns:
+      A set of strings where each string is a 'namespace' corresponding to an
+      existing goog.require statement in the file being checked.
+ """
+ return set(self._required_namespaces)
+
+ def IsExtraProvide(self, token):
+ """Returns whether the given goog.provide token is unnecessary.
+
+ Args:
+ token: A goog.provide token.
+
+ Returns:
+ True if the given token corresponds to an unnecessary goog.provide
+ statement, otherwise False.
+ """
+ namespace = tokenutil.GetStringAfterToken(token)
+
+ if self.GetClosurizedNamespace(namespace) is None:
+ return False
+
+ if token in self._duplicate_provide_tokens:
+ return True
+
+ # TODO(user): There's probably a faster way to compute this.
+ for created_namespace, created_identifier, _ in self._created_namespaces:
+ if namespace == created_namespace or namespace == created_identifier:
+ return False
+
+ return True
+
+ def IsExtraRequire(self, token):
+ """Returns whether the given goog.require token is unnecessary.
+
+ Args:
+ token: A goog.require token.
+
+ Returns:
+ True if the given token corresponds to an unnecessary goog.require
+ statement, otherwise False.
+ """
+ namespace = tokenutil.GetStringAfterToken(token)
+
+ if self.GetClosurizedNamespace(namespace) is None:
+ return False
+
+ if namespace in self._ignored_extra_namespaces:
+ return False
+
+ if token in self._duplicate_require_tokens:
+ return True
+
+ if namespace in self._suppressed_requires:
+ return False
+
+ # If the namespace contains a component that is initial caps, then that
+ # must be the last component of the namespace.
+ parts = namespace.split('.')
+ if len(parts) > 1 and parts[-2][0].isupper():
+ return True
+
+ # TODO(user): There's probably a faster way to compute this.
+ for ns in self._used_namespaces:
+ if (not ns.alias_definition and (
+ namespace == ns.namespace or namespace == ns.identifier)):
+ return False
+
+ return True
+
+ def GetMissingProvides(self):
+ """Returns the dict of missing provided namespaces for the current file.
+
+ Returns:
+ Returns a dictionary of key as string and value as integer where each
+ string(key) is a namespace that should be provided by this file, but is
+      not, and integer(value) is the first line number where it's defined.
+ """
+ missing_provides = dict()
+ for namespace, identifier, line_number in self._created_namespaces:
+ if (not self._IsPrivateIdentifier(identifier) and
+ namespace not in self._provided_namespaces and
+ identifier not in self._provided_namespaces and
+ namespace not in self._required_namespaces and
+ namespace not in missing_provides):
+ missing_provides[namespace] = line_number
+
+ return missing_provides
+
+ def GetMissingRequires(self):
+ """Returns the dict of missing required namespaces for the current file.
+
+ For each non-private identifier used in the file, find either a
+ goog.require, goog.provide or a created identifier that satisfies it.
+ goog.require statements can satisfy the identifier by requiring either the
+ namespace of the identifier or the identifier itself. goog.provide
+ statements can satisfy the identifier by providing the namespace of the
+ identifier. A created identifier can only satisfy the used identifier if
+ it matches it exactly (necessary since things can be defined on a
+ namespace in more than one file). Note that provided namespaces should be
+ a subset of created namespaces, but we check both because in some cases we
+ can't always detect the creation of the namespace.
+
+ Returns:
+      A tuple (missing_requires, illegal_alias_statements). missing_requires
+      maps each namespace (string) that should be required but is not to the
+      first line number where it's used; the second maps namespaces to tokens.
+ """
+ external_dependencies = set(self._required_namespaces)
+
+ # Assume goog namespace is always available.
+ external_dependencies.add('goog')
+ # goog.module is treated as a builtin, too (for goog.module.get).
+ external_dependencies.add('goog.module')
+
+ created_identifiers = set()
+ for unused_namespace, identifier, unused_line_number in (
+ self._created_namespaces):
+ created_identifiers.add(identifier)
+
+ missing_requires = dict()
+ illegal_alias_statements = dict()
+
+ def ShouldRequireNamespace(namespace, identifier):
+ """Checks if a namespace would normally be required."""
+ return (
+ not self._IsPrivateIdentifier(identifier) and
+ namespace not in external_dependencies and
+ namespace not in self._provided_namespaces and
+ identifier not in external_dependencies and
+ identifier not in created_identifiers and
+ namespace not in missing_requires)
+
+ # First check all the used identifiers where we know that their namespace
+ # needs to be provided (unless they are optional).
+ for ns in self._used_namespaces:
+ namespace = ns.namespace
+ identifier = ns.identifier
+ if (not ns.alias_definition and
+ ShouldRequireNamespace(namespace, identifier)):
+ missing_requires[namespace] = ns.GetLine()
+
+ # Now that all required namespaces are known, we can check if the alias
+ # definitions (that are likely being used for typeannotations that don't
+ # need explicit goog.require statements) are already covered. If not
+ # the user shouldn't use the alias.
+ for ns in self._used_namespaces:
+ if (not ns.alias_definition or
+ not ShouldRequireNamespace(ns.namespace, ns.identifier)):
+ continue
+ if self._FindNamespace(ns.identifier, self._provided_namespaces,
+ created_identifiers, external_dependencies,
+ missing_requires):
+ continue
+ namespace = ns.identifier.rsplit('.', 1)[0]
+ illegal_alias_statements[namespace] = ns.token
+
+ return missing_requires, illegal_alias_statements
+
+ def _FindNamespace(self, identifier, *namespaces_list):
+ """Finds the namespace of an identifier given a list of other namespaces.
+
+ Args:
+ identifier: An identifier whose parent needs to be defined.
+ e.g. for goog.bar.foo we search something that provides
+ goog.bar.
+ *namespaces_list: var args of iterables of namespace identifiers
+ Returns:
+ The namespace that the given identifier is part of or None.
+ """
+ identifier = identifier.rsplit('.', 1)[0]
+ identifier_prefix = identifier + '.'
+ for namespaces in namespaces_list:
+ for namespace in namespaces:
+ if namespace == identifier or namespace.startswith(identifier_prefix):
+ return namespace
+ return None
+
+ def _IsPrivateIdentifier(self, identifier):
+ """Returns whether the given identifier is private."""
+ pieces = identifier.split('.')
+ for piece in pieces:
+ if piece.endswith('_'):
+ return True
+ return False
+
+ def IsFirstProvide(self, token):
+ """Returns whether token is the first provide token."""
+ return self._provide_tokens and token == self._provide_tokens[0]
+
+ def IsFirstRequire(self, token):
+ """Returns whether token is the first require token."""
+ return self._require_tokens and token == self._require_tokens[0]
+
+ def IsLastProvide(self, token):
+ """Returns whether token is the last provide token."""
+ return self._provide_tokens and token == self._provide_tokens[-1]
+
+ def IsLastRequire(self, token):
+ """Returns whether token is the last require token."""
+ return self._require_tokens and token == self._require_tokens[-1]
+
+ def ProcessToken(self, token, state_tracker):
+ """Processes the given token for dependency information.
+
+ Args:
+ token: The token to process.
+ state_tracker: The JavaScript state tracker.
+ """
+
+ # Note that this method is in the critical path for the linter and has been
+ # optimized for performance in the following ways:
+ # - Tokens are checked by type first to minimize the number of function
+ # calls necessary to determine if action needs to be taken for the token.
+ # - The most common tokens types are checked for first.
+ # - The number of function calls has been minimized (thus the length of this
+    #   function).
+
+ if token.type == TokenType.IDENTIFIER:
+ # TODO(user): Consider saving the whole identifier in metadata.
+ whole_identifier_string = tokenutil.GetIdentifierForToken(token)
+ if whole_identifier_string is None:
+ # We only want to process the identifier one time. If the whole string
+ # identifier is None, that means this token was part of a multi-token
+ # identifier, but it was not the first token of the identifier.
+ return
+
+ # In the odd case that a goog.require is encountered inside a function,
+ # just ignore it (e.g. dynamic loading in test runners).
+ if token.string == 'goog.require' and not state_tracker.InFunction():
+ self._require_tokens.append(token)
+ namespace = tokenutil.GetStringAfterToken(token)
+ if namespace in self._required_namespaces:
+ self._duplicate_require_tokens.append(token)
+ else:
+ self._required_namespaces.append(namespace)
+
+ # If there is a suppression for the require, add a usage for it so it
+ # gets treated as a regular goog.require (i.e. still gets sorted).
+ if self._HasSuppression(state_tracker, 'extraRequire'):
+ self._suppressed_requires.append(namespace)
+ self._AddUsedNamespace(state_tracker, namespace, token)
+
+ elif token.string == 'goog.provide':
+ self._provide_tokens.append(token)
+ namespace = tokenutil.GetStringAfterToken(token)
+ if namespace in self._provided_namespaces:
+ self._duplicate_provide_tokens.append(token)
+ else:
+ self._provided_namespaces.append(namespace)
+
+ # If there is a suppression for the provide, add a creation for it so it
+ # gets treated as a regular goog.provide (i.e. still gets sorted).
+ if self._HasSuppression(state_tracker, 'extraProvide'):
+ self._AddCreatedNamespace(state_tracker, namespace, token.line_number)
+
+ elif token.string == 'goog.scope':
+ self._scopified_file = True
+
+ elif token.string == 'goog.setTestOnly':
+
+ # Since the message is optional, we don't want to scan to later lines.
+ for t in tokenutil.GetAllTokensInSameLine(token):
+ if t.type == TokenType.STRING_TEXT:
+ message = t.string
+
+ if re.match(r'^\w+(\.\w+)+$', message):
+ # This looks like a namespace. If it's a Closurized namespace,
+ # consider it created.
+ base_namespace = message.split('.', 1)[0]
+ if base_namespace in self._closurized_namespaces:
+ self._AddCreatedNamespace(state_tracker, message,
+ token.line_number)
+
+ break
+ else:
+ jsdoc = state_tracker.GetDocComment()
+ if token.metadata and token.metadata.aliased_symbol:
+ whole_identifier_string = token.metadata.aliased_symbol
+ elif (token.string == 'goog.module.get' and
+ not self._HasSuppression(state_tracker, 'extraRequire')):
+ # Cannot use _AddUsedNamespace as this is not an identifier, but
+ # already the entire namespace that's required.
+ namespace = tokenutil.GetStringAfterToken(token)
+ namespace = UsedNamespace(namespace, namespace, token,
+ alias_definition=False)
+ self._used_namespaces.append(namespace)
+ if jsdoc and jsdoc.HasFlag('typedef'):
+ self._AddCreatedNamespace(state_tracker, whole_identifier_string,
+ token.line_number,
+ namespace=self.GetClosurizedNamespace(
+ whole_identifier_string))
+ else:
+ is_alias_definition = (token.metadata and
+ token.metadata.is_alias_definition)
+ self._AddUsedNamespace(state_tracker, whole_identifier_string,
+ token, is_alias_definition)
+
+ elif token.type == TokenType.SIMPLE_LVALUE:
+ identifier = token.values['identifier']
+ start_token = tokenutil.GetIdentifierStart(token)
+ if start_token and start_token != token:
+ # Multi-line identifier being assigned. Get the whole identifier.
+ identifier = tokenutil.GetIdentifierForToken(start_token)
+ else:
+ start_token = token
+ # If an alias is defined on the start_token, use it instead.
+ if (start_token and
+ start_token.metadata and
+ start_token.metadata.aliased_symbol and
+ not start_token.metadata.is_alias_definition):
+ identifier = start_token.metadata.aliased_symbol
+
+ if identifier:
+ namespace = self.GetClosurizedNamespace(identifier)
+ if state_tracker.InFunction():
+ self._AddUsedNamespace(state_tracker, identifier, token)
+ elif namespace and namespace != 'goog':
+ self._AddCreatedNamespace(state_tracker, identifier,
+ token.line_number, namespace=namespace)
+
+ elif token.type == TokenType.DOC_FLAG:
+ flag = token.attached_object
+ flag_type = flag.flag_type
+ if flag and flag.HasType() and flag.jstype:
+ is_interface = state_tracker.GetDocComment().HasFlag('interface')
+ if flag_type == 'implements' or (flag_type == 'extends'
+ and is_interface):
+ identifier = flag.jstype.alias or flag.jstype.identifier
+ self._AddUsedNamespace(state_tracker, identifier, token)
+ # Since we process doctypes only for implements and extends, the
+ # type is a simple one and we don't need any iteration for subtypes.
+
+ def _AddCreatedNamespace(self, state_tracker, identifier, line_number,
+ namespace=None):
+ """Adds the namespace of an identifier to the list of created namespaces.
+
+ If the identifier is annotated with a 'missingProvide' suppression, it is
+ not added.
+
+ Args:
+ state_tracker: The JavaScriptStateTracker instance.
+ identifier: The identifier to add.
+ line_number: Line number where namespace is created.
+ namespace: The namespace of the identifier or None if the identifier is
+ also the namespace.
+ """
+ if not namespace:
+ namespace = identifier
+
+ if self._HasSuppression(state_tracker, 'missingProvide'):
+ return
+
+ self._created_namespaces.append([namespace, identifier, line_number])
+
+ def _AddUsedNamespace(self, state_tracker, identifier, token,
+ is_alias_definition=False):
+ """Adds the namespace of an identifier to the list of used namespaces.
+
+ If the identifier is annotated with a 'missingRequire' suppression, it is
+ not added.
+
+ Args:
+ state_tracker: The JavaScriptStateTracker instance.
+ identifier: An identifier which has been used.
+ token: The token in which the namespace is used.
+ is_alias_definition: If the used namespace is part of an alias_definition.
+ Aliased symbols need their parent namespace to be available, if it is
+ not yet required through another symbol, an error will be thrown.
+ """
+ if self._HasSuppression(state_tracker, 'missingRequire'):
+ return
+
+ namespace = self.GetClosurizedNamespace(identifier)
+    # b/5362203 If it's a variable in scope then it's not a required namespace.
+ if namespace and not state_tracker.IsVariableInScope(namespace):
+ namespace = UsedNamespace(namespace, identifier, token,
+ is_alias_definition)
+ self._used_namespaces.append(namespace)
+
+ def _HasSuppression(self, state_tracker, suppression):
+ jsdoc = state_tracker.GetDocComment()
+ return jsdoc and suppression in jsdoc.suppressions
+
+ def GetClosurizedNamespace(self, identifier):
+ """Given an identifier, returns the namespace that identifier is from.
+
+ Args:
+ identifier: The identifier to extract a namespace from.
+
+ Returns:
+ The namespace the given identifier resides in, or None if one could not
+ be found.
+ """
+ if identifier.startswith('goog.global'):
+ # Ignore goog.global, since it is, by definition, global.
+ return None
+
+ parts = identifier.split('.')
+ for namespace in self._closurized_namespaces:
+ if not identifier.startswith(namespace + '.'):
+ continue
+
+ # The namespace for a class is the shortest prefix ending in a class
+ # name, which starts with a capital letter but is not a capitalized word.
+ #
+ # We ultimately do not want to allow requiring or providing of inner
+ # classes/enums. Instead, a file should provide only the top-level class
+ # and users should require only that.
+ namespace = []
+ for part in parts:
+ if part == 'prototype' or part.isupper():
+ return '.'.join(namespace)
+ namespace.append(part)
+ if part[0].isupper():
+ return '.'.join(namespace)
+
+ # At this point, we know there's no class or enum, so the namespace is
+ # just the identifier with the last part removed. With the exception of
+ # apply, inherits, and call, which should also be stripped.
+ if parts[-1] in ('apply', 'inherits', 'call'):
+ parts.pop()
+ parts.pop()
+
+ # If the last part ends with an underscore, it is a private variable,
+ # method, or enum. The namespace is whatever is before it.
+ if parts and parts[-1].endswith('_'):
+ parts.pop()
+
+ return '.'.join(parts)
+
+ return None
diff --git a/tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py b/tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py
new file mode 100755
index 0000000000..7aeae21956
--- /dev/null
+++ b/tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py
@@ -0,0 +1,873 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for ClosurizedNamespacesInfo."""
+
+
+
+import unittest as googletest
+from closure_linter import aliaspass
+from closure_linter import closurizednamespacesinfo
+from closure_linter import ecmametadatapass
+from closure_linter import javascriptstatetracker
+from closure_linter import javascripttokens
+from closure_linter import testutil
+from closure_linter import tokenutil
+
+# pylint: disable=g-bad-name
+TokenType = javascripttokens.JavaScriptTokenType
+
+
+def _ToLineDict(illegal_alias_stmts):
+ """Replaces tokens with the respective line number."""
+ return {k: v.line_number for k, v in illegal_alias_stmts.iteritems()}
+
+
+class ClosurizedNamespacesInfoTest(googletest.TestCase):
+ """Tests for ClosurizedNamespacesInfo."""
+
+ _test_cases = {
+ 'goog.global.anything': None,
+ 'package.CONSTANT': 'package',
+ 'package.methodName': 'package',
+ 'package.subpackage.methodName': 'package.subpackage',
+ 'package.subpackage.methodName.apply': 'package.subpackage',
+ 'package.ClassName.something': 'package.ClassName',
+ 'package.ClassName.Enum.VALUE.methodName': 'package.ClassName',
+ 'package.ClassName.CONSTANT': 'package.ClassName',
+ 'package.namespace.CONSTANT.methodName': 'package.namespace',
+ 'package.ClassName.inherits': 'package.ClassName',
+ 'package.ClassName.apply': 'package.ClassName',
+ 'package.ClassName.methodName.apply': 'package.ClassName',
+ 'package.ClassName.methodName.call': 'package.ClassName',
+ 'package.ClassName.prototype.methodName': 'package.ClassName',
+ 'package.ClassName.privateMethod_': 'package.ClassName',
+ 'package.className.privateProperty_': 'package.className',
+ 'package.className.privateProperty_.methodName': 'package.className',
+ 'package.ClassName.PrivateEnum_': 'package.ClassName',
+ 'package.ClassName.prototype.methodName.apply': 'package.ClassName',
+ 'package.ClassName.property.subProperty': 'package.ClassName',
+ 'package.className.prototype.something.somethingElse': 'package.className'
+ }
+
+ def testGetClosurizedNamespace(self):
+ """Tests that the correct namespace is returned for various identifiers."""
+ namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
+ closurized_namespaces=['package'], ignored_extra_namespaces=[])
+ for identifier, expected_namespace in self._test_cases.items():
+ actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)
+ self.assertEqual(
+ expected_namespace,
+ actual_namespace,
+ 'expected namespace "' + str(expected_namespace) +
+ '" for identifier "' + str(identifier) + '" but was "' +
+ str(actual_namespace) + '"')
+
+ def testIgnoredExtraNamespaces(self):
+ """Tests that ignored_extra_namespaces are ignored."""
+ token = self._GetRequireTokens('package.Something')
+ namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
+ closurized_namespaces=['package'],
+ ignored_extra_namespaces=['package.Something'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Should be valid since it is in ignored namespaces.')
+
+ namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
+ ['package'], [])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'Should be invalid since it is not in ignored namespaces.')
+
+ def testIsExtraProvide_created(self):
+ """Tests that provides for created namespaces are not extra."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo = function() {};'
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraProvide(token),
+ 'Should not be extra since it is created.')
+
+ def testIsExtraProvide_createdIdentifier(self):
+ """Tests that provides for created identifiers are not extra."""
+ input_lines = [
+ 'goog.provide(\'package.Foo.methodName\');',
+ 'package.Foo.methodName = function() {};'
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraProvide(token),
+ 'Should not be extra since it is created.')
+
+ def testIsExtraProvide_notCreated(self):
+ """Tests that provides for non-created namespaces are extra."""
+ input_lines = ['goog.provide(\'package.Foo\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertTrue(namespaces_info.IsExtraProvide(token),
+ 'Should be extra since it is not created.')
+
+ def testIsExtraProvide_notCreatedMultipartClosurizedNamespace(self):
+ """Tests that provides for non-created namespaces are extra."""
+ input_lines = ['goog.provide(\'multi.part.namespace.Foo\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['multi.part'])
+
+ self.assertTrue(namespaces_info.IsExtraProvide(token),
+ 'Should be extra since it is not created.')
+
+ def testIsExtraProvide_duplicate(self):
+ """Tests that providing a namespace twice makes the second one extra."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo = function() {};'
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ # Advance to the second goog.provide token.
+ token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
+
+ self.assertTrue(namespaces_info.IsExtraProvide(token),
+ 'Should be extra since it is already provided.')
+
+ def testIsExtraProvide_notClosurized(self):
+ """Tests that provides of non-closurized namespaces are not extra."""
+ input_lines = ['goog.provide(\'notclosurized.Foo\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraProvide(token),
+ 'Should not be extra since it is not closurized.')
+
+ def testIsExtraRequire_used(self):
+ """Tests that requires for used namespaces are not extra."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'var x = package.Foo.methodName();'
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Should not be extra since it is used.')
+
+ def testIsExtraRequire_usedIdentifier(self):
+ """Tests that requires for used methods on classes are extra."""
+ input_lines = [
+ 'goog.require(\'package.Foo.methodName\');',
+ 'var x = package.Foo.methodName();'
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'Should require the package, not the method specifically.')
+
+ def testIsExtraRequire_notUsed(self):
+ """Tests that requires for unused namespaces are extra."""
+ input_lines = ['goog.require(\'package.Foo\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'Should be extra since it is not used.')
+
+ def testIsExtraRequire_notUsedMultiPartClosurizedNamespace(self):
+ """Tests unused require with multi-part closurized namespaces."""
+
+ input_lines = ['goog.require(\'multi.part.namespace.Foo\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['multi.part'])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'Should be extra since it is not used.')
+
+ def testIsExtraRequire_notClosurized(self):
+ """Tests that requires of non-closurized namespaces are not extra."""
+ input_lines = ['goog.require(\'notclosurized.Foo\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Should not be extra since it is not closurized.')
+
+ def testIsExtraRequire_objectOnClass(self):
+ """Tests that requiring an object on a class is extra."""
+ input_lines = [
+ 'goog.require(\'package.Foo.Enum\');',
+ 'var x = package.Foo.Enum.VALUE1;',
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'The whole class, not the object, should be required.');
+
+ def testIsExtraRequire_constantOnClass(self):
+ """Tests that requiring a constant on a class is extra."""
+ input_lines = [
+ 'goog.require(\'package.Foo.CONSTANT\');',
+ 'var x = package.Foo.CONSTANT',
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertTrue(namespaces_info.IsExtraRequire(token),
+ 'The class, not the constant, should be required.');
+
+ def testIsExtraRequire_constantNotOnClass(self):
+ """Tests that requiring a constant not on a class is OK."""
+ input_lines = [
+ 'goog.require(\'package.subpackage.CONSTANT\');',
+ 'var x = package.subpackage.CONSTANT',
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Constants can be required except on classes.');
+
+ def testIsExtraRequire_methodNotOnClass(self):
+ """Tests that requiring a method not on a class is OK."""
+ input_lines = [
+ 'goog.require(\'package.subpackage.method\');',
+ 'var x = package.subpackage.method()',
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Methods can be required except on classes.');
+
+ def testIsExtraRequire_defaults(self):
+ """Tests that there are no warnings about extra requires for test utils"""
+ input_lines = ['goog.require(\'goog.testing.jsunit\');']
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['goog'])
+
+ self.assertFalse(namespaces_info.IsExtraRequire(token),
+ 'Should not be extra since it is for testing.')
+
+ def testGetMissingProvides_provided(self):
+ """Tests that provided functions don't cause a missing provide."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo = function() {};'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(
+ input_lines, ['package'])
+
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingProvides_providedIdentifier(self):
+ """Tests that provided identifiers don't cause a missing provide."""
+ input_lines = [
+ 'goog.provide(\'package.Foo.methodName\');',
+ 'package.Foo.methodName = function() {};'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingProvides_providedParentIdentifier(self):
+ """Tests that provided identifiers on a class don't cause a missing provide
+ on objects attached to that class."""
+ input_lines = [
+ 'goog.provide(\'package.foo.ClassName\');',
+ 'package.foo.ClassName.methodName = function() {};',
+ 'package.foo.ClassName.ObjectName = 1;',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingProvides_unprovided(self):
+ """Tests that unprovided functions cause a missing provide."""
+ input_lines = ['package.Foo = function() {};']
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+
+ missing_provides = namespaces_info.GetMissingProvides()
+ self.assertEquals(1, len(missing_provides))
+ missing_provide = missing_provides.popitem()
+ self.assertEquals('package.Foo', missing_provide[0])
+ self.assertEquals(1, missing_provide[1])
+
+ def testGetMissingProvides_privatefunction(self):
+ """Tests that unprovided private functions don't cause a missing provide."""
+ input_lines = ['package.Foo_ = function() {};']
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingProvides_required(self):
+ """Tests that required namespaces don't cause a missing provide."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'package.Foo.methodName = function() {};'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
+
+ def testGetMissingRequires_required(self):
+ """Tests that required namespaces don't cause a missing require."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'package.Foo();'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(0, len(missing_requires))
+
+ def testGetMissingRequires_requiredIdentifier(self):
+ """Tests that required namespaces satisfy identifiers on that namespace."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'package.Foo.methodName();'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(0, len(missing_requires))
+
+ def testGetMissingRequires_requiredNamespace(self):
+ """Tests that required namespaces satisfy the namespace."""
+ input_lines = [
+ 'goog.require(\'package.soy.fooTemplate\');',
+ 'render(package.soy.fooTemplate);'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(0, len(missing_requires))
+
+ def testGetMissingRequires_requiredParentClass(self):
+ """Tests that requiring a parent class of an object is sufficient to prevent
+ a missing require on that object."""
+ input_lines = [
+ 'goog.require(\'package.Foo\');',
+ 'package.Foo.methodName();',
+ 'package.Foo.methodName(package.Foo.ObjectName);'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(0, len(missing_requires))
+
+ def testGetMissingRequires_unrequired(self):
+ """Tests that unrequired namespaces cause a missing require."""
+ input_lines = ['package.Foo();']
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(1, len(missing_requires))
+ missing_req = missing_requires.popitem()
+ self.assertEquals('package.Foo', missing_req[0])
+ self.assertEquals(1, missing_req[1])
+
+ def testGetMissingRequires_provided(self):
+ """Tests that provided namespaces satisfy identifiers on that namespace."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo.methodName();'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(0, len(missing_requires))
+
+ def testGetMissingRequires_created(self):
+ """Tests that created namespaces do not satisfy usage of an identifier."""
+ input_lines = [
+ 'package.Foo = function();',
+ 'package.Foo.methodName();',
+ 'package.Foo.anotherMethodName1();',
+ 'package.Foo.anotherMethodName2();'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(1, len(missing_requires))
+ missing_require = missing_requires.popitem()
+ self.assertEquals('package.Foo', missing_require[0])
+ # Make sure line number of first occurrence is reported
+ self.assertEquals(2, missing_require[1])
+
+ def testGetMissingRequires_createdIdentifier(self):
+ """Tests that created identifiers satisfy usage of the identifier."""
+ input_lines = [
+ 'package.Foo.methodName = function();',
+ 'package.Foo.methodName();'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(0, len(missing_requires))
+
+ def testGetMissingRequires_implements(self):
+ """Tests that a parametrized type requires the correct identifier."""
+ input_lines = [
+ '/** @constructor @implements {package.Bar<T>} */',
+ 'package.Foo = function();',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertItemsEqual({'package.Bar': 1}, missing_requires)
+
+ def testGetMissingRequires_objectOnClass(self):
+ """Tests that we should require a class, not the object on the class."""
+ input_lines = [
+ 'goog.require(\'package.Foo.Enum\');',
+ 'var x = package.Foo.Enum.VALUE1;',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(1, len(missing_requires),
+ 'The whole class, not the object, should be required.')
+
+ def testGetMissingRequires_variableWithSameName(self):
+ """Tests that we should not goog.require variables and parameters.
+
+ b/5362203 Variables in scope are not missing namespaces.
+ """
+ input_lines = [
+ 'goog.provide(\'Foo\');',
+ 'Foo.A = function();',
+ 'Foo.A.prototype.method = function(ab) {',
+ ' if (ab) {',
+ ' var docs;',
+ ' var lvalue = new Obj();',
+ ' // Variable in scope hence not goog.require here.',
+ ' docs.foo.abc = 1;',
+ ' lvalue.next();',
+ ' }',
+ ' // Since js is function scope this should also not goog.require.',
+ ' docs.foo.func();',
+ ' // Its not a variable in scope hence goog.require.',
+ ' dummy.xyz.reset();',
+ ' return this.method2();',
+ '};',
+ 'Foo.A.prototype.method1 = function(docs, abcd, xyz) {',
+ ' // Parameter hence not goog.require.',
+ ' docs.nodes.length = 2;',
+ ' lvalue.abc.reset();',
+ '};'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['Foo',
+ 'docs',
+ 'lvalue',
+ 'dummy'])
+ missing_requires, _ = namespaces_info.GetMissingRequires()
+ self.assertEquals(2, len(missing_requires))
+ self.assertItemsEqual(
+ {'dummy.xyz': 14,
+ 'lvalue.abc': 20}, missing_requires)
+
+ def testIsFirstProvide(self):
+ """Tests operation of the isFirstProvide method."""
+ input_lines = [
+ 'goog.provide(\'package.Foo\');',
+ 'package.Foo.methodName();'
+ ]
+
+ token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ input_lines, ['package'])
+ self.assertTrue(namespaces_info.IsFirstProvide(token))
+
+ def testGetWholeIdentifierString(self):
+ """Tests that created identifiers satisfy usage of the identifier."""
+ input_lines = [
+ 'package.Foo.',
+ ' veryLong.',
+ ' identifier;'
+ ]
+
+ token = testutil.TokenizeSource(input_lines)
+
+ self.assertEquals('package.Foo.veryLong.identifier',
+ tokenutil.GetIdentifierForToken(token))
+
+ self.assertEquals(None,
+ tokenutil.GetIdentifierForToken(token.next))
+
+ def testScopified(self):
+ """Tests that a goog.scope call is noticed."""
+ input_lines = [
+ 'goog.scope(function() {',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ self.assertTrue(namespaces_info._scopified_file)
+
+ def testScope_unusedAlias(self):
+ """Tests that an unused alias symbol is illegal."""
+ input_lines = [
+ 'goog.scope(function() {',
+ 'var Event = goog.events.Event;',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
+
  def testScope_usedMultilevelAlias(self):
    """Tests that a used alias symbol in a deep namespace is OK."""
    input_lines = [
        'goog.require(\'goog.Events\');',
        'goog.scope(function() {',
        'var Event = goog.Events.DeepNamespace.Event;',
        'Event();',
        '});'
    ]

    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
    missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
    self.assertEquals({}, missing_requires)
    self.assertEquals({}, illegal_alias_stmts)
+
+ def testScope_usedAlias(self):
+ """Tests that aliased symbols result in correct requires."""
+ input_lines = [
+ 'goog.scope(function() {',
+ 'var Event = goog.events.Event;',
+ 'var dom = goog.dom;',
+ 'Event(dom.classes.get);',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, illegal_alias_stmts)
+ self.assertEquals({'goog.dom.classes': 4, 'goog.events.Event': 4},
+ missing_requires)
+
+ def testModule_alias(self):
+ """Tests that goog.module style aliases are supported."""
+ input_lines = [
+ 'goog.module(\'test.module\');',
+ 'var Unused = goog.require(\'goog.Unused\');',
+ 'var AliasedClass = goog.require(\'goog.AliasedClass\');',
+ 'var x = new AliasedClass();',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ namespaceToken = self._GetRequireTokens('goog.AliasedClass')
+ self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
+ 'AliasedClass should be marked as used')
+ unusedToken = self._GetRequireTokens('goog.Unused')
+ self.assertTrue(namespaces_info.IsExtraRequire(unusedToken),
+ 'Unused should be marked as not used')
+
+ def testModule_aliasInScope(self):
+ """Tests that goog.module style aliases are supported."""
+ input_lines = [
+ 'goog.module(\'test.module\');',
+ 'var AliasedClass = goog.require(\'goog.AliasedClass\');',
+ 'goog.scope(function() {',
+ 'var x = new AliasedClass();',
+ '});',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ namespaceToken = self._GetRequireTokens('goog.AliasedClass')
+ self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
+ 'AliasedClass should be marked as used')
+
+ def testModule_getAlwaysProvided(self):
+ """Tests that goog.module.get is recognized as a built-in."""
+ input_lines = [
+ 'goog.provide(\'test.MyClass\');',
+ 'goog.require(\'goog.someModule\');',
+ 'goog.scope(function() {',
+ 'var someModule = goog.module.get(\'goog.someModule\');',
+ 'test.MyClass = function() {};',
+ '});',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ self.assertEquals({}, namespaces_info.GetMissingRequires()[0])
+
+ def testModule_requireForGet(self):
+ """Tests that goog.module.get needs a goog.require call."""
+ input_lines = [
+ 'goog.provide(\'test.MyClass\');',
+ 'function foo() {',
+ ' var someModule = goog.module.get(\'goog.someModule\');',
+ ' someModule.doSth();',
+ '}',
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ self.assertEquals({'goog.someModule': 3},
+ namespaces_info.GetMissingRequires()[0])
+
+ def testScope_usedTypeAlias(self):
+ """Tests aliased symbols in type annotations."""
+ input_lines = [
+ 'goog.scope(function() {',
+ 'var Event = goog.events.Event;',
+ '/** @type {Event} */;',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
+
+ def testScope_partialAlias_typeOnly(self):
+ """Tests a partial alias only used in type annotations.
+
+ In this example, some goog.events namespace would need to be required
+ so that evaluating goog.events.bar doesn't throw an error.
+ """
+ input_lines = [
+ 'goog.scope(function() {',
+ 'var bar = goog.events.bar;',
+ '/** @type {bar.Foo} */;',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
+
+ def testScope_partialAlias(self):
+ """Tests a partial alias in conjunction with a type annotation.
+
+ In this example, the partial alias is already defined by another type,
+ therefore the doc-only type doesn't need to be required.
+ """
+ input_lines = [
+ 'goog.scope(function() {',
+ 'var bar = goog.events.bar;',
+ '/** @type {bar.Event} */;',
+ 'bar.EventType();'
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({'goog.events.bar.EventType': 4}, missing_requires)
+ self.assertEquals({}, illegal_alias_stmts)
+
+ def testScope_partialAliasRequires(self):
+ """Tests partial aliases with correct requires."""
+ input_lines = [
+ 'goog.require(\'goog.events.bar.EventType\');',
+ 'goog.scope(function() {',
+ 'var bar = goog.events.bar;',
+ '/** @type {bar.Event} */;',
+ 'bar.EventType();'
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({}, illegal_alias_stmts)
+
+ def testScope_partialAliasRequiresBoth(self):
+ """Tests partial aliases with correct requires."""
+ input_lines = [
+ 'goog.require(\'goog.events.bar.Event\');',
+ 'goog.require(\'goog.events.bar.EventType\');',
+ 'goog.scope(function() {',
+ 'var bar = goog.events.bar;',
+ '/** @type {bar.Event} */;',
+ 'bar.EventType();'
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({}, illegal_alias_stmts)
+ event_token = self._GetRequireTokens('goog.events.bar.Event')
+ self.assertTrue(namespaces_info.IsExtraRequire(event_token))
+
  def testScope_partialAliasNoSubtypeRequires(self):
    """Tests that partial aliases don't yield subtype requires (regression)."""
    # NOTE(review): the last four strings below have no separating commas, so
    # Python's implicit literal concatenation collapses them into a single
    # source line. This looks accidental but is preserved byte-for-byte here;
    # confirm intent before adding the commas, since splitting changes the
    # line numbering the linter sees.
    input_lines = [
        'goog.provide(\'goog.events.Foo\');',
        'goog.scope(function() {',
        'goog.events.Foo = {};',
        'var Foo = goog.events.Foo;'
        'Foo.CssName_ = {};'
        'var CssName_ = Foo.CssName_;'
        '});'
    ]

    namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
    missing_requires, _ = namespaces_info.GetMissingRequires()
    self.assertEquals({}, missing_requires)
+
+ def testScope_aliasNamespace(self):
+ """Tests that an unused alias namespace is not required when available.
+
+ In the example goog.events.Bar is not required, because the namespace
+ goog.events is already defined because goog.events.Foo is required.
+ """
+ input_lines = [
+ 'goog.require(\'goog.events.Foo\');',
+ 'goog.scope(function() {',
+ 'var Bar = goog.events.Bar;',
+ '/** @type {Bar} */;',
+ 'goog.events.Foo;',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({}, illegal_alias_stmts)
+
+ def testScope_aliasNamespaceIllegal(self):
+ """Tests that an unused alias namespace is not required when available."""
+ input_lines = [
+ 'goog.scope(function() {',
+ 'var Bar = goog.events.Bar;',
+ '/** @type {Bar} */;',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, missing_requires)
+ self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
+
+ def testScope_provides(self):
+ """Tests that aliased symbols result in correct provides."""
+ input_lines = [
+ 'goog.scope(function() {',
+ 'goog.bar = {};',
+ 'var bar = goog.bar;',
+ 'bar.Foo = {};',
+ '});'
+ ]
+
+ namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
+ missing_provides = namespaces_info.GetMissingProvides()
+ self.assertEquals({'goog.bar.Foo': 4}, missing_provides)
+ _, illegal_alias_stmts = namespaces_info.GetMissingRequires()
+ self.assertEquals({}, illegal_alias_stmts)
+
+ def testSetTestOnlyNamespaces(self):
+ """Tests that a namespace in setTestOnly makes it a valid provide."""
+ namespaces_info = self._GetNamespacesInfoForScript([
+ 'goog.setTestOnly(\'goog.foo.barTest\');'
+ ], ['goog'])
+
+ token = self._GetProvideTokens('goog.foo.barTest')
+ self.assertFalse(namespaces_info.IsExtraProvide(token))
+
+ token = self._GetProvideTokens('goog.foo.bazTest')
+ self.assertTrue(namespaces_info.IsExtraProvide(token))
+
+ def testSetTestOnlyComment(self):
+ """Ensure a comment in setTestOnly does not cause a created namespace."""
+ namespaces_info = self._GetNamespacesInfoForScript([
+ 'goog.setTestOnly(\'this is a comment\');'
+ ], ['goog'])
+
+ self.assertEquals(
+ [], namespaces_info._created_namespaces,
+ 'A comment in setTestOnly should not modify created namespaces.')
+
+ def _GetNamespacesInfoForScript(self, script, closurized_namespaces=None):
+ _, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
+ script, closurized_namespaces)
+
+ return namespaces_info
+
+ def _GetStartTokenAndNamespacesInfoForScript(
+ self, script, closurized_namespaces):
+
+ token = testutil.TokenizeSource(script)
+ return token, self._GetInitializedNamespacesInfo(
+ token, closurized_namespaces, [])
+
  def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
                                    ignored_extra_namespaces):
    """Returns a namespaces info initialized with the given token stream."""
    namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
        closurized_namespaces=closurized_namespaces,
        ignored_extra_namespaces=ignored_extra_namespaces)
    state_tracker = javascriptstatetracker.JavaScriptStateTracker()

    # Run the preparatory passes over the token stream in order: ECMA
    # metadata, then doc flags, then alias resolution.
    ecma_pass = ecmametadatapass.EcmaMetaDataPass()
    ecma_pass.Process(token)

    state_tracker.DocFlagPass(token, error_handler=None)

    alias_pass = aliaspass.AliasPass(closurized_namespaces)
    alias_pass.Process(token)

    # Feed every token through the state tracker and the namespaces info,
    # mirroring what the real checker does during a lint run.
    while token:
      state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
      namespaces_info.ProcessToken(token, state_tracker)
      state_tracker.HandleAfterToken(token)
      token = token.next

    return namespaces_info
+
+ def _GetProvideTokens(self, namespace):
+ """Returns a list of tokens for a goog.require of the given namespace."""
+ line_text = 'goog.require(\'' + namespace + '\');\n'
+ return testutil.TokenizeSource([line_text])
+
+ def _GetRequireTokens(self, namespace):
+ """Returns a list of tokens for a goog.require of the given namespace."""
+ line_text = 'goog.require(\'' + namespace + '\');\n'
+ return testutil.TokenizeSource([line_text])
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/closure_linter/common/__init__.py b/tools/closure_linter/closure_linter/common/__init__.py
index 4265cc3e6c..57930436ce 100755
--- a/tools/closure_linter/closure_linter/common/__init__.py
+++ b/tools/closure_linter/closure_linter/common/__init__.py
@@ -1 +1,16 @@
#!/usr/bin/env python
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Package indicator for gjslint.common."""
diff --git a/tools/closure_linter/closure_linter/common/error.py b/tools/closure_linter/closure_linter/common/error.py
index 0e3b476010..4209c235b8 100755
--- a/tools/closure_linter/closure_linter/common/error.py
+++ b/tools/closure_linter/closure_linter/common/error.py
@@ -23,7 +23,7 @@ __author__ = ('robbyw@google.com (Robert Walker)',
class Error(object):
"""Object representing a style error."""
- def __init__(self, code, message, token, position, fix_data):
+ def __init__(self, code, message, token=None, position=None, fix_data=None):
"""Initialize the error object.
Args:
diff --git a/tools/closure_linter/closure_linter/common/erroraccumulator.py b/tools/closure_linter/closure_linter/common/erroraccumulator.py
index 7bb0c97959..55844ba603 100755
--- a/tools/closure_linter/closure_linter/common/erroraccumulator.py
+++ b/tools/closure_linter/closure_linter/common/erroraccumulator.py
@@ -35,7 +35,7 @@ class ErrorAccumulator(errorhandler.ErrorHandler):
Args:
error: The error object
"""
- self._errors.append((error.token.line_number, error.code))
+ self._errors.append(error)
def GetErrors(self):
"""Returns the accumulated errors.
diff --git a/tools/closure_linter/closure_linter/common/erroroutput.py b/tools/closure_linter/closure_linter/common/erroroutput.py
new file mode 100644
index 0000000000..149738b5d4
--- /dev/null
+++ b/tools/closure_linter/closure_linter/common/erroroutput.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions to format errors."""
+
+
+__author__ = ('robbyw@google.com (Robert Walker)',
+ 'ajp@google.com (Andy Perelson)',
+ 'nnaze@google.com (Nathan Naze)')
+
+
+def GetUnixErrorOutput(filename, error, new_error=False):
+ """Get a output line for an error in UNIX format."""
+
+ line = ''
+
+ if error.token:
+ line = '%d' % error.token.line_number
+
+ error_code = '%04d' % error.code
+ if new_error:
+ error_code = 'New Error ' + error_code
+ return '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
+
+
+def GetErrorOutput(error, new_error=False):
+ """Get a output line for an error in regular format."""
+
+ line = ''
+ if error.token:
+ line = 'Line %d, ' % error.token.line_number
+
+ code = 'E:%04d' % error.code
+
+ error_message = error.message
+ if new_error:
+ error_message = 'New Error ' + error_message
+
+ return '%s%s: %s' % (line, code, error.message)
diff --git a/tools/closure_linter/closure_linter/common/errorprinter.py b/tools/closure_linter/closure_linter/common/errorprinter.py
deleted file mode 100755
index c9754068f1..0000000000
--- a/tools/closure_linter/closure_linter/common/errorprinter.py
+++ /dev/null
@@ -1,203 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS-IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Linter error handler class that prints errors to stdout."""
-
-__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
-
-from closure_linter.common import error
-from closure_linter.common import errorhandler
-
-Error = error.Error
-
-
-# The error message is of the format:
-# Line <number>, E:<code>: message
-DEFAULT_FORMAT = 1
-
-# The error message is of the format:
-# filename:[line number]:message
-UNIX_FORMAT = 2
-
-
-class ErrorPrinter(errorhandler.ErrorHandler):
- """ErrorHandler that prints errors to stdout."""
-
- def __init__(self, new_errors=None):
- """Initializes this error printer.
-
- Args:
- new_errors: A sequence of error codes representing recently introduced
- errors, defaults to None.
- """
- # Number of errors
- self._error_count = 0
-
- # Number of new errors
- self._new_error_count = 0
-
- # Number of files checked
- self._total_file_count = 0
-
- # Number of files with errors
- self._error_file_count = 0
-
- # Dict of file name to number of errors
- self._file_table = {}
-
- # List of errors for each file
- self._file_errors = None
-
- # Current file
- self._filename = None
-
- self._format = DEFAULT_FORMAT
-
- if new_errors:
- self._new_errors = frozenset(new_errors)
- else:
- self._new_errors = frozenset(set())
-
- def SetFormat(self, format):
- """Sets the print format of errors.
-
- Args:
- format: One of {DEFAULT_FORMAT, UNIX_FORMAT}.
- """
- self._format = format
-
- def HandleFile(self, filename, first_token):
- """Notifies this ErrorPrinter that subsequent errors are in filename.
-
- Sets the current file name, and sets a flag stating the header for this file
- has not been printed yet.
-
- Should be called by a linter before a file is style checked.
-
- Args:
- filename: The name of the file about to be checked.
- first_token: The first token in the file, or None if there was an error
- opening the file
- """
- if self._filename and self._file_table[self._filename]:
- print
-
- self._filename = filename
- self._file_table[filename] = 0
- self._total_file_count += 1
- self._file_errors = []
-
- def HandleError(self, error):
- """Prints a formatted error message about the specified error.
-
- The error message is of the format:
- Error #<code>, line #<number>: message
-
- Args:
- error: The error object
- """
- self._file_errors.append(error)
- self._file_table[self._filename] += 1
- self._error_count += 1
-
- if self._new_errors and error.code in self._new_errors:
- self._new_error_count += 1
-
- def _PrintError(self, error):
- """Prints a formatted error message about the specified error.
-
- Args:
- error: The error object
- """
- new_error = self._new_errors and error.code in self._new_errors
- if self._format == DEFAULT_FORMAT:
- line = ''
- if error.token:
- line = 'Line %d, ' % error.token.line_number
-
- code = 'E:%04d' % error.code
- if new_error:
- print '%s%s: (New error) %s' % (line, code, error.message)
- else:
- print '%s%s: %s' % (line, code, error.message)
- else:
- # UNIX format
- filename = self._filename
- line = ''
- if error.token:
- line = '%d' % error.token.line_number
-
- error_code = '%04d' % error.code
- if new_error:
- error_code = 'New Error ' + error_code
- print '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
-
- def FinishFile(self):
- """Finishes handling the current file."""
- if self._file_errors:
- self._error_file_count += 1
-
- if self._format != UNIX_FORMAT:
- print '----- FILE : %s -----' % (self._filename)
-
- self._file_errors.sort(Error.Compare)
-
- for error in self._file_errors:
- self._PrintError(error)
-
- def HasErrors(self):
- """Whether this error printer encountered any errors.
-
- Returns:
- True if the error printer encountered any errors.
- """
- return self._error_count
-
- def HasNewErrors(self):
- """Whether this error printer encountered any new errors.
-
- Returns:
- True if the error printer encountered any new errors.
- """
- return self._new_error_count
-
- def HasOldErrors(self):
- """Whether this error printer encountered any old errors.
-
- Returns:
- True if the error printer encountered any old errors.
- """
- return self._error_count - self._new_error_count
-
- def PrintSummary(self):
- """Print a summary of the number of errors and files."""
- if self.HasErrors() or self.HasNewErrors():
- print ('Found %d errors, including %d new errors, in %d files '
- '(%d files OK).' % (
- self._error_count,
- self._new_error_count,
- self._error_file_count,
- self._total_file_count - self._error_file_count))
- else:
- print '%d files checked, no errors found.' % self._total_file_count
-
- def PrintFileSummary(self):
- """Print a detailed summary of the number of errors in each file."""
- keys = self._file_table.keys()
- keys.sort()
- for filename in keys:
- print '%s: %d' % (filename, self._file_table[filename])
diff --git a/tools/closure_linter/closure_linter/common/filetestcase.py b/tools/closure_linter/closure_linter/common/filetestcase.py
index ae4b883fe2..7cd83cd1dc 100755
--- a/tools/closure_linter/closure_linter/common/filetestcase.py
+++ b/tools/closure_linter/closure_linter/common/filetestcase.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,6 +25,7 @@ __author__ = ('robbyw@google.com (Robert Walker)',
import re
+import gflags as flags
import unittest as googletest
from closure_linter.common import erroraccumulator
@@ -41,21 +41,27 @@ class AnnotatedFileTestCase(googletest.TestCase):
_EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P<line>[+-]?[0-9]+):)?'
r'\s*(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE)
- def __init__(self, filename, runner, converter):
+ def __init__(self, filename, lint_callable, converter):
"""Create a single file lint test case.
Args:
filename: Filename to test.
- runner: Object implementing the LintRunner interface that lints a file.
+ lint_callable: Callable that lints a file. This is usually runner.Run().
converter: Function taking an error string and returning an error code.
"""
googletest.TestCase.__init__(self, 'runTest')
self._filename = filename
self._messages = []
- self._runner = runner
+ self._lint_callable = lint_callable
self._converter = converter
+ def setUp(self):
+ flags.FLAGS.dot_on_next_line = True
+
+ def tearDown(self):
+ flags.FLAGS.dot_on_next_line = False
+
def shortDescription(self):
"""Provides a description for the test."""
return 'Run linter on %s' % self._filename
@@ -65,7 +71,7 @@ class AnnotatedFileTestCase(googletest.TestCase):
try:
filename = self._filename
stream = open(filename)
- except IOError, ex:
+ except IOError as ex:
raise IOError('Could not find testdata resource for %s: %s' %
(self._filename, ex))
@@ -96,10 +102,14 @@ class AnnotatedFileTestCase(googletest.TestCase):
return messages
def _ProcessFileAndGetMessages(self, filename):
- """Trap gpylint's output parse it to get messages added."""
- errors = erroraccumulator.ErrorAccumulator()
- self._runner.Run([filename], errors)
+ """Trap gjslint's output parse it to get messages added."""
+ error_accumulator = erroraccumulator.ErrorAccumulator()
+ self._lint_callable(filename, error_accumulator)
+
+ errors = error_accumulator.GetErrors()
+
+ # Convert to expected tuple format.
- errors = errors.GetErrors()
- errors.sort()
- return errors
+ error_msgs = [(error.token.line_number, error.code) for error in errors]
+ error_msgs.sort()
+ return error_msgs
diff --git a/tools/closure_linter/closure_linter/common/tokenizer.py b/tools/closure_linter/closure_linter/common/tokenizer.py
index 0234720d73..9420ea3267 100755
--- a/tools/closure_linter/closure_linter/common/tokenizer.py
+++ b/tools/closure_linter/closure_linter/common/tokenizer.py
@@ -90,7 +90,8 @@ class Tokenizer(object):
Returns:
The newly created Token object.
"""
- return tokens.Token(string, token_type, line, line_number, values)
+ return tokens.Token(string, token_type, line, line_number, values,
+ line_number)
def __TokenizeLine(self, line):
"""Tokenizes the given line.
diff --git a/tools/closure_linter/closure_linter/common/tokens.py b/tools/closure_linter/closure_linter/common/tokens.py
index 5eaffa8cba..4703998752 100755
--- a/tools/closure_linter/closure_linter/common/tokens.py
+++ b/tools/closure_linter/closure_linter/common/tokens.py
@@ -47,7 +47,8 @@ class Token(object):
a separate metadata pass.
"""
- def __init__(self, string, token_type, line, line_number, values=None):
+ def __init__(self, string, token_type, line, line_number, values=None,
+ orig_line_number=None):
"""Creates a new Token object.
Args:
@@ -58,13 +59,18 @@ class Token(object):
values: A dict of named values within the token. For instance, a
function declaration may have a value called 'name' which captures the
name of the function.
+ orig_line_number: The line number of the original file this token comes
+ from. This should be only set during the tokenization process. For newly
+ created error fix tokens after that, it should be None.
"""
self.type = token_type
self.string = string
self.length = len(string)
self.line = line
self.line_number = line_number
+ self.orig_line_number = orig_line_number
self.values = values
+ self.is_deleted = False
# These parts can only be computed when the file is fully tokenized
self.previous = None
@@ -123,3 +129,17 @@ class Token(object):
return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string,
self.values, self.line_number,
self.metadata)
+
+ def __iter__(self):
+ """Returns a token iterator."""
+ node = self
+ while node:
+ yield node
+ node = node.next
+
+ def __reversed__(self):
+ """Returns a reverse-direction token iterator."""
+ node = self
+ while node:
+ yield node
+ node = node.previous
diff --git a/tools/closure_linter/closure_linter/common/tokens_test.py b/tools/closure_linter/closure_linter/common/tokens_test.py
new file mode 100644
index 0000000000..01ec89d01b
--- /dev/null
+++ b/tools/closure_linter/closure_linter/common/tokens_test.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+__author__ = 'nnaze@google.com (Nathan Naze)'
+
+import unittest as googletest
+from closure_linter.common import tokens
+
+
+def _CreateDummyToken():
+ return tokens.Token('foo', None, 1, 1)
+
+
+def _CreateDummyTokens(count):
+ dummy_tokens = []
+ for _ in xrange(count):
+ dummy_tokens.append(_CreateDummyToken())
+ return dummy_tokens
+
+
+def _SetTokensAsNeighbors(neighbor_tokens):
+ for i in xrange(len(neighbor_tokens)):
+ prev_index = i - 1
+ next_index = i + 1
+
+ if prev_index >= 0:
+ neighbor_tokens[i].previous = neighbor_tokens[prev_index]
+
+ if next_index < len(neighbor_tokens):
+ neighbor_tokens[i].next = neighbor_tokens[next_index]
+
+
+class TokensTest(googletest.TestCase):
+
+ def testIsFirstInLine(self):
+
+ # First token in file (has no previous).
+ self.assertTrue(_CreateDummyToken().IsFirstInLine())
+
+ a, b = _CreateDummyTokens(2)
+ _SetTokensAsNeighbors([a, b])
+
+ # Tokens on same line
+ a.line_number = 30
+ b.line_number = 30
+
+ self.assertFalse(b.IsFirstInLine())
+
+ # Tokens on different lines
+ b.line_number = 31
+ self.assertTrue(b.IsFirstInLine())
+
+ def testIsLastInLine(self):
+ # Last token in file (has no next).
+ self.assertTrue(_CreateDummyToken().IsLastInLine())
+
+ a, b = _CreateDummyTokens(2)
+ _SetTokensAsNeighbors([a, b])
+
+ # Tokens on same line
+ a.line_number = 30
+ b.line_number = 30
+ self.assertFalse(a.IsLastInLine())
+
+ b.line_number = 31
+ self.assertTrue(a.IsLastInLine())
+
+ def testIsType(self):
+ a = tokens.Token('foo', 'fakeType1', 1, 1)
+ self.assertTrue(a.IsType('fakeType1'))
+ self.assertFalse(a.IsType('fakeType2'))
+
+ def testIsAnyType(self):
+ a = tokens.Token('foo', 'fakeType1', 1, 1)
+ self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2']))
+ self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4']))
+
+ def testRepr(self):
+ a = tokens.Token('foo', 'fakeType1', 1, 1)
+ self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a))
+
+ def testIter(self):
+ dummy_tokens = _CreateDummyTokens(5)
+ _SetTokensAsNeighbors(dummy_tokens)
+ a, b, c, d, e = dummy_tokens
+
+ i = iter(a)
+ self.assertListEqual([a, b, c, d, e], list(i))
+
+ def testReverseIter(self):
+ dummy_tokens = _CreateDummyTokens(5)
+ _SetTokensAsNeighbors(dummy_tokens)
+ a, b, c, d, e = dummy_tokens
+
+ ri = reversed(e)
+ self.assertListEqual([e, d, c, b, a], list(ri))
+
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/closure_linter/ecmalintrules.py b/tools/closure_linter/closure_linter/ecmalintrules.py
index a971b44d77..c07dffc86e 100755
--- a/tools/closure_linter/closure_linter/ecmalintrules.py
+++ b/tools/closure_linter/closure_linter/ecmalintrules.py
@@ -23,25 +23,28 @@ __author__ = ('robbyw@google.com (Robert Walker)',
import re
+import gflags as flags
+
from closure_linter import checkerbase
from closure_linter import ecmametadatapass
+from closure_linter import error_check
+from closure_linter import errorrules
from closure_linter import errors
from closure_linter import indentation
-from closure_linter import javascripttokens
from closure_linter import javascripttokenizer
+from closure_linter import javascripttokens
from closure_linter import statetracker
from closure_linter import tokenutil
from closure_linter.common import error
-from closure_linter.common import htmlutil
-from closure_linter.common import lintrunner
from closure_linter.common import position
-from closure_linter.common import tokens
-import gflags as flags
+
FLAGS = flags.FLAGS
-flags.DEFINE_boolean('strict', False,
- 'Whether to validate against the stricter Closure style.')
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
+# TODO(user): When flipping this to True, remove logic from unit tests
+# that overrides this flag.
+flags.DEFINE_boolean('dot_on_next_line', False, 'Require dots to be'
+ 'placed on the next line for wrapped expressions')
# TODO(robbyw): Check for extra parens on return statements
# TODO(robbyw): Check for 0px in strings
@@ -53,8 +56,10 @@ Context = ecmametadatapass.EcmaContext
Error = error.Error
Modes = javascripttokenizer.JavaScriptModes
Position = position.Position
+Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
+
class EcmaScriptLintRules(checkerbase.LintRulesBase):
"""EmcaScript lint style checking rules.
@@ -67,14 +72,15 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
language.
"""
- # Static constants.
- MAX_LINE_LENGTH = 80
+ # It will be initialized in constructor so the flags are initialized.
+ max_line_length = -1
+ # Static constants.
MISSING_PARAMETER_SPACE = re.compile(r',\S')
- EXTRA_SPACE = re.compile('(\(\s|\s\))')
+ EXTRA_SPACE = re.compile(r'(\(\s|\s\))')
- ENDS_WITH_SPACE = re.compile('\s$')
+ ENDS_WITH_SPACE = re.compile(r'\s$')
ILLEGAL_TAB = re.compile(r'\t')
@@ -85,12 +91,18 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
# Acceptable tokens to remove for line too long testing.
- LONG_LINE_IGNORE = frozenset(['*', '//', '@see'] +
+ LONG_LINE_IGNORE = frozenset(
+ ['*', '//', '@see'] +
['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
+ JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([
+ '@fileoverview', '@param', '@return', '@returns'])
+
def __init__(self):
"""Initialize this lint rule object."""
checkerbase.LintRulesBase.__init__(self)
+ if EcmaScriptLintRules.max_line_length == -1:
+ EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength()
def Initialize(self, checker, limited_doc_checks, is_html):
"""Initialize this lint rule object before parsing a new file."""
@@ -107,6 +119,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
Args:
last_token: The last token in the line.
+ state: parser_state object that indicates the current state in the page
"""
# Start from the last token so that we have the flag object attached to
# and DOC_FLAG tokens.
@@ -119,8 +132,8 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
while token and token.line_number == line_number:
if state.IsTypeToken(token):
line.insert(0, 'x' * len(token.string))
- elif token.type in (Type.IDENTIFIER, Type.NORMAL):
- # Dots are acceptable places to wrap.
+ elif token.type in (Type.IDENTIFIER, Type.OPERATOR):
+ # Dots are acceptable places to wrap (may be tokenized as identifiers).
line.insert(0, token.string.replace('.', ' '))
else:
line.insert(0, token.string)
@@ -130,7 +143,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
line = line.rstrip('\n\r\f')
try:
length = len(unicode(line, 'utf-8'))
- except:
+ except (LookupError, UnicodeDecodeError):
# Unknown encoding. The line length may be wrong, as was originally the
# case for utf-8 (see bug 1735846). For now just accept the default
# length, but as we find problems we can either add test for other
@@ -138,7 +151,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
# false positives at the cost of more false negatives.
length = len(line)
- if length > self.MAX_LINE_LENGTH:
+ if length > EcmaScriptLintRules.max_line_length:
# If the line matches one of the exceptions, then it's ok.
for long_line_regexp in self.GetLongLineExceptions():
@@ -150,43 +163,42 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
parts = set(line.split())
# We allow two "words" (type and name) when the line contains @param
- max = 1
+ max_parts = 1
if '@param' in parts:
- max = 2
+ max_parts = 2
# Custom tags like @requires may have url like descriptions, so ignore
# the tag, similar to how we handle @see.
custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
- if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags)) > max):
- self._HandleError(errors.LINE_TOO_LONG,
+ if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags))
+ > max_parts):
+ self._HandleError(
+ errors.LINE_TOO_LONG,
'Line too long (%d characters).' % len(line), last_token)
- def _CheckJsDocType(self, token):
+ def _CheckJsDocType(self, token, js_type):
"""Checks the given type for style errors.
Args:
token: The DOC_FLAG token for the flag whose type to check.
+ js_type: The flag's typeannotation.TypeAnnotation instance.
"""
- flag = token.attached_object
- type = flag.type
- if type and type is not None and not type.isspace():
- pieces = self.TYPE_SPLIT.split(type)
- if len(pieces) == 1 and type.count('|') == 1 and (
- type.endswith('|null') or type.startswith('null|')):
- self._HandleError(errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
- 'Prefer "?Type" to "Type|null": "%s"' % type, token)
-
- for p in pieces:
- if p.count('|') and p.count('?'):
- # TODO(robbyw): We should do actual parsing of JsDoc types. As is,
- # this won't report an error for {number|Array.<string>?}, etc.
- self._HandleError(errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,
- 'JsDoc types cannot contain both "?" and "|": "%s"' % p, token)
-
- if FLAGS.strict and (flag.type_start_token.type != Type.DOC_START_BRACE or
- flag.type_end_token.type != Type.DOC_END_BRACE):
- self._HandleError(errors.MISSING_BRACES_AROUND_TYPE,
- 'Type must always be surrounded by curly braces.', token)
+ if not js_type: return
+
+ if js_type.type_group and len(js_type.sub_types) == 2:
+ identifiers = [t.identifier for t in js_type.sub_types]
+ if 'null' in identifiers:
+ # Don't warn if the identifier is a template type (e.g. {TYPE|null}.
+ if not identifiers[0].isupper() and not identifiers[1].isupper():
+ self._HandleError(
+ errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
+ 'Prefer "?Type" to "Type|null": "%s"' % js_type, token)
+
+ # TODO(user): We should report an error for wrong usage of '?' and '|'
+ # e.g. {?number|string|null} etc.
+
+ for sub_type in js_type.IterTypes():
+ self._CheckJsDocType(token, sub_type)
def _CheckForMissingSpaceBeforeToken(self, token):
"""Checks for a missing space at the beginning of a token.
@@ -206,7 +218,60 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
errors.MISSING_SPACE,
'Missing space before "%s"' % token.string,
token,
- Position.AtBeginning())
+ position=Position.AtBeginning())
+
+ def _CheckOperator(self, token):
+ """Checks an operator for spacing and line style.
+
+ Args:
+ token: The operator token.
+ """
+ last_code = token.metadata.last_code
+
+ if not self._ExpectSpaceBeforeOperator(token):
+ if (token.previous and token.previous.type == Type.WHITESPACE and
+ last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER) and
+ last_code.line_number == token.line_number):
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string,
+ token.previous, position=Position.All(token.previous.string))
+
+ elif (token.previous and
+ not token.previous.IsComment() and
+ not tokenutil.IsDot(token) and
+ token.previous.type in Type.EXPRESSION_ENDER_TYPES):
+ self._HandleError(errors.MISSING_SPACE,
+ 'Missing space before "%s"' % token.string, token,
+ position=Position.AtBeginning())
+
+ # Check wrapping of operators.
+ next_code = tokenutil.GetNextCodeToken(token)
+
+ is_dot = tokenutil.IsDot(token)
+ wrapped_before = last_code and last_code.line_number != token.line_number
+ wrapped_after = next_code and next_code.line_number != token.line_number
+
+ if FLAGS.dot_on_next_line and is_dot and wrapped_after:
+ self._HandleError(
+ errors.LINE_ENDS_WITH_DOT,
+ '"." must go on the following line',
+ token)
+ if (not is_dot and wrapped_before and
+ not token.metadata.IsUnaryOperator()):
+ self._HandleError(
+ errors.LINE_STARTS_WITH_OPERATOR,
+ 'Binary operator must go on previous line "%s"' % token.string,
+ token)
+
+ def _IsLabel(self, token):
+ # A ':' token is considered part of a label if it occurs in a case
+ # statement, a plain label, or an object literal, i.e. is not part of a
+ # ternary.
+
+ return (token.string == ':' and
+ token.metadata.context.type in (Context.LITERAL_ELEMENT,
+ Context.CASE_BLOCK,
+ Context.STATEMENT))
def _ExpectSpaceBeforeOperator(self, token):
"""Returns whether a space should appear before the given operator token.
@@ -220,13 +285,13 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
if token.string == ',' or token.metadata.IsUnaryPostOperator():
return False
+ if tokenutil.IsDot(token):
+ return False
+
# Colons should appear in labels, object literals, the case of a switch
# statement, and ternary operator. Only want a space in the case of the
# ternary operator.
- if (token.string == ':' and
- token.metadata.context.type in (Context.LITERAL_ELEMENT,
- Context.CASE_BLOCK,
- Context.STATEMENT)):
+ if self._IsLabel(token):
return False
if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
@@ -246,10 +311,10 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
last_in_line = token.IsLastInLine()
last_non_space_token = state.GetLastNonSpaceToken()
- type = token.type
+ token_type = token.type
# Process the line change.
- if not self._is_html and FLAGS.strict:
+ if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
# TODO(robbyw): Support checking indentation in HTML files.
indentation_errors = self._indentation.CheckToken(token, state)
for indentation_error in indentation_errors:
@@ -258,11 +323,12 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
if last_in_line:
self._CheckLineLength(token, state)
- if type == Type.PARAMETERS:
+ if token_type == Type.PARAMETERS:
# Find missing spaces in parameter lists.
if self.MISSING_PARAMETER_SPACE.search(token.string):
+ fix_data = ', '.join([s.strip() for s in token.string.split(',')])
self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
- token)
+ token, position=None, fix_data=fix_data.strip())
# Find extra spaces at the beginning of parameter lists. Make sure
# we aren't at the beginning of a continuing multi-line list.
@@ -270,54 +336,56 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
space_count = len(token.string) - len(token.string.lstrip())
if space_count:
self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
- token, Position(0, space_count))
+ token, position=Position(0, space_count))
- elif (type == Type.START_BLOCK and
+ elif (token_type == Type.START_BLOCK and
token.metadata.context.type == Context.BLOCK):
self._CheckForMissingSpaceBeforeToken(token)
- elif type == Type.END_BLOCK:
- # This check is for object literal end block tokens, but there is no need
- # to test that condition since a comma at the end of any other kind of
- # block is undoubtedly a parse error.
+ elif token_type == Type.END_BLOCK:
last_code = token.metadata.last_code
- if last_code.IsOperator(','):
- self._HandleError(errors.COMMA_AT_END_OF_LITERAL,
- 'Illegal comma at end of object literal', last_code,
- Position.All(last_code.string))
-
if state.InFunction() and state.IsFunctionClose():
- is_immediately_called = (token.next and
- token.next.type == Type.START_PAREN)
if state.InTopLevelFunction():
- # When the function was top-level and not immediately called, check
- # that it's terminated by a semi-colon.
- if state.InAssignedFunction():
- if not is_immediately_called and (last_in_line or
- not token.next.type == Type.SEMICOLON):
- self._HandleError(errors.MISSING_SEMICOLON_AFTER_FUNCTION,
- 'Missing semicolon after function assigned to a variable',
- token, Position.AtEnd(token.string))
- else:
+ # A semicolons should not be included at the end of a function
+ # declaration.
+ if not state.InAssignedFunction():
if not last_in_line and token.next.type == Type.SEMICOLON:
- self._HandleError(errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
+ self._HandleError(
+ errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
'Illegal semicolon after function declaration',
- token.next, Position.All(token.next.string))
+ token.next, position=Position.All(token.next.string))
+
+ # A semicolon should be included at the end of a function expression
+ # that is not immediately called or used by a dot operator.
+ if (state.InAssignedFunction() and token.next
+ and token.next.type != Type.SEMICOLON):
+ next_token = tokenutil.GetNextCodeToken(token)
+ is_immediately_used = (next_token.type == Type.START_PAREN or
+ tokenutil.IsDot(next_token))
+ if not is_immediately_used:
+ self._HandleError(
+ errors.MISSING_SEMICOLON_AFTER_FUNCTION,
+ 'Missing semicolon after function assigned to a variable',
+ token, position=Position.AtEnd(token.string))
- if (state.InInterfaceMethod() and last_code.type != Type.START_BLOCK):
+ if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK:
self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
- 'Interface methods cannot contain code', last_code)
+ 'Interface methods cannot contain code', last_code)
elif (state.IsBlockClose() and
token.next and token.next.type == Type.SEMICOLON):
- self._HandleError(errors.REDUNDANT_SEMICOLON,
- 'No semicolon is required to end a code block',
- token.next, Position.All(token.next.string))
-
- elif type == Type.SEMICOLON:
+ if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL
+ and last_code.metadata.context.type != Context.OBJECT_LITERAL):
+ self._HandleError(
+ errors.REDUNDANT_SEMICOLON,
+ 'No semicolon is required to end a code block',
+ token.next, position=Position.All(token.next.string))
+
+ elif token_type == Type.SEMICOLON:
if token.previous and token.previous.type == Type.WHITESPACE:
- self._HandleError(errors.EXTRA_SPACE, 'Extra space before ";"',
- token.previous, Position.All(token.previous.string))
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space before ";"',
+ token.previous, position=Position.All(token.previous.string))
if token.next and token.next.line_number == token.line_number:
if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
@@ -326,10 +394,11 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
elif token.next.type not in (
Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
- self._HandleError(errors.MISSING_SPACE,
+ self._HandleError(
+ errors.MISSING_SPACE,
'Missing space after ";" in for statement',
token.next,
- Position.AtBeginning())
+ position=Position.AtBeginning())
last_code = token.metadata.last_code
if last_code and last_code.type == Type.SEMICOLON:
@@ -338,7 +407,8 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
# NOTE(user): This is not a perfect check, and will not throw an error
# for cases like: for (var i = 0;; i < n; i++) {}, but then your code
# probably won't work either.
- for_token = tokenutil.CustomSearch(last_code,
+ for_token = tokenutil.CustomSearch(
+ last_code,
lambda token: token.type == Type.KEYWORD and token.string == 'for',
end_func=lambda token: token.type == Type.SEMICOLON,
distance=None,
@@ -346,113 +416,83 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
if not for_token:
self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
- token, Position.All(token.string))
-
- elif type == Type.START_PAREN:
- if token.previous and token.previous.type == Type.KEYWORD:
+ token, position=Position.All(token.string))
+
+ elif token_type == Type.START_PAREN:
+ # Ensure that opening parentheses have a space before any keyword
+ # that is not being invoked like a member function.
+ if (token.previous and token.previous.type == Type.KEYWORD and
+ (not token.previous.metadata or
+ not token.previous.metadata.last_code or
+ not token.previous.metadata.last_code.string or
+ token.previous.metadata.last_code.string[-1:] != '.')):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
- token, Position.AtBeginning())
+ token, position=Position.AtBeginning())
elif token.previous and token.previous.type == Type.WHITESPACE:
before_space = token.previous.previous
+ # Ensure that there is no extra space before a function invocation,
+ # even if the function being invoked happens to be a keyword.
if (before_space and before_space.line_number == token.line_number and
- before_space.type == Type.IDENTIFIER):
- self._HandleError(errors.EXTRA_SPACE, 'Extra space before "("',
- token.previous, Position.All(token.previous.string))
-
- elif type == Type.START_BRACKET:
- if (not first_in_line and token.previous.type == Type.WHITESPACE and
- last_non_space_token and
- last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
- self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["',
- token.previous, Position.All(token.previous.string))
- # If the [ token is the first token in a line we shouldn't complain
- # about a missing space before [. This is because some Ecma script
- # languages allow syntax like:
- # [Annotation]
- # class MyClass {...}
- # So we don't want to blindly warn about missing spaces before [.
- # In the the future, when rules for computing exactly how many spaces
- # lines should be indented are added, then we can return errors for
- # [ tokens that are improperly indented.
- # For example:
- # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
- # [a,b,c];
- # should trigger a proper indentation warning message as [ is not indented
- # by four spaces.
- elif (not first_in_line and token.previous and
- not token.previous.type in (
- [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
- Type.EXPRESSION_ENDER_TYPES)):
- self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
- token, Position.AtBeginning())
-
- elif type in (Type.END_PAREN, Type.END_BRACKET):
+ before_space.type == Type.IDENTIFIER or
+ (before_space.type == Type.KEYWORD and before_space.metadata and
+ before_space.metadata.last_code and
+ before_space.metadata.last_code.string and
+ before_space.metadata.last_code.string[-1:] == '.')):
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space before "("',
+ token.previous, position=Position.All(token.previous.string))
+
+ elif token_type == Type.START_BRACKET:
+ self._HandleStartBracket(token, last_non_space_token)
+ elif token_type in (Type.END_PAREN, Type.END_BRACKET):
# Ensure there is no space before closing parentheses, except when
# it's in a for statement with an omitted section, or when it's at the
# beginning of a line.
if (token.previous and token.previous.type == Type.WHITESPACE and
not token.previous.IsFirstInLine() and
not (last_non_space_token and last_non_space_token.line_number ==
- token.line_number and
+ token.line_number and
last_non_space_token.type == Type.SEMICOLON)):
- self._HandleError(errors.EXTRA_SPACE, 'Extra space before "%s"' %
- token.string, token.previous, Position.All(token.previous.string))
-
- if token.type == Type.END_BRACKET:
- last_code = token.metadata.last_code
- if last_code.IsOperator(','):
- self._HandleError(errors.COMMA_AT_END_OF_LITERAL,
- 'Illegal comma at end of array literal', last_code,
- Position.All(last_code.string))
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space before "%s"' %
+ token.string, token.previous,
+ position=Position.All(token.previous.string))
- elif type == Type.WHITESPACE:
+ elif token_type == Type.WHITESPACE:
if self.ILLEGAL_TAB.search(token.string):
if token.IsFirstInLine():
- self._HandleError(errors.ILLEGAL_TAB,
- 'Illegal tab in whitespace before "%s"' % token.next.string,
- token, Position.All(token.string))
+ if token.next:
+ self._HandleError(
+ errors.ILLEGAL_TAB,
+ 'Illegal tab in whitespace before "%s"' % token.next.string,
+ token, position=Position.All(token.string))
+ else:
+ self._HandleError(
+ errors.ILLEGAL_TAB,
+ 'Illegal tab in whitespace',
+ token, position=Position.All(token.string))
else:
- self._HandleError(errors.ILLEGAL_TAB,
+ self._HandleError(
+ errors.ILLEGAL_TAB,
'Illegal tab in whitespace after "%s"' % token.previous.string,
- token, Position.All(token.string))
+ token, position=Position.All(token.string))
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if last_in_line:
# Check for extra whitespace at the end of a line.
self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
- token, Position.All(token.string))
+ token, position=Position.All(token.string))
elif not first_in_line and not token.next.IsComment():
if token.length > 1:
- self._HandleError(errors.EXTRA_SPACE, 'Extra space after "%s"' %
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space after "%s"' %
token.previous.string, token,
- Position(1, len(token.string) - 1))
-
- elif type == Type.OPERATOR:
- last_code = token.metadata.last_code
+ position=Position(1, len(token.string) - 1))
- if not self._ExpectSpaceBeforeOperator(token):
- if (token.previous and token.previous.type == Type.WHITESPACE and
- last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER)):
- self._HandleError(errors.EXTRA_SPACE,
- 'Extra space before "%s"' % token.string, token.previous,
- Position.All(token.previous.string))
-
- elif (token.previous and
- not token.previous.IsComment() and
- token.previous.type in Type.EXPRESSION_ENDER_TYPES):
- self._HandleError(errors.MISSING_SPACE,
- 'Missing space before "%s"' % token.string, token,
- Position.AtBeginning())
-
- # Check that binary operators are not used to start lines.
- if ((not last_code or last_code.line_number != token.line_number) and
- not token.metadata.IsUnaryOperator()):
- self._HandleError(errors.LINE_STARTS_WITH_OPERATOR,
- 'Binary operator should go on previous line "%s"' % token.string,
- token)
-
- elif type == Type.DOC_FLAG:
+ elif token_type == Type.OPERATOR:
+ self._CheckOperator(token)
+ elif token_type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'bug':
@@ -462,21 +502,25 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
if not string.isdigit():
self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
- '@bug should be followed by a bug number', token)
+ '@bug should be followed by a bug number', token)
elif flag.flag_type == 'suppress':
if flag.type is None:
# A syntactically invalid suppress tag will get tokenized as a normal
# flag, indicating an error.
- self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX,
+ self._HandleError(
+ errors.INCORRECT_SUPPRESS_SYNTAX,
'Invalid suppress syntax: should be @suppress {errortype}. '
'Spaces matter.', token)
- elif flag.type not in state.GetDocFlag().SUPPRESS_TYPES:
- self._HandleError(errors.INVALID_SUPPRESS_TYPE,
- 'Invalid suppression type: %s' % flag.type,
- token)
-
- elif FLAGS.strict and flag.flag_type == 'author':
+ else:
+ for suppress_type in flag.jstype.IterIdentifiers():
+ if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
+ self._HandleError(
+ errors.INVALID_SUPPRESS_TYPE,
+ 'Invalid suppression type: %s' % suppress_type, token)
+
+ elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
+ flag.flag_type == 'author'):
# TODO(user): In non strict mode check the author tag for as much as
# it exists, though the full form checked below isn't required.
string = token.next.string
@@ -494,12 +538,12 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
if num_spaces < 1:
self._HandleError(errors.MISSING_SPACE,
'Missing space after email address',
- token.next, Position(result.start(2), 0))
+ token.next, position=Position(result.start(2), 0))
elif num_spaces > 1:
- self._HandleError(errors.EXTRA_SPACE,
- 'Extra space after email address',
- token.next,
- Position(result.start(2) + 1, num_spaces - 1))
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space after email address',
+ token.next,
+ position=Position(result.start(2) + 1, num_spaces - 1))
# Check for extra spaces before email address. Can't be too few, if
# not at least one we wouldn't match @author tag.
@@ -507,80 +551,61 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
if num_spaces > 1:
self._HandleError(errors.EXTRA_SPACE,
'Extra space before email address',
- token.next, Position(1, num_spaces - 1))
+ token.next, position=Position(1, num_spaces - 1))
elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
not self._limited_doc_checks):
if flag.flag_type == 'param':
if flag.name is None:
self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
- 'Missing name in @param tag', token)
+ 'Missing name in @param tag', token)
if not flag.description or flag.description is None:
flag_name = token.type
if 'name' in token.values:
flag_name = '@' + token.values['name']
- self._HandleError(errors.MISSING_JSDOC_TAG_DESCRIPTION,
- 'Missing description in %s tag' % flag_name, token)
- else:
- self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
-
- # We want punctuation to be inside of any tags ending a description,
- # so strip tags before checking description. See bug 1127192. Note
- # that depending on how lines break, the real description end token
- # may consist only of stripped html and the effective end token can
- # be different.
- end_token = flag.description_end_token
- end_string = htmlutil.StripTags(end_token.string).strip()
- while (end_string == '' and not
- end_token.type in Type.FLAG_ENDING_TYPES):
- end_token = end_token.previous
- if end_token.type in Type.FLAG_DESCRIPTION_TYPES:
- end_string = htmlutil.StripTags(end_token.string).rstrip()
-
- if not (end_string.endswith('.') or end_string.endswith('?') or
- end_string.endswith('!')):
- # Find the position for the missing punctuation, inside of any html
- # tags.
- desc_str = end_token.string.rstrip()
- while desc_str.endswith('>'):
- start_tag_index = desc_str.rfind('<')
- if start_tag_index < 0:
- break
- desc_str = desc_str[:start_tag_index].rstrip()
- end_position = Position(len(desc_str), 0)
+ if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
self._HandleError(
- errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER,
- ('%s descriptions must end with valid punctuation such as a '
- 'period.' % token.string),
- end_token, end_position)
+ errors.MISSING_JSDOC_TAG_DESCRIPTION,
+ 'Missing description in %s tag' % flag_name, token)
+ else:
+ self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
- if flag.flag_type in state.GetDocFlag().HAS_TYPE:
+ if flag.HasType():
if flag.type_start_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.type_start_token)
- if flag.type and flag.type != '' and not flag.type.isspace():
- self._CheckJsDocType(token)
-
- if type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
- if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
- token.values['name'] not in FLAGS.custom_jsdoc_tags):
- self._HandleError(errors.INVALID_JSDOC_TAG,
- 'Invalid JsDoc tag: %s' % token.values['name'], token)
+ if flag.jstype and not flag.jstype.IsEmpty():
+ self._CheckJsDocType(token, flag.jstype)
- if (FLAGS.strict and token.values['name'] == 'inheritDoc' and
- type == Type.DOC_INLINE_FLAG):
- self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
- 'Unnecessary braces around @inheritDoc',
- token)
-
- elif type == Type.SIMPLE_LVALUE:
+ if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
+ flag.type_start_token.type != Type.DOC_START_BRACE or
+ flag.type_end_token.type != Type.DOC_END_BRACE):
+ self._HandleError(
+ errors.MISSING_BRACES_AROUND_TYPE,
+ 'Type must always be surrounded by curly braces.', token)
+
+ if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
+ if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
+ token.values['name'] not in FLAGS.custom_jsdoc_tags):
+ self._HandleError(
+ errors.INVALID_JSDOC_TAG,
+ 'Invalid JsDoc tag: %s' % token.values['name'], token)
+
+ if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
+ token.values['name'] == 'inheritDoc' and
+ token_type == Type.DOC_INLINE_FLAG):
+ self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
+ 'Unnecessary braces around @inheritDoc',
+ token)
+
+ elif token_type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
if ((not state.InFunction() or state.InConstructor()) and
- not state.InParentheses() and not state.InObjectLiteralDescendant()):
+ state.InTopLevel() and not state.InObjectLiteralDescendant()):
jsdoc = state.GetDocComment()
if not state.HasDocComment(identifier):
# Only test for documentation on identifiers with .s in them to
@@ -592,45 +617,62 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
self._limited_doc_checks):
comment = state.GetLastComment()
if not (comment and comment.lower().count('jsdoc inherited')):
- self._HandleError(errors.MISSING_MEMBER_DOCUMENTATION,
+ self._HandleError(
+ errors.MISSING_MEMBER_DOCUMENTATION,
"No docs found for member '%s'" % identifier,
- token);
+ token)
elif jsdoc and (not state.InConstructor() or
identifier.startswith('this.')):
# We are at the top level and the function/member is documented.
if identifier.endswith('_') and not identifier.endswith('__'):
- if jsdoc.HasFlag('override'):
- self._HandleError(errors.INVALID_OVERRIDE_PRIVATE,
- '%s should not override a private member.' % identifier,
- jsdoc.GetFlag('override').flag_token)
# Can have a private class which inherits documentation from a
# public superclass.
- if jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor'):
- self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE,
+ #
+ # @inheritDoc is deprecated in favor of using @override, and they
+ if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
+ and ('accessControls' not in jsdoc.suppressions)):
+ self._HandleError(
+ errors.INVALID_OVERRIDE_PRIVATE,
+ '%s should not override a private member.' % identifier,
+ jsdoc.GetFlag('override').flag_token)
+ if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
+ and ('accessControls' not in jsdoc.suppressions)):
+ self._HandleError(
+ errors.INVALID_INHERIT_DOC_PRIVATE,
'%s should not inherit from a private member.' % identifier,
jsdoc.GetFlag('inheritDoc').flag_token)
if (not jsdoc.HasFlag('private') and
- not ('underscore' in jsdoc.suppressions)):
- self._HandleError(errors.MISSING_PRIVATE,
+ ('underscore' not in jsdoc.suppressions) and not
+ ((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
+ ('accessControls' in jsdoc.suppressions))):
+ self._HandleError(
+ errors.MISSING_PRIVATE,
'Member "%s" must have @private JsDoc.' %
identifier, token)
if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
- self._HandleError(errors.UNNECESSARY_SUPPRESS,
+ self._HandleError(
+ errors.UNNECESSARY_SUPPRESS,
'@suppress {underscore} is not necessary with @private',
jsdoc.suppressions['underscore'])
- elif jsdoc.HasFlag('private'):
- self._HandleError(errors.EXTRA_PRIVATE,
+ elif (jsdoc.HasFlag('private') and
+ not self.InExplicitlyTypedLanguage()):
+ # It is convention to hide public fields in some ECMA
+ # implementations from documentation using the @private tag.
+ self._HandleError(
+ errors.EXTRA_PRIVATE,
'Member "%s" must not have @private JsDoc' %
identifier, token)
- if ((jsdoc.HasFlag('desc') or jsdoc.HasFlag('hidden'))
- and not identifier.startswith('MSG_')
- and identifier.find('.MSG_') == -1):
- # TODO(user): Update error message to show the actual invalid
- # tag, either @desc or @hidden.
- self._HandleError(errors.INVALID_USE_OF_DESC_TAG,
- 'Member "%s" should not have @desc JsDoc' % identifier,
- token)
+ # These flags are only legal on localizable message definitions;
+ # such variables always begin with the prefix MSG_.
+ for f in ('desc', 'hidden', 'meaning'):
+ if (jsdoc.HasFlag(f)
+ and not identifier.startswith('MSG_')
+ and identifier.find('.MSG_') == -1):
+ self._HandleError(
+ errors.INVALID_USE_OF_DESC_TAG,
+ 'Member "%s" should not have @%s JsDoc' % (identifier, f),
+ token)
# Check for illegaly assigning live objects as prototype property values.
index = identifier.find('.prototype.')
@@ -641,28 +683,30 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
if next_code and (
next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
next_code.IsOperator('new')):
- self._HandleError(errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
+ self._HandleError(
+ errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
'Member %s cannot have a non-primitive value' % identifier,
token)
- elif type == Type.END_PARAMETERS:
+ elif token_type == Type.END_PARAMETERS:
# Find extra space at the end of parameter lists. We check the token
# prior to the current one when it is a closing paren.
if (token.previous and token.previous.type == Type.PARAMETERS
and self.ENDS_WITH_SPACE.search(token.previous.string)):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
- token.previous)
+ token.previous)
jsdoc = state.GetDocComment()
if state.GetFunction().is_interface:
if token.previous and token.previous.type == Type.PARAMETERS:
- self._HandleError(errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
+ self._HandleError(
+ errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
'Interface constructor cannot have parameters',
token.previous)
elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
- and not jsdoc.InheritsDocumentation()
- and not state.InObjectLiteralDescendant() and not
- jsdoc.IsInvalidated()):
+ and not jsdoc.InheritsDocumentation()
+ and not state.InObjectLiteralDescendant() and not
+ jsdoc.IsInvalidated()):
distance, edit = jsdoc.CompareParameters(state.GetParams())
if distance:
params_iter = iter(state.GetParams())
@@ -677,68 +721,108 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
# Languages that don't allow variables to by typed such as
# JavaScript care but languages such as ActionScript or Java
# that allow variables to be typed don't care.
- self.HandleMissingParameterDoc(token, params_iter.next())
+ if not self._limited_doc_checks:
+ self.HandleMissingParameterDoc(token, params_iter.next())
elif op == 'D':
# Deletion
self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
- 'Found docs for non-existing parameter: "%s"' %
- docs_iter.next(), token)
+ 'Found docs for non-existing parameter: "%s"' %
+ docs_iter.next(), token)
elif op == 'S':
# Substitution
- self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,
- 'Parameter mismatch: got "%s", expected "%s"' %
- (params_iter.next(), docs_iter.next()), token)
+ if not self._limited_doc_checks:
+ self._HandleError(
+ errors.WRONG_PARAMETER_DOCUMENTATION,
+ 'Parameter mismatch: got "%s", expected "%s"' %
+ (params_iter.next(), docs_iter.next()), token)
else:
# Equality - just advance the iterators
params_iter.next()
docs_iter.next()
- elif type == Type.STRING_TEXT:
+ elif token_type == Type.STRING_TEXT:
# If this is the first token after the start of the string, but it's at
# the end of a line, we know we have a multi-line string.
- if token.previous.type in (Type.SINGLE_QUOTE_STRING_START,
+ if token.previous.type in (
+ Type.SINGLE_QUOTE_STRING_START,
Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
self._HandleError(errors.MULTI_LINE_STRING,
- 'Multi-line strings are not allowed', token)
-
+ 'Multi-line strings are not allowed', token)
# This check is orthogonal to the ones above, and repeats some types, so
# it is a plain if and not an elif.
if token.type in Type.COMMENT_TYPES:
if self.ILLEGAL_TAB.search(token.string):
self._HandleError(errors.ILLEGAL_TAB,
- 'Illegal tab in comment "%s"' % token.string, token)
+ 'Illegal tab in comment "%s"' % token.string, token)
trimmed = token.string.rstrip()
if last_in_line and token.string != trimmed:
# Check for extra whitespace at the end of a line.
- self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
- token, Position(len(trimmed), len(token.string) - len(trimmed)))
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space at end of line', token,
+ position=Position(len(trimmed), len(token.string) - len(trimmed)))
# This check is also orthogonal since it is based on metadata.
if token.metadata.is_implied_semicolon:
self._HandleError(errors.MISSING_SEMICOLON,
- 'Missing semicolon at end of line', token)
+ 'Missing semicolon at end of line', token)
+
+ def _HandleStartBracket(self, token, last_non_space_token):
+ """Handles a token that is an open bracket.
+
+ Args:
+ token: The token to handle.
+ last_non_space_token: The last token that was not a space.
+ """
+ if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
+ last_non_space_token and
+ last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
+ self._HandleError(
+ errors.EXTRA_SPACE, 'Extra space before "["',
+ token.previous, position=Position.All(token.previous.string))
+ # If the [ token is the first token in a line we shouldn't complain
+ # about a missing space before [. This is because some Ecma script
+ # languages allow syntax like:
+ # [Annotation]
+ # class MyClass {...}
+ # So we don't want to blindly warn about missing spaces before [.
+ # In the the future, when rules for computing exactly how many spaces
+ # lines should be indented are added, then we can return errors for
+ # [ tokens that are improperly indented.
+ # For example:
+ # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
+ # [a,b,c];
+ # should trigger a proper indentation warning message as [ is not indented
+ # by four spaces.
+ elif (not token.IsFirstInLine() and token.previous and
+ token.previous.type not in (
+ [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
+ Type.EXPRESSION_ENDER_TYPES)):
+ self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
+ token, position=Position.AtBeginning())
+
+ def Finalize(self, state):
+ """Perform all checks that need to occur after all lines are processed.
+
+ Args:
+ state: State of the parser after parsing all tokens
- def Finalize(self, state, tokenizer_mode):
+ Raises:
+ TypeError: If not overridden.
+ """
last_non_space_token = state.GetLastNonSpaceToken()
# Check last line for ending with newline.
- if state.GetLastLine() and not (state.GetLastLine().isspace() or
+ if state.GetLastLine() and not (
+ state.GetLastLine().isspace() or
state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
self._HandleError(
errors.FILE_MISSING_NEWLINE,
'File does not end with new line. (%s)' % state.GetLastLine(),
last_non_space_token)
- # Check that the mode is not mid comment, argument list, etc.
- if not tokenizer_mode == Modes.TEXT_MODE:
- self._HandleError(
- errors.FILE_IN_BLOCK,
- 'File ended in mode "%s".' % tokenizer_mode,
- last_non_space_token)
-
try:
self._indentation.Finalize()
except Exception, e:
@@ -748,5 +832,13 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
last_non_space_token)
def GetLongLineExceptions(self):
- """Gets a list of regexps for lines which can be longer than the limit."""
+ """Gets a list of regexps for lines which can be longer than the limit.
+
+ Returns:
+ A list of regexps, used as matches (rather than searches).
+ """
return []
+
+ def InExplicitlyTypedLanguage(self):
+ """Returns whether this ecma implementation is explicitly typed."""
+ return False
diff --git a/tools/closure_linter/closure_linter/ecmametadatapass.py b/tools/closure_linter/closure_linter/ecmametadatapass.py
index 2c797b3c39..50621610ef 100755
--- a/tools/closure_linter/closure_linter/ecmametadatapass.py
+++ b/tools/closure_linter/closure_linter/ecmametadatapass.py
@@ -115,18 +115,30 @@ class EcmaContext(object):
BLOCK_TYPES = frozenset([
ROOT, BLOCK, CASE_BLOCK, FOR_GROUP_BLOCK, IMPLIED_BLOCK])
- def __init__(self, type, start_token, parent):
+ def __init__(self, context_type, start_token, parent=None):
"""Initializes the context object.
Args:
+ context_type: The context type.
+ start_token: The token where this context starts.
+ parent: The parent context.
+
+ Attributes:
type: The context type.
start_token: The token where this context starts.
+ end_token: The token where this context ends.
parent: The parent context.
+ children: The child contexts of this context, in order.
"""
- self.type = type
+ self.type = context_type
self.start_token = start_token
self.end_token = None
- self.parent = parent
+
+ self.parent = None
+ self.children = []
+
+ if parent:
+ parent.AddChild(self)
def __repr__(self):
"""Returns a string representation of the context object."""
@@ -137,6 +149,32 @@ class EcmaContext(object):
context = context.parent
return 'Context(%s)' % ' > '.join(stack)
+ def AddChild(self, child):
+ """Adds a child to this context and sets child's parent to this context.
+
+ Args:
+ child: A child EcmaContext. The child's parent will be set to this
+ context.
+ """
+
+ child.parent = self
+
+ self.children.append(child)
+ self.children.sort(EcmaContext._CompareContexts)
+
+ def GetRoot(self):
+ """Get the root context that contains this context, if any."""
+ context = self
+ while context:
+ if context.type is EcmaContext.ROOT:
+ return context
+ context = context.parent
+
+ @staticmethod
+ def _CompareContexts(context1, context2):
+ """Sorts contexts 1 and 2 by start token document position."""
+ return tokenutil.Compare(context1.start_token, context2.start_token)
+
class EcmaMetaData(object):
"""Token metadata for EcmaScript languages.
@@ -146,6 +184,11 @@ class EcmaMetaData(object):
context: The context this token appears in.
operator_type: The operator type, will be one of the *_OPERATOR constants
defined below.
+ aliased_symbol: The full symbol being identified, as a string (e.g. an
+ 'XhrIo' alias for 'goog.net.XhrIo'). Only applicable to identifier
+ tokens. This is set in aliaspass.py and is a best guess.
+ is_alias_definition: True if the symbol is part of an alias definition.
+ If so, these symbols won't be counted towards goog.requires/provides.
"""
UNARY_OPERATOR = 'unary'
@@ -164,6 +207,8 @@ class EcmaMetaData(object):
self.is_implied_semicolon = False
self.is_implied_block = False
self.is_implied_block_close = False
+ self.aliased_symbol = None
+ self.is_alias_definition = False
def __repr__(self):
"""Returns a string representation of the context object."""
@@ -172,6 +217,8 @@ class EcmaMetaData(object):
parts.append('optype: %r' % self.operator_type)
if self.is_implied_semicolon:
parts.append('implied;')
+ if self.aliased_symbol:
+ parts.append('alias for: %s' % self.aliased_symbol)
return 'MetaData(%s)' % ', '.join(parts)
def IsUnaryOperator(self):
@@ -196,21 +243,21 @@ class EcmaMetaDataPass(object):
self._AddContext(EcmaContext.ROOT)
self._last_code = None
- def _CreateContext(self, type):
+ def _CreateContext(self, context_type):
"""Overridable by subclasses to create the appropriate context type."""
- return EcmaContext(type, self._token, self._context)
+ return EcmaContext(context_type, self._token, self._context)
def _CreateMetaData(self):
"""Overridable by subclasses to create the appropriate metadata type."""
return EcmaMetaData()
- def _AddContext(self, type):
+ def _AddContext(self, context_type):
"""Adds a context of the given type to the context stack.
Args:
- type: The type of context to create
+ context_type: The type of context to create
"""
- self._context = self._CreateContext(type)
+ self._context = self._CreateContext(context_type)
def _PopContext(self):
"""Moves up one level in the context stack.
@@ -233,7 +280,7 @@ class EcmaMetaDataPass(object):
"""Pops the context stack until a context of the given type is popped.
Args:
- stop_types: The types of context to pop to - stops at the first match.
+ *stop_types: The types of context to pop to - stops at the first match.
Returns:
The context object of the given type that was popped.
@@ -364,10 +411,14 @@ class EcmaMetaDataPass(object):
self._AddContext(EcmaContext.SWITCH)
elif (token_type == TokenType.KEYWORD and
- token.string in ('case', 'default')):
+ token.string in ('case', 'default') and
+ self._context.type != EcmaContext.OBJECT_LITERAL):
# Pop up to but not including the switch block.
while self._context.parent.type != EcmaContext.SWITCH:
self._PopContext()
+ if self._context.parent is None:
+ raise ParseError(token, 'Encountered case/default statement '
+ 'without switch statement')
elif token.IsOperator('?'):
self._AddContext(EcmaContext.TERNARY_TRUE)
@@ -386,9 +437,9 @@ class EcmaMetaDataPass(object):
# ternary_false > ternary_true > statement > root
elif (self._context.type == EcmaContext.TERNARY_FALSE and
self._context.parent.type == EcmaContext.TERNARY_TRUE):
- self._PopContext() # Leave current ternary false context.
- self._PopContext() # Leave current parent ternary true
- self._AddContext(EcmaContext.TERNARY_FALSE)
+ self._PopContext() # Leave current ternary false context.
+ self._PopContext() # Leave current parent ternary true
+ self._AddContext(EcmaContext.TERNARY_FALSE)
elif self._context.parent.type == EcmaContext.SWITCH:
self._AddContext(EcmaContext.CASE_BLOCK)
@@ -444,25 +495,27 @@ class EcmaMetaDataPass(object):
is_implied_block = self._context == EcmaContext.IMPLIED_BLOCK
is_last_code_in_line = token.IsCode() and (
not next_code or next_code.line_number != token.line_number)
- is_continued_identifier = (token.type == TokenType.IDENTIFIER and
- token.string.endswith('.'))
is_continued_operator = (token.type == TokenType.OPERATOR and
not token.metadata.IsUnaryPostOperator())
is_continued_dot = token.string == '.'
next_code_is_operator = next_code and next_code.type == TokenType.OPERATOR
- next_code_is_dot = next_code and next_code.string == '.'
- is_end_of_block = (token.type == TokenType.END_BLOCK and
+ is_end_of_block = (
+ token.type == TokenType.END_BLOCK and
token.metadata.context.type != EcmaContext.OBJECT_LITERAL)
is_multiline_string = token.type == TokenType.STRING_TEXT
+ is_continued_var_decl = (token.IsKeyword('var') and
+ next_code and
+ (next_code.type in [TokenType.IDENTIFIER,
+ TokenType.SIMPLE_LVALUE]) and
+ token.line_number < next_code.line_number)
next_code_is_block = next_code and next_code.type == TokenType.START_BLOCK
if (is_last_code_in_line and
self._StatementCouldEndInContext() and
not is_multiline_string and
not is_end_of_block and
- not is_continued_identifier and
+ not is_continued_var_decl and
not is_continued_operator and
not is_continued_dot and
- not next_code_is_dot and
not next_code_is_operator and
not is_implied_block and
not next_code_is_block):
@@ -470,7 +523,7 @@ class EcmaMetaDataPass(object):
self._EndStatement()
def _StatementCouldEndInContext(self):
- """Returns whether the current statement (if any) may end in this context."""
+ """Returns if the current statement (if any) may end in this context."""
# In the basic statement or variable declaration context, statement can
# always end in this context.
if self._context.type in (EcmaContext.STATEMENT, EcmaContext.VAR):
diff --git a/tools/closure_linter/closure_linter/error_check.py b/tools/closure_linter/closure_linter/error_check.py
new file mode 100755
index 0000000000..8d657fe917
--- /dev/null
+++ b/tools/closure_linter/closure_linter/error_check.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Specific JSLint errors checker."""
+
+
+
+import gflags as flags
+
+FLAGS = flags.FLAGS
+
+
+class Rule(object):
+ """Different rules to check."""
+
+ # Documentations for specific rules goes in flag definition.
+ BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level'
+ INDENTATION = 'indentation'
+ WELL_FORMED_AUTHOR = 'well_formed_author'
+ NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc'
+ BRACES_AROUND_TYPE = 'braces_around_type'
+ OPTIONAL_TYPE_MARKER = 'optional_type_marker'
+ VARIABLE_ARG_MARKER = 'variable_arg_marker'
+ UNUSED_PRIVATE_MEMBERS = 'unused_private_members'
+ UNUSED_LOCAL_VARIABLES = 'unused_local_variables'
+
+ # Rule to raise all known errors.
+ ALL = 'all'
+
+ # All rules that are to be checked when using the strict flag. E.g. the rules
+ # that are specific to the stricter Closure style.
+ CLOSURE_RULES = frozenset([BLANK_LINES_AT_TOP_LEVEL,
+ INDENTATION,
+ WELL_FORMED_AUTHOR,
+ NO_BRACES_AROUND_INHERIT_DOC,
+ BRACES_AROUND_TYPE,
+ OPTIONAL_TYPE_MARKER,
+ VARIABLE_ARG_MARKER])
+
+
+flags.DEFINE_boolean('strict', False,
+ 'Whether to validate against the stricter Closure style. '
+ 'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.')
+flags.DEFINE_multistring('jslint_error', [],
+ 'List of specific lint errors to check. Here is a list'
+ ' of accepted values:\n'
+ ' - ' + Rule.ALL + ': enables all following errors.\n'
+ ' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates'
+ 'number of blank lines between blocks at top level.\n'
+ ' - ' + Rule.INDENTATION + ': checks correct '
+ 'indentation of code.\n'
+ ' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the '
+ '@author JsDoc tags.\n'
+ ' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': '
+ 'forbids braces around @inheritdoc JsDoc tags.\n'
+ ' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces '
+ 'around types in JsDoc tags.\n'
+ ' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct '
+ 'use of optional marker = in param types.\n'
+ ' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for '
+ 'unused private variables.\n'
+ ' - ' + Rule.UNUSED_LOCAL_VARIABLES + ': checks for '
+ 'unused local variables.\n')
+
+
+def ShouldCheck(rule):
+ """Returns whether the optional rule should be checked.
+
+ Computes different flags (strict, jslint_error, jslint_noerror) to find out if
+ this specific rule should be checked.
+
+ Args:
+ rule: Name of the rule (see Rule).
+
+ Returns:
+ True if the rule should be checked according to the flags, otherwise False.
+ """
+ if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error:
+ return True
+ # Checks strict rules.
+ return FLAGS.strict and rule in Rule.CLOSURE_RULES
diff --git a/tools/closure_linter/closure_linter/error_fixer.py b/tools/closure_linter/closure_linter/error_fixer.py
index 904cf86605..88f9c720ab 100755
--- a/tools/closure_linter/closure_linter/error_fixer.py
+++ b/tools/closure_linter/closure_linter/error_fixer.py
@@ -16,6 +16,9 @@
"""Main class responsible for automatically fixing simple style violations."""
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
__author__ = 'robbyw@google.com (Robert Walker)'
import re
@@ -24,6 +27,7 @@ import gflags as flags
from closure_linter import errors
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokens
+from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import errorhandler
@@ -33,24 +37,46 @@ Type = javascripttokens.JavaScriptTokenType
END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$')
+# Regex to represent common mistake inverting author name and email as
+# @author User Name (user@company)
+INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
+ r'(?P<name>[^(]+)'
+ r'(?P<whitespace_after_name>\s+)'
+ r'\('
+ r'(?P<email>[^\s]+@[^)\s]+)'
+ r'\)'
+ r'(?P<trailing_characters>.*)')
+
FLAGS = flags.FLAGS
flags.DEFINE_boolean('disable_indentation_fixing', False,
'Whether to disable automatic fixing of indentation.')
+flags.DEFINE_list('fix_error_codes', [], 'A list of specific error codes to '
+ 'fix. Defaults to all supported error codes when empty. '
+ 'See errors.py for a list of error codes.')
+
class ErrorFixer(errorhandler.ErrorHandler):
"""Object that fixes simple style errors."""
- def __init__(self, external_file = None):
+ def __init__(self, external_file=None):
"""Initialize the error fixer.
Args:
external_file: If included, all output will be directed to this file
instead of overwriting the files the errors are found in.
"""
+ errorhandler.ErrorHandler.__init__(self)
+
self._file_name = None
self._file_token = None
self._external_file = external_file
+ try:
+ self._fix_error_codes = set([errors.ByName(error.upper()) for error in
+ FLAGS.fix_error_codes])
+ except KeyError as ke:
+ raise ValueError('Unknown error code ' + ke.args[0])
+
def HandleFile(self, filename, first_token):
"""Notifies this ErrorPrinter that subsequent errors are in filename.
@@ -59,6 +85,7 @@ class ErrorFixer(errorhandler.ErrorHandler):
first_token: The first token in the file.
"""
self._file_name = filename
+ self._file_is_html = filename.endswith('.html') or filename.endswith('.htm')
self._file_token = first_token
self._file_fix_count = 0
self._file_changed_lines = set()
@@ -76,6 +103,44 @@ class ErrorFixer(errorhandler.ErrorHandler):
for token in tokens:
self._file_changed_lines.add(token.line_number)
+ def _FixJsDocPipeNull(self, js_type):
+ """Change number|null or null|number to ?number.
+
+ Args:
+ js_type: The typeannotation.TypeAnnotation instance to fix.
+ """
+
+ # Recurse into all sub_types if the error was at a deeper level.
+ map(self._FixJsDocPipeNull, js_type.IterTypes())
+
+ if js_type.type_group and len(js_type.sub_types) == 2:
+ # Find and remove the null sub_type:
+ sub_type = None
+ for sub_type in js_type.sub_types:
+ if sub_type.identifier == 'null':
+ map(tokenutil.DeleteToken, sub_type.tokens)
+ self._AddFix(sub_type.tokens)
+ break
+ else:
+ return
+
+ first_token = js_type.FirstToken()
+ question_mark = Token('?', Type.DOC_TYPE_MODIFIER, first_token.line,
+ first_token.line_number)
+ tokenutil.InsertTokenBefore(question_mark, first_token)
+ js_type.tokens.insert(0, question_mark)
+ js_type.tokens.remove(sub_type)
+ js_type.or_null = True
+
+ # Now also remove the separator, which is in the parent's token list,
+ # either before or after the sub_type, there is exactly one. Scan for it.
+ for token in js_type.tokens:
+ if (token and isinstance(token, Token) and
+ token.type == Type.DOC_TYPE_MODIFIER and token.string == '|'):
+ tokenutil.DeleteToken(token)
+ self._AddFix(token)
+ break
+
def HandleError(self, error):
"""Attempts to fix the error.
@@ -85,20 +150,33 @@ class ErrorFixer(errorhandler.ErrorHandler):
code = error.code
token = error.token
+ if self._fix_error_codes and code not in self._fix_error_codes:
+ return
+
if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
+ self._FixJsDocPipeNull(token.attached_object.jstype)
+
+ elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
+ iterator = token.attached_object.type_end_token
+ if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
+ iterator = iterator.previous
+
+ ending_space = len(iterator.string) - len(iterator.string.rstrip())
+ iterator.string = '%s=%s' % (iterator.string.rstrip(),
+ ' ' * ending_space)
+
+ # Create a new flag object with updated type info.
+ token.attached_object = javascriptstatetracker.JsDocFlag(token)
+ self._AddFix(token)
+
+ elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE:
iterator = token.attached_object.type_start_token
if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
iterator = iterator.next
- leading_space = len(iterator.string) - len(iterator.string.lstrip())
- iterator.string = '%s?%s' % (' ' * leading_space,
- iterator.string.lstrip())
-
- # Cover the no outer brace case where the end token is part of the type.
- while iterator and iterator != token.attached_object.type_end_token.next:
- iterator.string = iterator.string.replace(
- 'null|', '').replace('|null', '')
- iterator = iterator.next
+ starting_space = len(iterator.string) - len(iterator.string.lstrip())
+ iterator.string = '%s...%s' % (' ' * starting_space,
+ iterator.string.lstrip())
# Create a new flag object with updated type info.
token.attached_object = javascriptstatetracker.JsDocFlag(token)
@@ -116,7 +194,7 @@ class ErrorFixer(errorhandler.ErrorHandler):
elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
errors.REDUNDANT_SEMICOLON,
errors.COMMA_AT_END_OF_LITERAL):
- tokenutil.DeleteToken(token)
+ self._DeleteToken(token)
self._AddFix(token)
elif code == errors.INVALID_JSDOC_TAG:
@@ -129,7 +207,10 @@ class ErrorFixer(errorhandler.ErrorHandler):
self._AddFix(token)
elif code == errors.MISSING_SPACE:
- if error.position:
+ if error.fix_data:
+ token.string = error.fix_data
+ self._AddFix(token)
+ elif error.position:
if error.position.IsAtBeginning():
tokenutil.InsertSpaceTokenAfter(token.previous)
elif error.position.IsAtEnd(token.string):
@@ -143,19 +224,15 @@ class ErrorFixer(errorhandler.ErrorHandler):
token.string = error.position.Set(token.string, '')
self._AddFix(token)
- elif code == errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER:
- token.string = error.position.Set(token.string, '.')
- self._AddFix(token)
-
elif code == errors.MISSING_LINE:
if error.position.IsAtBeginning():
- tokenutil.InsertLineAfter(token.previous)
+ tokenutil.InsertBlankLineAfter(token.previous)
else:
- tokenutil.InsertLineAfter(token)
+ tokenutil.InsertBlankLineAfter(token)
self._AddFix(token)
elif code == errors.EXTRA_LINE:
- tokenutil.DeleteToken(token)
+ self._DeleteToken(token)
self._AddFix(token)
elif code == errors.WRONG_BLANK_LINE_COUNT:
@@ -167,29 +244,30 @@ class ErrorFixer(errorhandler.ErrorHandler):
should_delete = False
if num_lines < 0:
- num_lines = num_lines * -1
+ num_lines *= -1
should_delete = True
- for i in xrange(1, num_lines + 1):
+ for unused_i in xrange(1, num_lines + 1):
if should_delete:
# TODO(user): DeleteToken should update line numbers.
- tokenutil.DeleteToken(token.previous)
+ self._DeleteToken(token.previous)
else:
- tokenutil.InsertLineAfter(token.previous)
+ tokenutil.InsertBlankLineAfter(token.previous)
self._AddFix(token)
elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
if end_quote:
- single_quote_start = Token("'", Type.SINGLE_QUOTE_STRING_START,
- token.line, token.line_number)
- single_quote_end = Token("'", Type.SINGLE_QUOTE_STRING_START,
- end_quote.line, token.line_number)
+ single_quote_start = Token(
+ "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
+ single_quote_end = Token(
+ "'", Type.SINGLE_QUOTE_STRING_START, end_quote.line,
+ token.line_number)
tokenutil.InsertTokenAfter(single_quote_start, token)
tokenutil.InsertTokenAfter(single_quote_end, end_quote)
- tokenutil.DeleteToken(token)
- tokenutil.DeleteToken(end_quote)
+ self._DeleteToken(token)
+ self._DeleteToken(end_quote)
self._AddFix([token, end_quote])
elif code == errors.MISSING_BRACES_AROUND_TYPE:
@@ -197,15 +275,15 @@ class ErrorFixer(errorhandler.ErrorHandler):
start_token = token.attached_object.type_start_token
if start_token.type != Type.DOC_START_BRACE:
- leading_space = (len(start_token.string) -
- len(start_token.string.lstrip()))
+ leading_space = (
+ len(start_token.string) - len(start_token.string.lstrip()))
if leading_space:
start_token = tokenutil.SplitToken(start_token, leading_space)
# Fix case where start and end token were the same.
if token.attached_object.type_end_token == start_token.previous:
token.attached_object.type_end_token = start_token
- new_token = Token("{", Type.DOC_START_BRACE, start_token.line,
+ new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
start_token.line_number)
tokenutil.InsertTokenAfter(new_token, start_token.previous)
token.attached_object.type_start_token = new_token
@@ -217,7 +295,7 @@ class ErrorFixer(errorhandler.ErrorHandler):
# FLAG_ENDING_TYPE token, if there wasn't a starting brace then
# the end token is the last token of the actual type.
last_type = end_token
- if not len(fixed_tokens):
+ if not fixed_tokens:
last_type = end_token.previous
while last_type.string.isspace():
@@ -233,7 +311,7 @@ class ErrorFixer(errorhandler.ErrorHandler):
tokenutil.SplitToken(last_type,
len(last_type.string) - trailing_space)
- new_token = Token("}", Type.DOC_END_BRACE, last_type.line,
+ new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
last_type.line_number)
tokenutil.InsertTokenAfter(new_token, last_type)
token.attached_object.type_end_token = new_token
@@ -241,35 +319,86 @@ class ErrorFixer(errorhandler.ErrorHandler):
self._AddFix(fixed_tokens)
- elif code in (errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
- errors.GOOG_PROVIDES_NOT_ALPHABETIZED):
- tokens = error.fix_data
- strings = map(lambda x: x.string, tokens)
- sorted_strings = sorted(strings)
+ elif code == errors.LINE_STARTS_WITH_OPERATOR:
+ # Remove whitespace following the operator so the line starts clean.
+ self._StripSpace(token, before=False)
- index = 0
- changed_tokens = []
- for token in tokens:
- if token.string != sorted_strings[index]:
- token.string = sorted_strings[index]
- changed_tokens.append(token)
- index += 1
+ # Remove the operator.
+ tokenutil.DeleteToken(token)
+ self._AddFix(token)
+
+ insertion_point = tokenutil.GetPreviousCodeToken(token)
- self._AddFix(changed_tokens)
+ # Insert a space between the previous token and the new operator.
+ space = Token(' ', Type.WHITESPACE, insertion_point.line,
+ insertion_point.line_number)
+ tokenutil.InsertTokenAfter(space, insertion_point)
+
+ # Insert the operator on the end of the previous line.
+ new_token = Token(token.string, token.type, insertion_point.line,
+ insertion_point.line_number)
+ tokenutil.InsertTokenAfter(new_token, space)
+ self._AddFix(new_token)
+
+ elif code == errors.LINE_ENDS_WITH_DOT:
+ # Remove whitespace preceding the operator to remove trailing whitespace.
+ self._StripSpace(token, before=True)
+
+ # Remove the dot.
+ tokenutil.DeleteToken(token)
+ self._AddFix(token)
+
+ insertion_point = tokenutil.GetNextCodeToken(token)
+
+ # Insert the dot at the beginning of the next line of code.
+ new_token = Token(token.string, token.type, insertion_point.line,
+ insertion_point.line_number)
+ tokenutil.InsertTokenBefore(new_token, insertion_point)
+ self._AddFix(new_token)
+
+ elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
+ require_start_token = error.fix_data
+ sorter = requireprovidesorter.RequireProvideSorter()
+ sorter.FixRequires(require_start_token)
+
+ self._AddFix(require_start_token)
+
+ elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
+ provide_start_token = error.fix_data
+ sorter = requireprovidesorter.RequireProvideSorter()
+ sorter.FixProvides(provide_start_token)
+
+ self._AddFix(provide_start_token)
elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
if token.previous.string == '{' and token.next.string == '}':
- tokenutil.DeleteToken(token.previous)
- tokenutil.DeleteToken(token.next)
+ self._DeleteToken(token.previous)
+ self._DeleteToken(token.next)
self._AddFix([token])
+ elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
+ match = INVERTED_AUTHOR_SPEC.match(token.string)
+ if match:
+ token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
+ match.group('email'),
+ match.group('whitespace_after_name'),
+ match.group('name'),
+ match.group('trailing_characters'))
+ self._AddFix(token)
+
elif (code == errors.WRONG_INDENTATION and
- not FLAGS.disable_indentation_fixing):
+ not FLAGS.disable_indentation_fixing):
token = tokenutil.GetFirstTokenInSameLine(token)
actual = error.position.start
expected = error.position.length
- if token.type in (Type.WHITESPACE, Type.PARAMETERS):
+ # Cases where first token is param but with leading spaces.
+ if (len(token.string.lstrip()) == len(token.string) - actual and
+ token.string.lstrip()):
+ token.string = token.string.lstrip()
+ actual = 0
+
+ if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
token.string = token.string.lstrip() + (' ' * expected)
self._AddFix([token])
else:
@@ -282,52 +411,205 @@ class ErrorFixer(errorhandler.ErrorHandler):
tokenutil.InsertTokenAfter(new_token, token.previous)
self._AddFix([token])
- elif code == errors.EXTRA_GOOG_REQUIRE:
- fixed_tokens = []
- while token:
- if token.type == Type.IDENTIFIER:
- if token.string not in ['goog.require', 'goog.provide']:
- # Stop iterating over tokens once we're out of the requires and
- # provides.
- break
- if token.string == 'goog.require':
- # Text of form: goog.require('required'), skipping past open paren
- # and open quote to the string text.
- required = token.next.next.next.string
- if required in error.fix_data:
- fixed_tokens.append(token)
- # Want to delete: goog.require + open paren + open single-quote +
- # text + close single-quote + close paren + semi-colon = 7.
- tokenutil.DeleteTokens(token, 7)
- token = token.next
+ elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
+ errors.MISSING_END_OF_SCOPE_COMMENT]:
+ # Only fix cases where }); is found with no trailing content on the line
+ # other than a comment. Value of 'token' is set to } for this error.
+ if (token.type == Type.END_BLOCK and
+ token.next.type == Type.END_PAREN and
+ token.next.next.type == Type.SEMICOLON):
+ current_token = token.next.next.next
+ removed_tokens = []
+ while current_token and current_token.line_number == token.line_number:
+ if current_token.IsAnyType(Type.WHITESPACE,
+ Type.START_SINGLE_LINE_COMMENT,
+ Type.COMMENT):
+ removed_tokens.append(current_token)
+ current_token = current_token.next
+ else:
+ return
+
+ if removed_tokens:
+ self._DeleteTokens(removed_tokens[0], len(removed_tokens))
+
+ whitespace_token = Token(' ', Type.WHITESPACE, token.line,
+ token.line_number)
+ start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
+ token.line, token.line_number)
+ comment_token = Token(' goog.scope', Type.COMMENT, token.line,
+ token.line_number)
+ insertion_tokens = [whitespace_token, start_comment_token,
+ comment_token]
+
+ tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
+ self._AddFix(removed_tokens + insertion_tokens)
+
+ elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
+ tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
+ num_delete_tokens = len(tokens_in_line)
+ # If line being deleted is preceded and succeed with blank lines then
+ # delete one blank line also.
+ if (tokens_in_line[0].previous and tokens_in_line[-1].next
+ and tokens_in_line[0].previous.type == Type.BLANK_LINE
+ and tokens_in_line[-1].next.type == Type.BLANK_LINE):
+ num_delete_tokens += 1
+ self._DeleteTokens(tokens_in_line[0], num_delete_tokens)
+ self._AddFix(tokens_in_line)
+
+ elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
+ missing_namespaces = error.fix_data[0]
+ need_blank_line = error.fix_data[1] or (not token.previous)
+
+ insert_location = Token('', Type.NORMAL, '', token.line_number - 1)
+ dummy_first_token = insert_location
+ tokenutil.InsertTokenBefore(insert_location, token)
+
+ # If inserting a blank line check blank line does not exist before
+ # token to avoid extra blank lines.
+ if (need_blank_line and insert_location.previous
+ and insert_location.previous.type != Type.BLANK_LINE):
+ tokenutil.InsertBlankLineAfter(insert_location)
+ insert_location = insert_location.next
+
+ for missing_namespace in missing_namespaces:
+ new_tokens = self._GetNewRequireOrProvideTokens(
+ code == errors.MISSING_GOOG_PROVIDE,
+ missing_namespace, insert_location.line_number + 1)
+ tokenutil.InsertLineAfter(insert_location, new_tokens)
+ insert_location = new_tokens[-1]
+ self._AddFix(new_tokens)
+
+ # If inserting a blank line check blank line does not exist after
+ # token to avoid extra blank lines.
+ if (need_blank_line and insert_location.next
+ and insert_location.next.type != Type.BLANK_LINE):
+ tokenutil.InsertBlankLineAfter(insert_location)
+
+ tokenutil.DeleteToken(dummy_first_token)
+
+ def _StripSpace(self, token, before):
+ """Strip whitespace tokens either preceding or following the given token.
- self._AddFix(fixed_tokens)
+ Args:
+ token: The token.
+ before: If true, strip space before the token, if false, after it.
+ """
+ token = token.previous if before else token.next
+ while token and token.type == Type.WHITESPACE:
+ tokenutil.DeleteToken(token)
+ token = token.previous if before else token.next
+
+ def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
+ """Returns a list of tokens to create a goog.require/provide statement.
+
+ Args:
+ is_provide: True if getting tokens for a provide, False for require.
+ namespace: The required or provided namespaces to get tokens for.
+ line_number: The line number the new require or provide statement will be
+ on.
+
+ Returns:
+ Tokens to create a new goog.require or goog.provide statement.
+ """
+ string = 'goog.require'
+ if is_provide:
+ string = 'goog.provide'
+ line_text = string + '(\'' + namespace + '\');\n'
+ return [
+ Token(string, Type.IDENTIFIER, line_text, line_number),
+ Token('(', Type.START_PAREN, line_text, line_number),
+ Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number),
+ Token(namespace, Type.STRING_TEXT, line_text, line_number),
+ Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number),
+ Token(')', Type.END_PAREN, line_text, line_number),
+ Token(';', Type.SEMICOLON, line_text, line_number)
+ ]
+
+ def _DeleteToken(self, token):
+ """Deletes the specified token from the linked list of tokens.
+
+ Updates instance variables pointing to tokens such as _file_token if
+ they reference the deleted token.
+
+ Args:
+ token: The token to delete.
+ """
+ if token == self._file_token:
+ self._file_token = token.next
+
+ tokenutil.DeleteToken(token)
+
+ def _DeleteTokens(self, token, token_count):
+ """Deletes the given number of tokens starting with the given token.
+
+ Updates instance variables pointing to tokens such as _file_token if
+ they reference the deleted token.
+
+ Args:
+ token: The first token to delete.
+ token_count: The total number of tokens to delete.
+ """
+ if token == self._file_token:
+ for unused_i in xrange(token_count):
+ self._file_token = self._file_token.next
+
+ tokenutil.DeleteTokens(token, token_count)
def FinishFile(self):
"""Called when the current file has finished style checking.
- Used to go back and fix any errors in the file.
+ Used to go back and fix any errors in the file. It currently supports both
+ js and html files. For js files it does a simple dump of all tokens, but in
+ order to support html file, we need to merge the original file with the new
+ token set back together. This works because the tokenized html file is the
+ original html file with all non js lines kept but blanked out with one blank
+ line token per line of html.
"""
if self._file_fix_count:
+ # Get the original file content for html.
+ if self._file_is_html:
+ f = open(self._file_name, 'r')
+ original_lines = f.readlines()
+ f.close()
+
f = self._external_file
if not f:
- print "Fixed %d errors in %s" % (self._file_fix_count, self._file_name)
+ error_noun = 'error' if self._file_fix_count == 1 else 'errors'
+ print 'Fixed %d %s in %s' % (
+ self._file_fix_count, error_noun, self._file_name)
f = open(self._file_name, 'w')
token = self._file_token
+ # Finding the first not deleted token.
+ while token.is_deleted:
+ token = token.next
+ # If something got inserted before first token (e.g. due to sorting)
+ # then move to start. Bug 8398202.
+ while token.previous:
+ token = token.previous
char_count = 0
+ line = ''
while token:
- f.write(token.string)
+ line += token.string
char_count += len(token.string)
if token.IsLastInLine():
- f.write('\n')
+ # We distinguish if a blank line in html was from stripped original
+ # file or newly added error fix by looking at the "org_line_number"
+ # field on the token. It is only set in the tokenizer, so for all
+ # error fixes, the value should be None.
+ if (line or not self._file_is_html or
+ token.orig_line_number is None):
+ f.write(line)
+ f.write('\n')
+ else:
+ f.write(original_lines[token.orig_line_number - 1])
+ line = ''
if char_count > 80 and token.line_number in self._file_changed_lines:
- print "WARNING: Line %d of %s is now longer than 80 characters." % (
+ print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
token.line_number, self._file_name)
char_count = 0
- self._file_changed_lines
token = token.next
diff --git a/tools/closure_linter/closure_linter/error_fixer_test.py b/tools/closure_linter/closure_linter/error_fixer_test.py
new file mode 100644
index 0000000000..49f449de42
--- /dev/null
+++ b/tools/closure_linter/closure_linter/error_fixer_test.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the error_fixer module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+
+
+import unittest as googletest
+from closure_linter import error_fixer
+from closure_linter import testutil
+
+
+class ErrorFixerTest(googletest.TestCase):
+ """Unit tests for error_fixer."""
+
+ def setUp(self):
+ self.error_fixer = error_fixer.ErrorFixer()
+
+ def testDeleteToken(self):
+ start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
+ second_token = start_token.next
+ self.error_fixer.HandleFile('test_file', start_token)
+
+ self.error_fixer._DeleteToken(start_token)
+
+ self.assertEqual(second_token, self.error_fixer._file_token)
+
+ def testDeleteTokens(self):
+ start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
+ fourth_token = start_token.next.next.next
+ self.error_fixer.HandleFile('test_file', start_token)
+
+ self.error_fixer._DeleteTokens(start_token, 3)
+
+ self.assertEqual(fourth_token, self.error_fixer._file_token)
+
+_TEST_SCRIPT = """\
+var x = 3;
+"""
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/closure_linter/errorrecord.py b/tools/closure_linter/closure_linter/errorrecord.py
new file mode 100644
index 0000000000..ce9fb908c7
--- /dev/null
+++ b/tools/closure_linter/closure_linter/errorrecord.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""A simple, pickle-serializable class to represent a lint error."""
+
+__author__ = 'nnaze@google.com (Nathan Naze)'
+
+import gflags as flags
+
+from closure_linter import errors
+from closure_linter.common import erroroutput
+
+FLAGS = flags.FLAGS
+
+
+class ErrorRecord(object):
+ """Record-keeping struct that can be serialized back from a process.
+
+ Attributes:
+ path: Path to the file.
+ error_string: Error string for the user.
+ new_error: Whether this is a "new error" (see errors.NEW_ERRORS).
+ """
+
+ def __init__(self, path, error_string, new_error):
+ self.path = path
+ self.error_string = error_string
+ self.new_error = new_error
+
+
+def MakeErrorRecord(path, error):
+ """Make an error record with correctly formatted error string.
+
+ Errors are not able to be serialized (pickled) over processes because of
+ their pointers to the complex token/context graph. We use an intermediary
+ serializable class to pass back just the relevant information.
+
+ Args:
+ path: Path of file the error was found in.
+ error: An error.Error instance.
+
+ Returns:
+ _ErrorRecord instance.
+ """
+ new_error = error.code in errors.NEW_ERRORS
+
+ if FLAGS.unix_mode:
+ error_string = erroroutput.GetUnixErrorOutput(
+ path, error, new_error=new_error)
+ else:
+ error_string = erroroutput.GetErrorOutput(error, new_error=new_error)
+
+ return ErrorRecord(path, error_string, new_error)
diff --git a/tools/closure_linter/closure_linter/errorrules.py b/tools/closure_linter/closure_linter/errorrules.py
index afb6fa9606..b1b72aab6d 100755
--- a/tools/closure_linter/closure_linter/errorrules.py
+++ b/tools/closure_linter/closure_linter/errorrules.py
@@ -25,18 +25,48 @@ from closure_linter import errors
FLAGS = flags.FLAGS
flags.DEFINE_boolean('jsdoc', True,
'Whether to report errors for missing JsDoc.')
+flags.DEFINE_list('disable', None,
+ 'Disable specific error. Usage Ex.: gjslint --disable 1,'
+ '0011 foo.js.')
+flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed '
+ 'without warning.', lower_bound=1)
+
+disabled_error_nums = None
+
+
+def GetMaxLineLength():
+ """Returns allowed maximum length of line.
+
+ Returns:
+ Length of line allowed without any warning.
+ """
+ return FLAGS.max_line_length
def ShouldReportError(error):
"""Whether the given error should be reported.
-
+
Returns:
- True for all errors except missing documentation errors. For these,
- it returns the value of the jsdoc flag.
+ True for all errors except missing documentation errors and disabled
+ errors. For missing documentation, it returns the value of the
+ jsdoc flag.
"""
- return FLAGS.jsdoc or error not in (
+ global disabled_error_nums
+ if disabled_error_nums is None:
+ disabled_error_nums = []
+ if FLAGS.disable:
+ for error_str in FLAGS.disable:
+ error_num = 0
+ try:
+ error_num = int(error_str)
+ except ValueError:
+ pass
+ disabled_error_nums.append(error_num)
+
+ return ((FLAGS.jsdoc or error not in (
errors.MISSING_PARAMETER_DOCUMENTATION,
errors.MISSING_RETURN_DOCUMENTATION,
errors.MISSING_MEMBER_DOCUMENTATION,
errors.MISSING_PRIVATE,
- errors.MISSING_JSDOC_TAG_THIS)
+ errors.MISSING_JSDOC_TAG_THIS)) and
+ (not FLAGS.disable or error not in disabled_error_nums))
diff --git a/tools/closure_linter/closure_linter/errorrules_test.py b/tools/closure_linter/closure_linter/errorrules_test.py
new file mode 100644
index 0000000000..cb903785e6
--- /dev/null
+++ b/tools/closure_linter/closure_linter/errorrules_test.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Medium tests for the gjslint errorrules.
+
+Currently it's just verifying that warnings can't be disabled.
+"""
+
+
+
+import gflags as flags
+import unittest as googletest
+
+from closure_linter import errors
+from closure_linter import runner
+from closure_linter.common import erroraccumulator
+
+flags.FLAGS.strict = True
+flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
+flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
+
+
+class ErrorRulesTest(googletest.TestCase):
+ """Test case to for gjslint errorrules."""
+
+ def testNoMaxLineLengthFlagExists(self):
+    """Tests that --max_line_length flag does not exist."""
+ self.assertTrue('max_line_length' not in flags.FLAGS.FlagDict())
+
+ def testGetMaxLineLength(self):
+    """Tests warnings are reported for lines greater than 80.
+ """
+
+ # One line > 100 and one line > 80 and < 100. So should produce two
+    # line too long errors.
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ '',
+ 'function a() {',
+ ' dummy.aa.i = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
+ ' + 14 + 15 + 16 + 17 + 18 + 19 + 20;',
+ ' dummy.aa.j = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
+ ' + 14 + 15 + 16 + 17 + 18;',
+ '}',
+ ''
+ ]
+
+ # Expect line too long.
+ expected = [errors.LINE_TOO_LONG, errors.LINE_TOO_LONG]
+
+ self._AssertErrors(original, expected)
+
+ def testNoDisableFlagExists(self):
+    """Tests that --disable flag does not exist."""
+ self.assertTrue('disable' not in flags.FLAGS.FlagDict())
+
+ def testWarningsNotDisabled(self):
+ """Tests warnings are reported when nothing is disabled.
+ """
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ 'goog.require(\'dummy.Cc\');',
+ 'goog.require(\'dummy.Dd\');',
+ '',
+ 'function a() {',
+ ' dummy.aa.i = 1;',
+ ' dummy.Cc.i = 1;',
+ ' dummy.Dd.i = 1;',
+ '}',
+ ]
+
+ expected = [errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
+ errors.FILE_MISSING_NEWLINE]
+
+ self._AssertErrors(original, expected)
+
+ def _AssertErrors(self, original, expected_errors, include_header=True):
+ """Asserts that the error fixer corrects original to expected."""
+ if include_header:
+ original = self._GetHeader() + original
+
+    # Trap gjslint's output and parse it to get messages added.
+ error_accumulator = erroraccumulator.ErrorAccumulator()
+ runner.Run('testing.js', error_accumulator, source=original)
+ error_nums = [e.code for e in error_accumulator.GetErrors()]
+
+ error_nums.sort()
+ expected_errors.sort()
+ self.assertListEqual(error_nums, expected_errors)
+
+ def _GetHeader(self):
+ """Returns a fake header for a JavaScript file."""
+ return [
+ '// Copyright 2011 Google Inc. All Rights Reserved.',
+ '',
+ '/**',
+ ' * @fileoverview Fake file overview.',
+ ' * @author fake@google.com (Fake Person)',
+ ' */',
+ ''
+ ]
+
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/closure_linter/errors.py b/tools/closure_linter/closure_linter/errors.py
index 7c86941f39..356ee0c5a6 100755
--- a/tools/closure_linter/closure_linter/errors.py
+++ b/tools/closure_linter/closure_linter/errors.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,6 +18,7 @@
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
+
def ByName(name):
"""Get the error code for the given error name.
@@ -55,8 +55,11 @@ ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100
LINE_TOO_LONG = 110
LINE_STARTS_WITH_OPERATOR = 120
COMMA_AT_END_OF_LITERAL = 121
+LINE_ENDS_WITH_DOT = 122
MULTI_LINE_STRING = 130
UNNECESSARY_DOUBLE_QUOTED_STRING = 131
+UNUSED_PRIVATE_MEMBER = 132
+UNUSED_LOCAL_VARIABLE = 133
# Requires, provides
GOOG_REQUIRES_NOT_ALPHABETIZED = 140
@@ -64,6 +67,8 @@ GOOG_PROVIDES_NOT_ALPHABETIZED = 141
MISSING_GOOG_REQUIRE = 142
MISSING_GOOG_PROVIDE = 143
EXTRA_GOOG_REQUIRE = 144
+EXTRA_GOOG_PROVIDE = 145
+ALIAS_STMT_NEEDS_GOOG_REQUIRE = 146
# JsDoc
INVALID_JSDOC_TAG = 200
@@ -89,7 +94,11 @@ UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
INVALID_AUTHOR_TAG_DESCRIPTION = 227
JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
-JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER = 240
+JSDOC_MISSING_OPTIONAL_TYPE = 232
+JSDOC_MISSING_OPTIONAL_PREFIX = 233
+JSDOC_MISSING_VAR_ARGS_TYPE = 234
+JSDOC_MISSING_VAR_ARGS_NAME = 235
+JSDOC_DOES_NOT_PARSE = 236
# TODO(robbyw): Split this in to more specific syntax problems.
INCORRECT_SUPPRESS_SYNTAX = 250
INVALID_SUPPRESS_TYPE = 251
@@ -103,6 +112,15 @@ FILE_IN_BLOCK = 301
INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
+# Comments
+MISSING_END_OF_SCOPE_COMMENT = 500
+MALFORMED_END_OF_SCOPE_COMMENT = 501
+
+# goog.scope - Namespace aliasing
+# TODO(nnaze) Add additional errors here and in aliaspass.py
+INVALID_USE_OF_GOOG_SCOPE = 600
+EXTRA_GOOG_SCOPE_USAGE = 601
+
# ActionScript specific errors:
# TODO(user): move these errors to their own file and move all JavaScript
# specific errors to their own file as well.
@@ -125,7 +143,12 @@ NEW_ERRORS = frozenset([
# Errors added after 2.0.2:
WRONG_INDENTATION,
MISSING_SEMICOLON,
- # Errors added after 2.2.5:
- WRONG_BLANK_LINE_COUNT,
- EXTRA_GOOG_REQUIRE,
+ # Errors added after 2.3.9:
+ JSDOC_MISSING_VAR_ARGS_TYPE,
+ JSDOC_MISSING_VAR_ARGS_NAME,
+ # Errors added after 2.3.15:
+ ALIAS_STMT_NEEDS_GOOG_REQUIRE,
+ JSDOC_DOES_NOT_PARSE,
+ LINE_ENDS_WITH_DOT,
+ # Errors added after 2.3.17:
])
diff --git a/tools/closure_linter/closure_linter/fixjsstyle.py b/tools/closure_linter/closure_linter/fixjsstyle.py
index 8782e648e6..2d65e0398f 100755
--- a/tools/closure_linter/closure_linter/fixjsstyle.py
+++ b/tools/closure_linter/closure_linter/fixjsstyle.py
@@ -18,15 +18,23 @@
__author__ = 'robbyw@google.com (Robert Walker)'
+import StringIO
import sys
import gflags as flags
-from closure_linter import checker
+
from closure_linter import error_fixer
+from closure_linter import runner
from closure_linter.common import simplefileflags as fileflags
+FLAGS = flags.FLAGS
+flags.DEFINE_list('additional_extensions', None, 'List of additional file '
+ 'extensions (not js) that should be treated as '
+ 'JavaScript files.')
+flags.DEFINE_boolean('dry_run', False, 'Do not modify the file, only print it.')
+
-def main(argv = None):
+def main(argv=None):
"""Main function.
Args:
@@ -35,13 +43,24 @@ def main(argv = None):
if argv is None:
argv = flags.FLAGS(sys.argv)
- files = fileflags.GetFileList(argv, 'JavaScript', ['.js'])
+ suffixes = ['.js']
+ if FLAGS.additional_extensions:
+ suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
+
+ files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
- style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer())
+ output_buffer = None
+ if FLAGS.dry_run:
+ output_buffer = StringIO.StringIO()
+
+ fixer = error_fixer.ErrorFixer(output_buffer)
# Check the list of files.
for filename in files:
- style_checker.Check(filename)
+ runner.Run(filename, fixer)
+ if FLAGS.dry_run:
+ print output_buffer.getvalue()
+
if __name__ == '__main__':
main()
diff --git a/tools/closure_linter/closure_linter/fixjsstyle_test.py b/tools/closure_linter/closure_linter/fixjsstyle_test.py
index 42e9c59377..34de3f8488 100755
--- a/tools/closure_linter/closure_linter/fixjsstyle_test.py
+++ b/tools/closure_linter/closure_linter/fixjsstyle_test.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,8 +21,9 @@ import StringIO
import gflags as flags
import unittest as googletest
-from closure_linter import checker
from closure_linter import error_fixer
+from closure_linter import runner
+
_RESOURCE_PREFIX = 'closure_linter/testdata'
@@ -31,30 +31,584 @@ flags.FLAGS.strict = True
flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
+
class FixJsStyleTest(googletest.TestCase):
"""Test case to for gjslint auto-fixing."""
+ def setUp(self):
+ flags.FLAGS.dot_on_next_line = True
+
+ def tearDown(self):
+ flags.FLAGS.dot_on_next_line = False
+
def testFixJsStyle(self):
- input_filename = None
- try:
- input_filename = '%s/fixjsstyle.in.js' % (_RESOURCE_PREFIX)
+ test_cases = [
+ ['fixjsstyle.in.js', 'fixjsstyle.out.js'],
+ ['indentation.js', 'fixjsstyle.indentation.out.js'],
+ ['fixjsstyle.html.in.html', 'fixjsstyle.html.out.html'],
+ ['fixjsstyle.oplineend.in.js', 'fixjsstyle.oplineend.out.js']]
+ for [running_input_file, running_output_file] in test_cases:
+ print 'Checking %s vs %s' % (running_input_file, running_output_file)
+ input_filename = None
+ golden_filename = None
+ current_filename = None
+ try:
+ input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file)
+ current_filename = input_filename
- golden_filename = '%s/fixjsstyle.out.js' % (_RESOURCE_PREFIX)
- except IOError, ex:
- raise IOError('Could not find testdata resource for %s: %s' %
- (self._filename, ex))
+ golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file)
+ current_filename = golden_filename
+ except IOError as ex:
+ raise IOError('Could not find testdata resource for %s: %s' %
+ (current_filename, ex))
- # Autofix the file, sending output to a fake file.
- actual = StringIO.StringIO()
- style_checker = checker.JavaScriptStyleChecker(
- error_fixer.ErrorFixer(actual))
- style_checker.Check(input_filename)
+ if running_input_file == 'fixjsstyle.in.js':
+ with open(input_filename) as f:
+ for line in f:
+ # Go to last line.
+ pass
+ self.assertTrue(line == line.rstrip(), '%s file should not end '
+ 'with a new line.' % (input_filename))
+
+ # Autofix the file, sending output to a fake file.
+ actual = StringIO.StringIO()
+ runner.Run(input_filename, error_fixer.ErrorFixer(actual))
+
+ # Now compare the files.
+ actual.seek(0)
+ expected = open(golden_filename, 'r')
+
+ # Uncomment to generate new golden files and run
+ # open('/'.join(golden_filename.split('/')[4:]), 'w').write(actual.read())
+ # actual.seek(0)
+
+ self.assertEqual(actual.readlines(), expected.readlines())
+
+ def testAddProvideFirstLine(self):
+ """Tests handling of case where goog.provide is added."""
+ original = [
+ 'dummy.bb.cc = 1;',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.bb\');',
+ '',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ original = [
+ '',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testAddRequireFirstLine(self):
+ """Tests handling of case where goog.require is added."""
+ original = [
+ 'a = dummy.bb.cc;',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.bb\');',
+ '',
+ 'a = dummy.bb.cc;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ original = [
+ '',
+ 'a = dummy.bb.cc;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testDeleteProvideAndAddProvideFirstLine(self):
+ """Tests handling of case where goog.provide is deleted and added.
+
+ Bug 14832597.
+ """
+ original = [
+ 'goog.provide(\'dummy.aa\');',
+ '',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.bb\');',
+ '',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ original = [
+ 'goog.provide(\'dummy.aa\');',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testDeleteProvideAndAddRequireFirstLine(self):
+ """Tests handling where goog.provide is deleted and goog.require added.
+
+ Bug 14832597.
+ """
+ original = [
+ 'goog.provide(\'dummy.aa\');',
+ '',
+ 'a = dummy.bb.cc;',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.bb\');',
+ '',
+ 'a = dummy.bb.cc;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ original = [
+ 'goog.provide(\'dummy.aa\');',
+ 'a = dummy.bb.cc;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testDeleteRequireAndAddRequireFirstLine(self):
+ """Tests handling of case where goog.require is deleted and added.
+
+ Bug 14832597.
+ """
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ '',
+ 'a = dummy.bb.cc;',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.bb\');',
+ '',
+ 'a = dummy.bb.cc;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ 'a = dummy.bb.cc;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testDeleteRequireAndAddProvideFirstLine(self):
+ """Tests handling where goog.require is deleted and goog.provide added.
+
+ Bug 14832597.
+ """
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ '',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.bb\');',
+ '',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ 'dummy.bb.cc = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testMultipleProvideInsert(self):
+ original = [
+ 'goog.provide(\'dummy.bb\');',
+ 'goog.provide(\'dummy.dd\');',
+ '',
+ 'dummy.aa.ff = 1;',
+ 'dummy.bb.ff = 1;',
+ 'dummy.cc.ff = 1;',
+ 'dummy.dd.ff = 1;',
+ 'dummy.ee.ff = 1;',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.aa\');',
+ 'goog.provide(\'dummy.bb\');',
+ 'goog.provide(\'dummy.cc\');',
+ 'goog.provide(\'dummy.dd\');',
+ 'goog.provide(\'dummy.ee\');',
+ '',
+ 'dummy.aa.ff = 1;',
+ 'dummy.bb.ff = 1;',
+ 'dummy.cc.ff = 1;',
+ 'dummy.dd.ff = 1;',
+ 'dummy.ee.ff = 1;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testMultipleRequireInsert(self):
+ original = [
+ 'goog.require(\'dummy.bb\');',
+ 'goog.require(\'dummy.dd\');',
+ '',
+ 'a = dummy.aa.ff;',
+ 'b = dummy.bb.ff;',
+ 'c = dummy.cc.ff;',
+ 'd = dummy.dd.ff;',
+ 'e = dummy.ee.ff;',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.aa\');',
+ 'goog.require(\'dummy.bb\');',
+ 'goog.require(\'dummy.cc\');',
+ 'goog.require(\'dummy.dd\');',
+ 'goog.require(\'dummy.ee\');',
+ '',
+ 'a = dummy.aa.ff;',
+ 'b = dummy.bb.ff;',
+ 'c = dummy.cc.ff;',
+ 'd = dummy.dd.ff;',
+ 'e = dummy.ee.ff;',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testUnsortedRequires(self):
+ """Tests handling of unsorted goog.require statements without header.
+
+ Bug 8398202.
+ """
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ 'goog.require(\'dummy.Cc\');',
+ 'goog.require(\'dummy.Dd\');',
+ '',
+ 'function a() {',
+ ' dummy.aa.i = 1;',
+ ' dummy.Cc.i = 1;',
+ ' dummy.Dd.i = 1;',
+ '}',
+ ]
- # Now compare the files.
+ expected = [
+ 'goog.require(\'dummy.Cc\');',
+ 'goog.require(\'dummy.Dd\');',
+ 'goog.require(\'dummy.aa\');',
+ '',
+ 'function a() {',
+ ' dummy.aa.i = 1;',
+ ' dummy.Cc.i = 1;',
+ ' dummy.Dd.i = 1;',
+ '}',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testMissingExtraAndUnsortedRequires(self):
+ """Tests handling of missing extra and unsorted goog.require statements."""
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ 'goog.require(\'dummy.Cc\');',
+ 'goog.require(\'dummy.Dd\');',
+ '',
+ 'var x = new dummy.Bb();',
+ 'dummy.Cc.someMethod();',
+ 'dummy.aa.someMethod();',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.Bb\');',
+ 'goog.require(\'dummy.Cc\');',
+ 'goog.require(\'dummy.aa\');',
+ '',
+ 'var x = new dummy.Bb();',
+ 'dummy.Cc.someMethod();',
+ 'dummy.aa.someMethod();',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testExtraRequireOnFirstLine(self):
+ """Tests handling of extra goog.require statement on the first line.
+
+ There was a bug when fixjsstyle quits with an exception. It happened if
+ - the first line of the file is an extra goog.require() statement,
+ - goog.require() statements are not sorted.
+ """
+ original = [
+ 'goog.require(\'dummy.aa\');',
+ 'goog.require(\'dummy.cc\');',
+ 'goog.require(\'dummy.bb\');',
+ '',
+ 'var x = new dummy.bb();',
+ 'var y = new dummy.cc();',
+ ]
+
+ expected = [
+ 'goog.require(\'dummy.bb\');',
+ 'goog.require(\'dummy.cc\');',
+ '',
+ 'var x = new dummy.bb();',
+ 'var y = new dummy.cc();',
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testUnsortedProvides(self):
+ """Tests handling of unsorted goog.provide statements without header.
+
+ Bug 8398202.
+ """
+ original = [
+ 'goog.provide(\'dummy.aa\');',
+ 'goog.provide(\'dummy.Cc\');',
+ 'goog.provide(\'dummy.Dd\');',
+ '',
+ 'dummy.aa = function() {};'
+ 'dummy.Cc = function() {};'
+ 'dummy.Dd = function() {};'
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.Cc\');',
+ 'goog.provide(\'dummy.Dd\');',
+ 'goog.provide(\'dummy.aa\');',
+ '',
+ 'dummy.aa = function() {};'
+ 'dummy.Cc = function() {};'
+ 'dummy.Dd = function() {};'
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testMissingExtraAndUnsortedProvides(self):
+ """Tests handling of missing extra and unsorted goog.provide statements."""
+ original = [
+ 'goog.provide(\'dummy.aa\');',
+ 'goog.provide(\'dummy.Cc\');',
+ 'goog.provide(\'dummy.Dd\');',
+ '',
+ 'dummy.Cc = function() {};',
+ 'dummy.Bb = function() {};',
+ 'dummy.aa.someMethod = function();',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.Bb\');',
+ 'goog.provide(\'dummy.Cc\');',
+ 'goog.provide(\'dummy.aa\');',
+ '',
+ 'dummy.Cc = function() {};',
+ 'dummy.Bb = function() {};',
+ 'dummy.aa.someMethod = function();',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testNoRequires(self):
+ """Tests positioning of missing requires without existing requires."""
+ original = [
+ 'goog.provide(\'dummy.Something\');',
+ '',
+ 'dummy.Something = function() {};',
+ '',
+ 'var x = new dummy.Bb();',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.Something\');',
+ '',
+ 'goog.require(\'dummy.Bb\');',
+ '',
+ 'dummy.Something = function() {};',
+ '',
+ 'var x = new dummy.Bb();',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testNoProvides(self):
+ """Tests positioning of missing provides without existing provides."""
+ original = [
+ 'goog.require(\'dummy.Bb\');',
+ '',
+ 'dummy.Something = function() {};',
+ '',
+ 'var x = new dummy.Bb();',
+ ]
+
+ expected = [
+ 'goog.provide(\'dummy.Something\');',
+ '',
+ 'goog.require(\'dummy.Bb\');',
+ '',
+ 'dummy.Something = function() {};',
+ '',
+ 'var x = new dummy.Bb();',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testOutputOkayWhenFirstTokenIsDeleted(self):
+    """Tests that autofix output is correct when first token is deleted.
+
+ Regression test for bug 4581567
+ """
+ original = ['"use strict";']
+ expected = ["'use strict';"]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testGoogScopeIndentation(self):
+ """Tests Handling a typical end-of-scope indentation fix."""
+ original = [
+ 'goog.scope(function() {',
+ ' // TODO(brain): Take over the world.',
+ '}); // goog.scope',
+ ]
+
+ expected = [
+ 'goog.scope(function() {',
+ '// TODO(brain): Take over the world.',
+ '}); // goog.scope',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testMissingEndOfScopeComment(self):
+ """Tests Handling a missing comment at end of goog.scope."""
+ original = [
+ 'goog.scope(function() {',
+ '});',
+ ]
+
+ expected = [
+ 'goog.scope(function() {',
+ '}); // goog.scope',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testMissingEndOfScopeCommentWithOtherComment(self):
+ """Tests handling an irrelevant comment at end of goog.scope."""
+ original = [
+ 'goog.scope(function() {',
+ "}); // I don't belong here!",
+ ]
+
+ expected = [
+ 'goog.scope(function() {',
+ '}); // goog.scope',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testMalformedEndOfScopeComment(self):
+ """Tests Handling a malformed comment at end of goog.scope."""
+ original = [
+ 'goog.scope(function() {',
+ '}); // goog.scope FTW',
+ ]
+
+ expected = [
+ 'goog.scope(function() {',
+ '}); // goog.scope',
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testEndsWithIdentifier(self):
+ """Tests Handling case where script ends with identifier. Bug 7643404."""
+ original = [
+ 'goog.provide(\'xyz\');',
+ '',
+ 'abc'
+ ]
+
+ expected = [
+ 'goog.provide(\'xyz\');',
+ '',
+ 'abc;'
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def testFileStartsWithSemicolon(self):
+ """Tests handling files starting with semicolon.
+
+ b/10062516
+ """
+ original = [
+ ';goog.provide(\'xyz\');',
+ '',
+ 'abc;'
+ ]
+
+ expected = [
+ 'goog.provide(\'xyz\');',
+ '',
+ 'abc;'
+ ]
+
+ self._AssertFixes(original, expected, include_header=False)
+
+ def testCodeStartsWithSemicolon(self):
+    """Tests handling code starting with semicolon after comments.
+
+ b/10062516
+ """
+ original = [
+ ';goog.provide(\'xyz\');',
+ '',
+ 'abc;'
+ ]
+
+ expected = [
+ 'goog.provide(\'xyz\');',
+ '',
+ 'abc;'
+ ]
+
+ self._AssertFixes(original, expected)
+
+ def _AssertFixes(self, original, expected, include_header=True):
+ """Asserts that the error fixer corrects original to expected."""
+ if include_header:
+ original = self._GetHeader() + original
+ expected = self._GetHeader() + expected
+
+ actual = StringIO.StringIO()
+ runner.Run('testing.js', error_fixer.ErrorFixer(actual), original)
actual.seek(0)
- expected = open(golden_filename, 'r')
- self.assertEqual(actual.readlines(), expected.readlines())
+ expected = [x + '\n' for x in expected]
+
+ self.assertListEqual(actual.readlines(), expected)
+
+ def _GetHeader(self):
+ """Returns a fake header for a JavaScript file."""
+ return [
+ '// Copyright 2011 Google Inc. All Rights Reserved.',
+ '',
+ '/**',
+ ' * @fileoverview Fake file overview.',
+ ' * @author fake@google.com (Fake Person)',
+ ' */',
+ ''
+ ]
if __name__ == '__main__':
diff --git a/tools/closure_linter/closure_linter/full_test.py b/tools/closure_linter/closure_linter/full_test.py
index f11f235493..d0a1557dc2 100755
--- a/tools/closure_linter/closure_linter/full_test.py
+++ b/tools/closure_linter/closure_linter/full_test.py
@@ -23,7 +23,6 @@ devtools/javascript/gpylint/full_test.py
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
-import re
import os
import sys
import unittest
@@ -31,8 +30,9 @@ import unittest
import gflags as flags
import unittest as googletest
-from closure_linter import checker
+from closure_linter import error_check
from closure_linter import errors
+from closure_linter import runner
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
@@ -40,38 +40,57 @@ _RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = True
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
-flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js')
+flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
+ 'limited_doc_checks.js')
+flags.FLAGS.jslint_error = error_check.Rule.ALL
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
+# TODO(user): Figure out how to list the directory.
_TEST_FILES = [
'all_js_wrapped.js',
'blank_lines.js',
'ends_with_block.js',
+ 'empty_file.js',
'externs.js',
+ 'externs_jsdoc.js',
+ 'goog_scope.js',
'html_parse_error.html',
'indentation.js',
'interface.js',
'jsdoc.js',
+ 'limited_doc_checks.js',
'minimal.js',
'other.js',
+ 'provide_blank.js',
+ 'provide_extra.js',
+ 'provide_missing.js',
+ 'require_alias.js',
'require_all_caps.js',
+ 'require_blank.js',
'require_extra.js',
'require_function.js',
'require_function_missing.js',
'require_function_through_both.js',
'require_function_through_namespace.js',
'require_interface.js',
+ 'require_interface_alias.js',
+ 'require_interface_base.js',
'require_lower_case.js',
+ 'require_missing.js',
'require_numeric.js',
- 'require_provide_ok.js',
+ 'require_provide_blank.js',
'require_provide_missing.js',
+ 'require_provide_ok.js',
+ 'semicolon_missing.js',
'simple.html',
'spaces.js',
'tokenizer.js',
'unparseable.js',
- 'utf8.html'
- ]
+ 'unused_local_variables.js',
+ 'unused_private_members.js',
+ 'utf8.html',
+]
class GJsLintTestSuite(unittest.TestSuite):
@@ -91,8 +110,11 @@ class GJsLintTestSuite(unittest.TestSuite):
test_files = _TEST_FILES
for test_file in test_files:
resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
- self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
- checker.GJsLintRunner(), errors.ByName))
+ self.addTest(
+ filetestcase.AnnotatedFileTestCase(
+ resource_path,
+ runner.Run,
+ errors.ByName))
if __name__ == '__main__':
# Don't let main parse args; it happens in the TestSuite.
diff --git a/tools/closure_linter/closure_linter/gjslint.py b/tools/closure_linter/closure_linter/gjslint.py
index e33bdddc19..824e025dcb 100755
--- a/tools/closure_linter/closure_linter/gjslint.py
+++ b/tools/closure_linter/closure_linter/gjslint.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -33,33 +32,201 @@ is in tokenizer.py and checker.py.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
- 'ajp@google.com (Andy Perelson)')
+ 'ajp@google.com (Andy Perelson)',
+ 'nnaze@google.com (Nathan Naze)',)
+import errno
+import itertools
+import os
+import platform
+import re
import sys
import time
-from closure_linter import checker
-from closure_linter import errors
-from closure_linter.common import errorprinter
-from closure_linter.common import simplefileflags as fileflags
import gflags as flags
+from closure_linter import errorrecord
+from closure_linter import runner
+from closure_linter.common import erroraccumulator
+from closure_linter.common import simplefileflags as fileflags
+
+# Attempt import of multiprocessing (should be available in Python 2.6 and up).
+try:
+ # pylint: disable=g-import-not-at-top
+ import multiprocessing
+except ImportError:
+ multiprocessing = None
FLAGS = flags.FLAGS
flags.DEFINE_boolean('unix_mode', False,
'Whether to emit warnings in standard unix format.')
flags.DEFINE_boolean('beep', True, 'Whether to beep when errors are found.')
flags.DEFINE_boolean('time', False, 'Whether to emit timing statistics.')
+flags.DEFINE_boolean('quiet', False, 'Whether to minimize logged messages. '
+ 'Most useful for per-file linting, such as that performed '
+ 'by the presubmit linter service.')
flags.DEFINE_boolean('check_html', False,
'Whether to check javascript in html files.')
flags.DEFINE_boolean('summary', False,
'Whether to show an error count summary.')
+flags.DEFINE_list('additional_extensions', None, 'List of additional file '
+ 'extensions (not js) that should be treated as '
+ 'JavaScript files.')
+flags.DEFINE_boolean('multiprocess',
+ platform.system() is 'Linux' and bool(multiprocessing),
+ 'Whether to attempt parallelized linting using the '
+ 'multiprocessing module. Enabled by default on Linux '
+ 'if the multiprocessing module is present (Python 2.6+). '
+ 'Otherwise disabled by default. '
+ 'Disabling may make debugging easier.')
+flags.ADOPT_module_key_flags(fileflags)
+flags.ADOPT_module_key_flags(runner)
+
GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
- '--check_html', '--summary']
+ '--check_html', '--summary', '--quiet']
+
+
+
+def _MultiprocessCheckPaths(paths):
+  """Run _CheckPath over multiple processes.
+
+ Tokenization, passes, and checks are expensive operations. Running in a
+ single process, they can only run on one CPU/core. Instead,
+ shard out linting over all CPUs with multiprocessing to parallelize.
+
+ Args:
+ paths: paths to check.
+
+ Yields:
+ errorrecord.ErrorRecords for any found errors.
+ """
+
+ pool = multiprocessing.Pool()
+
+ path_results = pool.imap(_CheckPath, paths)
+ for results in path_results:
+ for result in results:
+ yield result
+
+ # Force destruct before returning, as this can sometimes raise spurious
+ # "interrupted system call" (EINTR), which we can ignore.
+ try:
+ pool.close()
+ pool.join()
+ del pool
+ except OSError as err:
+ if err.errno is not errno.EINTR:
+ raise err
+
+
+def _CheckPaths(paths):
+ """Run _CheckPath on all paths in one thread.
+
+ Args:
+ paths: paths to check.
+
+ Yields:
+ errorrecord.ErrorRecords for any found errors.
+ """
+
+ for path in paths:
+ results = _CheckPath(path)
+ for record in results:
+ yield record
+
+
+def _CheckPath(path):
+ """Check a path and return any errors.
+
+ Args:
+ path: paths to check.
+
+ Returns:
+ A list of errorrecord.ErrorRecords for any found errors.
+ """
+
+ error_handler = erroraccumulator.ErrorAccumulator()
+ runner.Run(path, error_handler)
+
+ make_error_record = lambda err: errorrecord.MakeErrorRecord(path, err)
+ return map(make_error_record, error_handler.GetErrors())
-def FormatTime(t):
+def _GetFilePaths(argv):
+ suffixes = ['.js']
+ if FLAGS.additional_extensions:
+ suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
+ if FLAGS.check_html:
+ suffixes += ['.html', '.htm']
+ return fileflags.GetFileList(argv, 'JavaScript', suffixes)
+
+
+# Error printing functions
+
+
+def _PrintFileSummary(paths, records):
+ """Print a detailed summary of the number of errors in each file."""
+
+ paths = list(paths)
+ paths.sort()
+
+ for path in paths:
+ path_errors = [e for e in records if e.path == path]
+ print '%s: %d' % (path, len(path_errors))
+
+
+def _PrintFileSeparator(path):
+ print '----- FILE : %s -----' % path
+
+
+def _PrintSummary(paths, error_records):
+ """Print a summary of the number of errors and files."""
+
+ error_count = len(error_records)
+ all_paths = set(paths)
+ all_paths_count = len(all_paths)
+
+ if error_count is 0:
+ print '%d files checked, no errors found.' % all_paths_count
+
+ new_error_count = len([e for e in error_records if e.new_error])
+
+ error_paths = set([e.path for e in error_records])
+ error_paths_count = len(error_paths)
+ no_error_paths_count = all_paths_count - error_paths_count
+
+ if (error_count or new_error_count) and not FLAGS.quiet:
+ error_noun = 'error' if error_count == 1 else 'errors'
+ new_error_noun = 'error' if new_error_count == 1 else 'errors'
+ error_file_noun = 'file' if error_paths_count == 1 else 'files'
+ ok_file_noun = 'file' if no_error_paths_count == 1 else 'files'
+ print ('Found %d %s, including %d new %s, in %d %s (%d %s OK).' %
+ (error_count,
+ error_noun,
+ new_error_count,
+ new_error_noun,
+ error_paths_count,
+ error_file_noun,
+ no_error_paths_count,
+ ok_file_noun))
+
+
+def _PrintErrorRecords(error_records):
+ """Print error records strings in the expected format."""
+
+ current_path = None
+ for record in error_records:
+
+ if current_path != record.path:
+ current_path = record.path
+ if not FLAGS.unix_mode:
+ _PrintFileSeparator(current_path)
+
+ print record.error_string
+
+
+def _FormatTime(t):
"""Formats a duration as a human-readable string.
Args:
@@ -74,7 +241,9 @@ def FormatTime(t):
return '%.2fs' % t
-def main(argv = None):
+
+
+def main(argv=None):
"""Main function.
Args:
@@ -82,33 +251,41 @@ def main(argv = None):
"""
if argv is None:
argv = flags.FLAGS(sys.argv)
-
+
if FLAGS.time:
- start_time = time.time()
+ start_time = time.time()
suffixes = ['.js']
+ if FLAGS.additional_extensions:
+ suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html:
suffixes += ['.html', '.htm']
- files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
+ paths = fileflags.GetFileList(argv, 'JavaScript', suffixes)
- error_handler = None
- if FLAGS.unix_mode:
- error_handler = errorprinter.ErrorPrinter(errors.NEW_ERRORS)
- error_handler.SetFormat(errorprinter.UNIX_FORMAT)
+ if FLAGS.multiprocess:
+ records_iter = _MultiprocessCheckPaths(paths)
+ else:
+ records_iter = _CheckPaths(paths)
+
+ records_iter, records_iter_copy = itertools.tee(records_iter, 2)
+ _PrintErrorRecords(records_iter_copy)
- runner = checker.GJsLintRunner()
- result = runner.Run(files, error_handler)
- result.PrintSummary()
+ error_records = list(records_iter)
+ _PrintSummary(paths, error_records)
exit_code = 0
- if result.HasOldErrors():
+
+ # If there are any errors
+ if error_records:
exit_code += 1
- if result.HasNewErrors():
+
+ # If there are any new errors
+ if [r for r in error_records if r.new_error]:
exit_code += 2
if exit_code:
if FLAGS.summary:
- result.PrintFileSummary()
+ _PrintFileSummary(paths, error_records)
if FLAGS.beep:
# Make a beep noise.
@@ -124,16 +301,16 @@ def main(argv = None):
else:
fix_args.append(flag)
- print """
+ if not FLAGS.quiet:
+ print """
Some of the errors reported by GJsLint may be auto-fixable using the script
fixjsstyle. Please double check any changes it makes and report any bugs. The
script can be run by executing:
-fixjsstyle %s
-""" % ' '.join(fix_args)
+fixjsstyle %s """ % ' '.join(fix_args)
if FLAGS.time:
- print 'Done in %s.' % FormatTime(time.time() - start_time)
+ print 'Done in %s.' % _FormatTime(time.time() - start_time)
sys.exit(exit_code)
diff --git a/tools/closure_linter/closure_linter/indentation.py b/tools/closure_linter/closure_linter/indentation.py
index d740607530..d48ad2b862 100755
--- a/tools/closure_linter/closure_linter/indentation.py
+++ b/tools/closure_linter/closure_linter/indentation.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,6 +17,8 @@
__author__ = ('robbyw@google.com (Robert Walker)')
+import gflags as flags
+
from closure_linter import ecmametadatapass
from closure_linter import errors
from closure_linter import javascripttokens
@@ -25,7 +26,6 @@ from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
-import gflags as flags
flags.DEFINE_boolean('debug_indentation', False,
'Whether to print debugging information for indentation.')
@@ -89,7 +89,7 @@ class TokenInfo(object):
self.overridden_by = None
self.is_permanent_override = False
self.is_block = is_block
- self.is_transient = not is_block and not token.type in (
+ self.is_transient = not is_block and token.type not in (
Type.START_PAREN, Type.START_PARAMETERS)
self.line_number = token.line_number
@@ -121,7 +121,7 @@ class IndentationRules(object):
if self._stack:
old_stack = self._stack
self._stack = []
- raise Exception("INTERNAL ERROR: indentation stack is not empty: %r" %
+ raise Exception('INTERNAL ERROR: indentation stack is not empty: %r' %
old_stack)
def CheckToken(self, token, state):
@@ -152,26 +152,40 @@ class IndentationRules(object):
self._PopTo(Type.START_BRACKET)
elif token_type == Type.END_BLOCK:
- self._PopTo(Type.START_BLOCK)
+ start_token = self._PopTo(Type.START_BLOCK)
+ # Check for required goog.scope comment.
+ if start_token:
+ goog_scope = tokenutil.GoogScopeOrNoneFromStartBlock(start_token.token)
+ if goog_scope is not None:
+ if not token.line.endswith('; // goog.scope\n'):
+ if (token.line.find('//') > -1 and
+ token.line.find('goog.scope') >
+ token.line.find('//')):
+ indentation_errors.append([
+ errors.MALFORMED_END_OF_SCOPE_COMMENT,
+ ('Malformed end of goog.scope comment. Please use the '
+ 'exact following syntax to close the scope:\n'
+ '}); // goog.scope'),
+ token,
+ Position(token.start_index, token.length)])
+ else:
+ indentation_errors.append([
+ errors.MISSING_END_OF_SCOPE_COMMENT,
+ ('Missing comment for end of goog.scope which opened at line '
+ '%d. End the scope with:\n'
+ '}); // goog.scope' %
+ (start_token.line_number)),
+ token,
+ Position(token.start_index, token.length)])
elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
self._Add(self._PopTo(Type.START_BLOCK))
- elif is_first and token.string == '.':
- # This token should have been on the previous line, so treat it as if it
- # was there.
- info = TokenInfo(token)
- info.line_number = token.line_number - 1
- self._Add(info)
-
elif token_type == Type.SEMICOLON:
self._PopTransient()
- not_binary_operator = (token_type != Type.OPERATOR or
- token.metadata.IsUnaryOperator())
- not_dot = token.string != '.'
- if is_first and not_binary_operator and not_dot and token.type not in (
- Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT):
+ if (is_first and
+ token_type not in (Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT)):
if flags.FLAGS.debug_indentation:
print 'Line #%d: stack %r' % (token.line_number, stack)
@@ -198,21 +212,21 @@ class IndentationRules(object):
indentation_errors.append([
errors.WRONG_INDENTATION,
'Wrong indentation: expected any of {%s} but got %d' % (
- ', '.join(
- ['%d' % x for x in expected]), actual),
+ ', '.join('%d' % x for x in expected if x < 80), actual),
token,
Position(actual, expected[0])])
self._start_index_offset[token.line_number] = expected[0] - actual
# Add tokens that could increase indentation.
if token_type == Type.START_BRACKET:
- self._Add(TokenInfo(token=token,
+ self._Add(TokenInfo(
+ token=token,
is_block=token.metadata.context.type == Context.ARRAY_LITERAL))
elif token_type == Type.START_BLOCK or token.metadata.is_implied_block:
self._Add(TokenInfo(token=token, is_block=True))
- elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
+ elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
self._Add(TokenInfo(token=token, is_block=False))
elif token_type == Type.KEYWORD and token.string == 'return':
@@ -229,12 +243,15 @@ class IndentationRules(object):
# Add some tokens only if they appear at the end of the line.
is_last = self._IsLastCodeInLine(token)
if is_last:
+ next_code_token = tokenutil.GetNextCodeToken(token)
+ # Increase required indentation if this is an overlong wrapped statement
+ # ending in an operator.
if token_type == Type.OPERATOR:
if token.string == ':':
- if (stack and stack[-1].token.string == '?'):
+ if stack and stack[-1].token.string == '?':
# When a ternary : is on a different line than its '?', it doesn't
# add indentation.
- if (token.line_number == stack[-1].token.line_number):
+ if token.line_number == stack[-1].token.line_number:
self._Add(TokenInfo(token))
elif token.metadata.context.type == Context.CASE_BLOCK:
# Pop transient tokens from say, line continuations, e.g.,
@@ -249,7 +266,6 @@ class IndentationRules(object):
# When in an object literal, acts as operator indicating line
# continuations.
self._Add(TokenInfo(token))
- pass
else:
# ':' might also be a statement label, no effect on indentation in
# this case.
@@ -263,13 +279,16 @@ class IndentationRules(object):
self._Add(TokenInfo(token))
elif token.metadata.context.type != Context.PARAMETERS:
self._PopTransient()
-
- elif (token.string.endswith('.')
- and token_type in (Type.IDENTIFIER, Type.NORMAL)):
+ # Increase required indentation if this is the end of a statement that's
+ # continued with an operator on the next line (e.g. the '.').
+ elif (next_code_token and next_code_token.type == Type.OPERATOR and
+ not next_code_token.metadata.IsUnaryOperator()):
self._Add(TokenInfo(token))
elif token_type == Type.PARAMETERS and token.string.endswith(','):
# Parameter lists.
self._Add(TokenInfo(token))
+ elif token.IsKeyword('var'):
+ self._Add(TokenInfo(token))
elif token.metadata.is_implied_semicolon:
self._PopTransient()
elif token.IsAssignment():
@@ -297,6 +316,12 @@ class IndentationRules(object):
def _IsHardStop(self, token):
"""Determines if the given token can have a hard stop after it.
+ Args:
+ token: token to examine
+
+ Returns:
+ Whether the token can have a hard stop after it.
+
Hard stops are indentations defined by the position of another token as in
indentation lined up with return, (, [, and ?.
"""
@@ -341,7 +366,15 @@ class IndentationRules(object):
# Handle hard stops after (, [, return, =, and ?
if self._IsHardStop(token):
override_is_hard_stop = (token_info.overridden_by and
- self._IsHardStop(token_info.overridden_by.token))
+ self._IsHardStop(
+ token_info.overridden_by.token))
+ if token.type == Type.START_PAREN and token.previous:
+ # For someFunction(...) we allow to indent at the beginning of the
+ # identifier +4
+ prev = token.previous
+ if (prev.type == Type.IDENTIFIER and
+ prev.line_number == token.line_number):
+ hard_stops.add(prev.start_index + 4)
if not override_is_hard_stop:
start_index = token.start_index
if token.line_number in self._start_index_offset:
@@ -353,7 +386,7 @@ class IndentationRules(object):
elif token.string == 'return' and not token_info.overridden_by:
hard_stops.add(start_index + 7)
- elif (token.type == Type.START_BRACKET):
+ elif token.type == Type.START_BRACKET:
hard_stops.add(start_index + 1)
elif token.IsAssignment():
@@ -423,6 +456,31 @@ class IndentationRules(object):
if token.type not in Type.NON_CODE_TYPES:
return False
+ def _AllFunctionPropertyAssignTokens(self, start_token, end_token):
+ """Checks if tokens are (likely) a valid function property assignment.
+
+ Args:
+ start_token: Start of the token range.
+ end_token: End of the token range.
+
+ Returns:
+ True if all tokens between start_token and end_token are legal tokens
+ within a function declaration and assignment into a property.
+ """
+ for token in tokenutil.GetTokenRange(start_token, end_token):
+ fn_decl_tokens = (Type.FUNCTION_DECLARATION,
+ Type.PARAMETERS,
+ Type.START_PARAMETERS,
+ Type.END_PARAMETERS,
+ Type.END_PAREN)
+ if (token.type not in fn_decl_tokens and
+ token.IsCode() and
+ not tokenutil.IsIdentifierOrDot(token) and
+ not token.IsAssignment() and
+ not (token.type == Type.OPERATOR and token.string == ',')):
+ return False
+ return True
+
def _Add(self, token_info):
"""Adds the given token info to the stack.
@@ -434,9 +492,35 @@ class IndentationRules(object):
return
if token_info.is_block or token_info.token.type == Type.START_PAREN:
- index = 1
- while index <= len(self._stack):
- stack_info = self._stack[-index]
+ scope_token = tokenutil.GoogScopeOrNoneFromStartBlock(token_info.token)
+ token_info.overridden_by = TokenInfo(scope_token) if scope_token else None
+
+ if (token_info.token.type == Type.START_BLOCK and
+ token_info.token.metadata.context.type == Context.BLOCK):
+ # Handle function() {} assignments: their block contents get special
+ # treatment and are allowed to just indent by two whitespace.
+ # For example
+ # long.long.name = function(
+ # a) {
+ # In this case the { and the = are on different lines. But the
+ # override should still apply for all previous stack tokens that are
+ # part of an assignment of a block.
+
+ has_assignment = any(x for x in self._stack if x.token.IsAssignment())
+ if has_assignment:
+ last_token = token_info.token.previous
+ for stack_info in reversed(self._stack):
+ if (last_token and
+ not self._AllFunctionPropertyAssignTokens(stack_info.token,
+ last_token)):
+ break
+ stack_info.overridden_by = token_info
+ stack_info.is_permanent_override = True
+ last_token = stack_info.token
+
+ index = len(self._stack) - 1
+ while index >= 0:
+ stack_info = self._stack[index]
stack_token = stack_info.token
if stack_info.line_number == token_info.line_number:
@@ -451,24 +535,14 @@ class IndentationRules(object):
# a: 10
# },
# 30);
+ # b/11450054. If a string is not closed properly then close_block
+ # could be null.
close_block = token_info.token.metadata.context.end_token
- stack_info.is_permanent_override = \
- close_block.line_number != token_info.token.line_number
- elif (token_info.token.type == Type.START_BLOCK and
- token_info.token.metadata.context.type == Context.BLOCK and
- (stack_token.IsAssignment() or
- stack_token.type == Type.IDENTIFIER)):
- # When starting a function block, the override can transcend lines.
- # For example
- # long.long.name = function(
- # a) {
- # In this case the { and the = are on different lines. But the
- # override should still apply.
- stack_info.overridden_by = token_info
- stack_info.is_permanent_override = True
+ stack_info.is_permanent_override = close_block and (
+ close_block.line_number != token_info.token.line_number)
else:
break
- index += 1
+ index -= 1
self._stack.append(token_info)
diff --git a/tools/closure_linter/closure_linter/javascriptlintrules.py b/tools/closure_linter/closure_linter/javascriptlintrules.py
index 6b9f1be705..9578009daa 100755..100644
--- a/tools/closure_linter/closure_linter/javascriptlintrules.py
+++ b/tools/closure_linter/closure_linter/javascriptlintrules.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python
-#
-# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -24,50 +23,46 @@ __author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
-import gflags as flags
+import re
+
from closure_linter import ecmalintrules
+from closure_linter import error_check
from closure_linter import errors
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
+from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
-FLAGS = flags.FLAGS
-flags.DEFINE_list('closurized_namespaces', '',
- 'Namespace prefixes, used for testing of'
- 'goog.provide/require')
-flags.DEFINE_list('ignored_extra_namespaces', '',
- 'Fully qualified namespaces that should be not be reported '
- 'as extra by the linter.')
-
# Shorthand
Error = error.Error
Position = position.Position
+Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
"""JavaScript lint rules that catch JavaScript specific style errors."""
+ def __init__(self, namespaces_info):
+ """Initializes a JavaScriptLintRules instance."""
+ ecmalintrules.EcmaScriptLintRules.__init__(self)
+ self._namespaces_info = namespaces_info
+ self._declared_private_member_tokens = {}
+ self._declared_private_members = set()
+ self._used_private_members = set()
+ # A stack of dictionaries, one for each function scope entered. Each
+ # dictionary is keyed by an identifier that defines a local variable and has
+ # a token as its value.
+ self._unused_local_variables_by_scope = []
+
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a param tag."""
self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
'Missing docs for parameter: "%s"' % param_name, token)
- def __ContainsRecordType(self, token):
- """Check whether the given token contains a record type.
-
- Args:
- token: The token being checked
- """
- # If we see more than one left-brace in the string of an annotation token,
- # then there's a record type in there.
- return (token and token.type == Type.DOC_FLAG and
- token.attached_object.type is not None and
- token.attached_object.type.find('{') != token.string.rfind('{'))
-
-
+ # pylint: disable=too-many-statements
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
@@ -75,33 +70,94 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
token: The current token under consideration
state: parser_state object that indicates the current state in the page
"""
- if self.__ContainsRecordType(token):
- # We should bail out and not emit any warnings for this annotation.
- # TODO(nicksantos): Support record types for real.
- state.GetDocComment().Invalidate()
- return
# Call the base class's CheckToken function.
super(JavaScriptLintRules, self).CheckToken(token, state)
# Store some convenience variables
- first_in_line = token.IsFirstInLine()
- last_in_line = token.IsLastInLine()
- type = token.type
-
- if type == Type.DOC_FLAG:
+ namespaces_info = self._namespaces_info
+
+ if error_check.ShouldCheck(Rule.UNUSED_LOCAL_VARIABLES):
+ self._CheckUnusedLocalVariables(token, state)
+
+ if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
+ # Find all assignments to private members.
+ if token.type == Type.SIMPLE_LVALUE:
+ identifier = token.string
+ if identifier.endswith('_') and not identifier.endswith('__'):
+ doc_comment = state.GetDocComment()
+ suppressed = doc_comment and (
+ 'underscore' in doc_comment.suppressions or
+ 'unusedPrivateMembers' in doc_comment.suppressions)
+ if not suppressed:
+ # Look for static members defined on a provided namespace.
+ if namespaces_info:
+ namespace = namespaces_info.GetClosurizedNamespace(identifier)
+ provided_namespaces = namespaces_info.GetProvidedNamespaces()
+ else:
+ namespace = None
+ provided_namespaces = set()
+
+ # Skip cases of this.something_.somethingElse_.
+ regex = re.compile(r'^this\.[a-zA-Z_]+$')
+ if namespace in provided_namespaces or regex.match(identifier):
+ variable = identifier.split('.')[-1]
+ self._declared_private_member_tokens[variable] = token
+ self._declared_private_members.add(variable)
+ elif not identifier.endswith('__'):
+ # Consider setting public members of private members to be a usage.
+ for piece in identifier.split('.'):
+ if piece.endswith('_'):
+ self._used_private_members.add(piece)
+
+ # Find all usages of private members.
+ if token.type == Type.IDENTIFIER:
+ for piece in token.string.split('.'):
+ if piece.endswith('_'):
+ self._used_private_members.add(piece)
+
+ if token.type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'param' and flag.name_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.name_token)
+ if flag.type is not None and flag.name is not None:
+ if error_check.ShouldCheck(Rule.VARIABLE_ARG_MARKER):
+ # Check for variable arguments marker in type.
+ if flag.jstype.IsVarArgsType() and flag.name != 'var_args':
+ self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_NAME,
+ 'Variable length argument %s must be renamed '
+ 'to var_args.' % flag.name,
+ token)
+ elif not flag.jstype.IsVarArgsType() and flag.name == 'var_args':
+ self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_TYPE,
+ 'Variable length argument %s type must start '
+ 'with \'...\'.' % flag.name,
+ token)
+
+ if error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER):
+ # Check for optional marker in type.
+ if (flag.jstype.opt_arg and
+ not flag.name.startswith('opt_')):
+ self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX,
+ 'Optional parameter name %s must be prefixed '
+ 'with opt_.' % flag.name,
+ token)
+ elif (not flag.jstype.opt_arg and
+ flag.name.startswith('opt_')):
+ self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE,
+ 'Optional parameter %s type must end with =.' %
+ flag.name,
+ token)
+
if flag.flag_type in state.GetDocFlag().HAS_TYPE:
# Check for both missing type token and empty type braces '{}'
- # Missing suppress types are reported separately and we allow enums
- # without types.
- if (flag.flag_type not in ('suppress', 'enum') and
- (flag.type == None or flag.type == '' or flag.type.isspace())):
+ # Missing suppress types are reported separately and we allow enums,
+ # const, private, public and protected without types.
+ if (flag.flag_type not in state.GetDocFlag().CAN_OMIT_TYPE
+ and (not flag.jstype or flag.jstype.IsEmpty())):
self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
'Missing type in %s tag' % token.string, token)
@@ -112,43 +168,64 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
'Type should be immediately after %s tag' % token.string,
token)
- elif type == Type.DOUBLE_QUOTE_STRING_START:
- next = token.next
- while next.type == Type.STRING_TEXT:
+ elif token.type == Type.DOUBLE_QUOTE_STRING_START:
+ next_token = token.next
+ while next_token.type == Type.STRING_TEXT:
if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
- next.string):
+ next_token.string):
break
- next = next.next
+ next_token = next_token.next
else:
self._HandleError(
errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
'Single-quoted string preferred over double-quoted string.',
token,
- Position.All(token.string))
+ position=Position.All(token.string))
+
+ elif token.type == Type.END_DOC_COMMENT:
+ doc_comment = state.GetDocComment()
+
+ # When @externs appears in a @fileoverview comment, it should trigger
+ # the same limited doc checks as a special filename like externs.js.
+ if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'):
+ self._SetLimitedDocChecks(True)
- elif type == Type.END_DOC_COMMENT:
- if (FLAGS.strict and not self._is_html and state.InTopLevel() and
- not state.InBlock()):
+ if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and
+ not self._is_html and
+ state.InTopLevel() and
+ not state.InNonScopeBlock()):
# Check if we're in a fileoverview or constructor JsDoc.
- doc_comment = state.GetDocComment()
- is_constructor = (doc_comment.HasFlag('constructor') or
+ is_constructor = (
+ doc_comment.HasFlag('constructor') or
doc_comment.HasFlag('interface'))
- is_file_overview = doc_comment.HasFlag('fileoverview')
+ # @fileoverview is an optional tag so if the dosctring is the first
+ # token in the file treat it as a file level docstring.
+ is_file_level_comment = (
+ doc_comment.HasFlag('fileoverview') or
+ not doc_comment.start_token.previous)
# If the comment is not a file overview, and it does not immediately
# precede some code, skip it.
# NOTE: The tokenutil methods are not used here because of their
# behavior at the top of a file.
- next = token.next
- if (not next or
- (not is_file_overview and next.type in Type.NON_CODE_TYPES)):
+ next_token = token.next
+ if (not next_token or
+ (not is_file_level_comment and
+ next_token.type in Type.NON_CODE_TYPES)):
+ return
+
+ # Don't require extra blank lines around suppression of extra
+ # goog.require errors.
+ if (doc_comment.SuppressionOnly() and
+ next_token.type == Type.IDENTIFIER and
+ next_token.string in ['goog.provide', 'goog.require']):
return
# Find the start of this block (include comments above the block, unless
# this is a file overview).
block_start = doc_comment.start_token
- if not is_file_overview:
+ if not is_file_level_comment:
token = block_start.previous
while token and token.type in Type.COMMENT_TYPES:
block_start = token
@@ -170,23 +247,27 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
error_message = False
expected_blank_lines = 0
- if is_file_overview and blank_lines == 0:
+ # Only need blank line before file overview if it is not the beginning
+ # of the file, e.g. copyright is first.
+ if is_file_level_comment and blank_lines == 0 and block_start.previous:
error_message = 'Should have a blank line before a file overview.'
expected_blank_lines = 1
elif is_constructor and blank_lines != 3:
- error_message = ('Should have 3 blank lines before a constructor/'
- 'interface.')
+ error_message = (
+ 'Should have 3 blank lines before a constructor/interface.')
expected_blank_lines = 3
- elif not is_file_overview and not is_constructor and blank_lines != 2:
+ elif (not is_file_level_comment and not is_constructor and
+ blank_lines != 2):
error_message = 'Should have 2 blank lines between top-level blocks.'
expected_blank_lines = 2
if error_message:
- self._HandleError(errors.WRONG_BLANK_LINE_COUNT, error_message,
- block_start, Position.AtBeginning(),
- expected_blank_lines - blank_lines)
+ self._HandleError(
+ errors.WRONG_BLANK_LINE_COUNT, error_message,
+ block_start, position=Position.AtBeginning(),
+ fix_data=expected_blank_lines - blank_lines)
- elif type == Type.END_BLOCK:
+ elif token.type == Type.END_BLOCK:
if state.InFunction() and state.IsFunctionClose():
is_immediately_called = (token.next and
token.next.type == Type.START_PAREN)
@@ -202,44 +283,88 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
self._HandleError(
errors.MISSING_RETURN_DOCUMENTATION,
'Missing @return JsDoc in function with non-trivial return',
- function.doc.end_token, Position.AtBeginning())
- elif (not function.has_return and function.doc and
+ function.doc.end_token, position=Position.AtBeginning())
+ elif (not function.has_return and
+ not function.has_throw and
+ function.doc and
function.doc.HasFlag('return') and
not state.InInterfaceMethod()):
- return_flag = function.doc.GetFlag('return')
- if (return_flag.type is None or (
- 'undefined' not in return_flag.type and
- 'void' not in return_flag.type and
- '*' not in return_flag.type)):
+ flag = function.doc.GetFlag('return')
+ valid_no_return_names = ['undefined', 'void', '*']
+ invalid_return = flag.jstype is None or not any(
+ sub_type.identifier in valid_no_return_names
+ for sub_type in flag.jstype.IterTypeGroup())
+
+ if invalid_return:
self._HandleError(
errors.UNNECESSARY_RETURN_DOCUMENTATION,
'Found @return JsDoc on function that returns nothing',
- return_flag.flag_token, Position.AtBeginning())
+ flag.flag_token, position=Position.AtBeginning())
+
+ # b/4073735. Method in object literal definition of prototype can
+ # safely reference 'this'.
+ prototype_object_literal = False
+ block_start = None
+ previous_code = None
+ previous_previous_code = None
+
+ # Search for cases where prototype is defined as object literal.
+ # previous_previous_code
+ # | previous_code
+ # | | block_start
+ # | | |
+ # a.b.prototype = {
+ # c : function() {
+ # this.d = 1;
+ # }
+ # }
+
+ # If in object literal, find first token of block so to find previous
+ # tokens to check above condition.
+ if state.InObjectLiteral():
+ block_start = state.GetCurrentBlockStart()
+
+ # If an object literal then get previous token (code type). For above
+ # case it should be '='.
+ if block_start:
+ previous_code = tokenutil.SearchExcept(block_start,
+ Type.NON_CODE_TYPES,
+ reverse=True)
+
+ # If previous token to block is '=' then get its previous token.
+ if previous_code and previous_code.IsOperator('='):
+ previous_previous_code = tokenutil.SearchExcept(previous_code,
+ Type.NON_CODE_TYPES,
+ reverse=True)
+
+ # If variable/token before '=' ends with '.prototype' then its above
+ # case of prototype defined with object literal.
+ prototype_object_literal = (previous_previous_code and
+ previous_previous_code.string.endswith(
+ '.prototype'))
- if state.InFunction() and state.IsFunctionClose():
- is_immediately_called = (token.next and
- token.next.type == Type.START_PAREN)
if (function.has_this and function.doc and
not function.doc.HasFlag('this') and
not function.is_constructor and
not function.is_interface and
- '.prototype.' not in function.name):
+ '.prototype.' not in function.name and
+ not prototype_object_literal):
self._HandleError(
errors.MISSING_JSDOC_TAG_THIS,
'Missing @this JsDoc in function referencing "this". ('
'this usually means you are trying to reference "this" in '
'a static function, or you have forgotten to mark a '
'constructor with @constructor)',
- function.doc.end_token, Position.AtBeginning())
+ function.doc.end_token, position=Position.AtBeginning())
- elif type == Type.IDENTIFIER:
+ elif token.type == Type.IDENTIFIER:
if token.string == 'goog.inherits' and not state.InFunction():
if state.GetLastNonSpaceToken().line_number == token.line_number:
self._HandleError(
errors.MISSING_LINE,
'Missing newline between constructor and goog.inherits',
token,
- Position.AtBeginning())
+ position=Position.AtBeginning())
extra_space = state.GetLastNonSpaceToken().next
while extra_space != token:
@@ -253,7 +378,92 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
# TODO(robbyw): Test the last function was a constructor.
# TODO(robbyw): Test correct @extends and @implements documentation.
- elif type == Type.OPERATOR:
+ elif (token.string == 'goog.provide' and
+ not state.InFunction() and
+ namespaces_info is not None):
+ namespace = tokenutil.GetStringAfterToken(token)
+
+ # Report extra goog.provide statement.
+ if not namespace or namespaces_info.IsExtraProvide(token):
+ if not namespace:
+ msg = 'Empty namespace in goog.provide'
+ else:
+ msg = 'Unnecessary goog.provide: ' + namespace
+
+ # Hint to user if this is a Test namespace.
+ if namespace.endswith('Test'):
+ msg += (' *Test namespaces must be mentioned in the '
+ 'goog.setTestOnly() call')
+
+ self._HandleError(
+ errors.EXTRA_GOOG_PROVIDE,
+ msg,
+ token, position=Position.AtBeginning())
+
+ if namespaces_info.IsLastProvide(token):
+ # Report missing provide statements after the last existing provide.
+ missing_provides = namespaces_info.GetMissingProvides()
+ if missing_provides:
+ self._ReportMissingProvides(
+ missing_provides,
+ tokenutil.GetLastTokenInSameLine(token).next,
+ False)
+
+ # If there are no require statements, missing requires should be
+ # reported after the last provide.
+ if not namespaces_info.GetRequiredNamespaces():
+ missing_requires, illegal_alias_statements = (
+ namespaces_info.GetMissingRequires())
+ if missing_requires:
+ self._ReportMissingRequires(
+ missing_requires,
+ tokenutil.GetLastTokenInSameLine(token).next,
+ True)
+ if illegal_alias_statements:
+ self._ReportIllegalAliasStatement(illegal_alias_statements)
+
+ elif (token.string == 'goog.require' and
+ not state.InFunction() and
+ namespaces_info is not None):
+ namespace = tokenutil.GetStringAfterToken(token)
+
+ # If there are no provide statements, missing provides should be
+ # reported before the first require.
+ if (namespaces_info.IsFirstRequire(token) and
+ not namespaces_info.GetProvidedNamespaces()):
+ missing_provides = namespaces_info.GetMissingProvides()
+ if missing_provides:
+ self._ReportMissingProvides(
+ missing_provides,
+ tokenutil.GetFirstTokenInSameLine(token),
+ True)
+
+ # Report extra goog.require statement.
+ if not namespace or namespaces_info.IsExtraRequire(token):
+ if not namespace:
+ msg = 'Empty namespace in goog.require'
+ else:
+ msg = 'Unnecessary goog.require: ' + namespace
+
+ self._HandleError(
+ errors.EXTRA_GOOG_REQUIRE,
+ msg,
+ token, position=Position.AtBeginning())
+
+ # Report missing goog.require statements.
+ if namespaces_info.IsLastRequire(token):
+ missing_requires, illegal_alias_statements = (
+ namespaces_info.GetMissingRequires())
+ if missing_requires:
+ self._ReportMissingRequires(
+ missing_requires,
+ tokenutil.GetLastTokenInSameLine(token).next,
+ False)
+ if illegal_alias_statements:
+ self._ReportIllegalAliasStatement(illegal_alias_statements)
+
+ elif token.type == Type.OPERATOR:
+ last_in_line = token.IsLastInLine()
# If the token is unary and appears to be used in a unary context
# it's ok. Otherwise, if it's at the end of the line or immediately
# before a comment, it's ok.
@@ -262,15 +472,18 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
if (not token.metadata.IsUnaryOperator() and not last_in_line
and not token.next.IsComment()
and not token.next.IsOperator(',')
- and not token.next.type in (Type.WHITESPACE, Type.END_PAREN,
+ and not tokenutil.IsDot(token)
+ and token.next.type not in (Type.WHITESPACE, Type.END_PAREN,
Type.END_BRACKET, Type.SEMICOLON,
Type.START_BRACKET)):
self._HandleError(
errors.MISSING_SPACE,
'Missing space after "%s"' % token.string,
token,
- Position.AtEnd(token.string))
- elif type == Type.WHITESPACE:
+ position=Position.AtEnd(token.string))
+ elif token.type == Type.WHITESPACE:
+ first_in_line = token.IsFirstInLine()
+ last_in_line = token.IsLastInLine()
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if not last_in_line and not first_in_line and not token.next.IsComment():
@@ -282,114 +495,260 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
errors.EXTRA_SPACE,
'Extra space after "%s"' % token.previous.string,
token,
- Position.All(token.string))
+ position=Position.All(token.string))
+ elif token.type == Type.SEMICOLON:
+ previous_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES,
+ reverse=True)
+ if not previous_token:
+ self._HandleError(
+ errors.REDUNDANT_SEMICOLON,
+ 'Semicolon without any statement',
+ token,
+ position=Position.AtEnd(token.string))
+ elif (previous_token.type == Type.KEYWORD and
+ previous_token.string not in ['break', 'continue', 'return']):
+ self._HandleError(
+ errors.REDUNDANT_SEMICOLON,
+ ('Semicolon after \'%s\' without any statement.'
+ ' Looks like an error.' % previous_token.string),
+ token,
+ position=Position.AtEnd(token.string))
- def Finalize(self, state, tokenizer_mode):
- """Perform all checks that need to occur after all lines are processed."""
- # Call the base class's Finalize function.
- super(JavaScriptLintRules, self).Finalize(state, tokenizer_mode)
-
- # Check for sorted requires statements.
- goog_require_tokens = state.GetGoogRequireTokens()
- requires = [require_token.string for require_token in goog_require_tokens]
- sorted_requires = sorted(requires)
- index = 0
- bad = False
- for item in requires:
- if item != sorted_requires[index]:
- bad = True
+ def _CheckUnusedLocalVariables(self, token, state):
+ """Checks for unused local variables in function blocks.
+
+ Args:
+ token: The token to check.
+ state: The state tracker.
+ """
+ # We don't use state.InFunction because that disregards scope functions.
+ in_function = state.FunctionDepth() > 0
+ if token.type == Type.SIMPLE_LVALUE or token.type == Type.IDENTIFIER:
+ if in_function:
+ identifier = token.string
+ # Check whether the previous token was var.
+ previous_code_token = tokenutil.CustomSearch(
+ token,
+ lambda t: t.type not in Type.NON_CODE_TYPES,
+ reverse=True)
+ if previous_code_token and previous_code_token.IsKeyword('var'):
+ # Add local variable declaration to the top of the unused locals
+ # stack.
+ self._unused_local_variables_by_scope[-1][identifier] = token
+ elif token.type == Type.IDENTIFIER:
+ # This covers most cases where the variable is used as an identifier.
+ self._MarkLocalVariableUsed(token.string)
+ elif token.type == Type.SIMPLE_LVALUE and '.' in identifier:
+ # This covers cases where a value is assigned to a property of the
+ # variable.
+ self._MarkLocalVariableUsed(token.string)
+ elif token.type == Type.START_BLOCK:
+ if in_function and state.IsFunctionOpen():
+ # Push a new map onto the stack
+ self._unused_local_variables_by_scope.append({})
+ elif token.type == Type.END_BLOCK:
+ if state.IsFunctionClose():
+ # Pop the stack and report any remaining locals as unused.
+ unused_local_variables = self._unused_local_variables_by_scope.pop()
+ for unused_token in unused_local_variables.values():
+ self._HandleError(
+ errors.UNUSED_LOCAL_VARIABLE,
+ 'Unused local variable: %s.' % unused_token.string,
+ unused_token)
+ elif token.type == Type.DOC_FLAG:
+ # Flags that use aliased symbols should be counted.
+ flag = token.attached_object
+ js_type = flag and flag.jstype
+ if flag and flag.flag_type in state.GetDocFlag().HAS_TYPE and js_type:
+ self._MarkAliasUsed(js_type)
+
+ def _MarkAliasUsed(self, js_type):
+ """Marks aliases in a type as used.
+
+ Recursively iterates over all subtypes in a jsdoc type annotation and
+ tracks usage of aliased symbols (which may be local variables).
+ Marks the local variable as used in the scope nearest to the current
+ scope that matches the given token.
+
+ Args:
+ js_type: The jsdoc type, a typeannotation.TypeAnnotation object.
+ """
+ if js_type.alias:
+ self._MarkLocalVariableUsed(js_type.identifier)
+ for sub_type in js_type.IterTypes():
+ self._MarkAliasUsed(sub_type)
+
+ def _MarkLocalVariableUsed(self, identifier):
+ """Marks the local variable as used in the relevant scope.
+
+ Marks the local variable in the scope nearest to the current scope that
+ matches the given identifier as used.
+
+ Args:
+ identifier: The identifier representing the potential usage of a local
+ variable.
+ """
+ identifier = identifier.split('.', 1)[0]
+ # Find the first instance of the identifier in the stack of function scopes
+ # and mark it used.
+ for unused_local_variables in reversed(
+ self._unused_local_variables_by_scope):
+ if identifier in unused_local_variables:
+ del unused_local_variables[identifier]
break
- index += 1
- if bad:
+ def _ReportMissingProvides(self, missing_provides, token, need_blank_line):
+ """Reports missing provide statements to the error handler.
+
+ Args:
+ missing_provides: A dictionary of string(key) and integer(value) where
+ each string(key) is a namespace that should be provided, but is not
+ and integer(value) is first line number where it's required.
+ token: The token where the error was detected (also where the new provides
+ will be inserted.
+ need_blank_line: Whether a blank line needs to be inserted after the new
+ provides are inserted. May be True, False, or None, where None
+ indicates that the insert location is unknown.
+ """
+
+ missing_provides_msg = 'Missing the following goog.provide statements:\n'
+ missing_provides_msg += '\n'.join(['goog.provide(\'%s\');' % x for x in
+ sorted(missing_provides)])
+ missing_provides_msg += '\n'
+
+ missing_provides_msg += '\nFirst line where provided: \n'
+ missing_provides_msg += '\n'.join(
+ [' %s : line %d' % (x, missing_provides[x]) for x in
+ sorted(missing_provides)])
+ missing_provides_msg += '\n'
+
+ self._HandleError(
+ errors.MISSING_GOOG_PROVIDE,
+ missing_provides_msg,
+ token, position=Position.AtBeginning(),
+ fix_data=(missing_provides.keys(), need_blank_line))
+
+ def _ReportMissingRequires(self, missing_requires, token, need_blank_line):
+ """Reports missing require statements to the error handler.
+
+ Args:
+ missing_requires: A dictionary of string(key) and integer(value) where
+ each string(key) is a namespace that should be required, but is not
+ and integer(value) is first line number where it's required.
+ token: The token where the error was detected (also where the new requires
+ will be inserted.
+ need_blank_line: Whether a blank line needs to be inserted before the new
+ requires are inserted. May be True, False, or None, where None
+ indicates that the insert location is unknown.
+ """
+
+ missing_requires_msg = 'Missing the following goog.require statements:\n'
+ missing_requires_msg += '\n'.join(['goog.require(\'%s\');' % x for x in
+ sorted(missing_requires)])
+ missing_requires_msg += '\n'
+
+ missing_requires_msg += '\nFirst line where required: \n'
+ missing_requires_msg += '\n'.join(
+ [' %s : line %d' % (x, missing_requires[x]) for x in
+ sorted(missing_requires)])
+ missing_requires_msg += '\n'
+
+ self._HandleError(
+ errors.MISSING_GOOG_REQUIRE,
+ missing_requires_msg,
+ token, position=Position.AtBeginning(),
+ fix_data=(missing_requires.keys(), need_blank_line))
+
+ def _ReportIllegalAliasStatement(self, illegal_alias_statements):
+ """Reports alias statements that would need a goog.require."""
+ for namespace, token in illegal_alias_statements.iteritems():
self._HandleError(
- errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
- 'goog.require classes must be alphabetized. The correct code is:\n' +
- '\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
- sorted_requires)),
- goog_require_tokens[index],
- position=Position.AtBeginning(),
- fix_data=goog_require_tokens)
-
- # Check for sorted provides statements.
- goog_provide_tokens = state.GetGoogProvideTokens()
- provides = [provide_token.string for provide_token in goog_provide_tokens]
- sorted_provides = sorted(provides)
- index = 0
- bad = False
- for item in provides:
- if item != sorted_provides[index]:
- bad = True
- break
- index += 1
+ errors.ALIAS_STMT_NEEDS_GOOG_REQUIRE,
+ 'The alias definition would need the namespace \'%s\' which is not '
+ 'required through any other symbol.' % namespace,
+ token, position=Position.AtBeginning())
- if bad:
+ def Finalize(self, state):
+ """Perform all checks that need to occur after all lines are processed."""
+ # Call the base class's Finalize function.
+ super(JavaScriptLintRules, self).Finalize(state)
+
+ if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
+ # Report an error for any declared private member that was never used.
+ unused_private_members = (self._declared_private_members -
+ self._used_private_members)
+
+ for variable in unused_private_members:
+ token = self._declared_private_member_tokens[variable]
+ self._HandleError(errors.UNUSED_PRIVATE_MEMBER,
+ 'Unused private member: %s.' % token.string,
+ token)
+
+ # Clear state to prepare for the next file.
+ self._declared_private_member_tokens = {}
+ self._declared_private_members = set()
+ self._used_private_members = set()
+
+ namespaces_info = self._namespaces_info
+ if namespaces_info is not None:
+ # If there are no provide or require statements, missing provides and
+ # requires should be reported on line 1.
+ if (not namespaces_info.GetProvidedNamespaces() and
+ not namespaces_info.GetRequiredNamespaces()):
+ missing_provides = namespaces_info.GetMissingProvides()
+ if missing_provides:
+ self._ReportMissingProvides(
+ missing_provides, state.GetFirstToken(), None)
+
+ missing_requires, illegal_alias = namespaces_info.GetMissingRequires()
+ if missing_requires:
+ self._ReportMissingRequires(
+ missing_requires, state.GetFirstToken(), None)
+ if illegal_alias:
+ self._ReportIllegalAliasStatement(illegal_alias)
+
+ self._CheckSortedRequiresProvides(state.GetFirstToken())
+
+ def _CheckSortedRequiresProvides(self, token):
+ """Checks that all goog.require and goog.provide statements are sorted.
+
+ Note that this method needs to be run after missing statements are added to
+ preserve alphabetical order.
+
+ Args:
+ token: The first token in the token stream.
+ """
+ sorter = requireprovidesorter.RequireProvideSorter()
+ first_provide_token = sorter.CheckProvides(token)
+ if first_provide_token:
+ new_order = sorter.GetFixedProvideString(first_provide_token)
self._HandleError(
errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
'goog.provide classes must be alphabetized. The correct code is:\n' +
- '\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x,
- sorted_provides)),
- goog_provide_tokens[index],
+ new_order,
+ first_provide_token,
position=Position.AtBeginning(),
- fix_data=goog_provide_tokens)
+ fix_data=first_provide_token)
- if FLAGS.closurized_namespaces:
- # Check that we provide everything we need.
- provided_namespaces = state.GetProvidedNamespaces()
- missing_provides = provided_namespaces - set(provides)
- if missing_provides:
- self._HandleError(
- errors.MISSING_GOOG_PROVIDE,
- 'Missing the following goog.provide statements:\n' +
- '\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x,
- sorted(missing_provides))),
- state.GetFirstToken(), position=Position.AtBeginning(),
- fix_data=missing_provides)
-
- # Compose a set of all available namespaces. Explicitly omit goog
- # because if you can call goog.require, you already have goog.
- available_namespaces = (set(requires) | set(provides) | set(['goog']) |
- provided_namespaces)
-
- # Check that we require everything we need.
- missing_requires = set()
- for namespace_variants in state.GetUsedNamespaces():
- # Namespace variants is a list of potential things to require. If we
- # find we're missing one, we are lazy and choose to require the first
- # in the sequence - which should be the namespace.
- if not set(namespace_variants) & available_namespaces:
- missing_requires.add(namespace_variants[0])
-
- if missing_requires:
- self._HandleError(
- errors.MISSING_GOOG_REQUIRE,
- 'Missing the following goog.require statements:\n' +
- '\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
- sorted(missing_requires))),
- state.GetFirstToken(), position=Position.AtBeginning(),
- fix_data=missing_requires)
-
- # Check that we don't require things we don't actually use.
- namespace_variants = state.GetUsedNamespaces()
- used_namespaces = set()
- for a, b in namespace_variants:
- used_namespaces.add(a)
- used_namespaces.add(b)
-
- extra_requires = set()
- for i in requires:
- baseNamespace = i.split('.')[0]
- if (i not in used_namespaces and
- baseNamespace in FLAGS.closurized_namespaces and
- i not in FLAGS.ignored_extra_namespaces):
- extra_requires.add(i)
-
- if extra_requires:
- self._HandleError(
- errors.EXTRA_GOOG_REQUIRE,
- 'The following goog.require statements appear unnecessary:\n' +
- '\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
- sorted(extra_requires))),
- state.GetFirstToken(), position=Position.AtBeginning(),
- fix_data=extra_requires)
+ first_require_token = sorter.CheckRequires(token)
+ if first_require_token:
+ new_order = sorter.GetFixedRequireString(first_require_token)
+ self._HandleError(
+ errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
+ 'goog.require classes must be alphabetized. The correct code is:\n' +
+ new_order,
+ first_require_token,
+ position=Position.AtBeginning(),
+ fix_data=first_require_token)
+ def GetLongLineExceptions(self):
+ """Gets a list of regexps for lines which can be longer than the limit.
+
+ Returns:
+ A list of regexps, used as matches (rather than searches).
+ """
+ return [
+ re.compile(r'(var .+\s*=\s*)?goog\.require\(.+\);?\s*$'),
+ re.compile(r'goog\.(provide|module|setTestOnly)\(.+\);?\s*$'),
+ re.compile(r'[\s/*]*@visibility\s*{.*}[\s*/]*$'),
+ ]
diff --git a/tools/closure_linter/closure_linter/javascriptstatetracker.py b/tools/closure_linter/closure_linter/javascriptstatetracker.py
index 9cce37632e..e0a42f66a8 100755
--- a/tools/closure_linter/closure_linter/javascriptstatetracker.py
+++ b/tools/closure_linter/closure_linter/javascriptstatetracker.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -36,7 +35,8 @@ class JsDocFlag(statetracker.DocFlag):
including braces.
type_end_token: The last token specifying the flag JS type,
including braces.
- type: The JavaScript type spec.
+ type: The type spec string.
+ jstype: The type spec, a TypeAnnotation instance.
name_token: The token specifying the flag name.
name: The flag name
description_start_token: The first token in the description.
@@ -50,18 +50,10 @@ class JsDocFlag(statetracker.DocFlag):
# TODO(robbyw): determine which of these, if any, should be illegal.
EXTENDED_DOC = frozenset([
'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link',
- 'protected', 'notypecheck', 'throws'])
+ 'meaning', 'provideGoog', 'throws'])
LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC
- def __init__(self, flag_token):
- """Creates the JsDocFlag object and attaches it to the given start token.
-
- Args:
- flag_token: The starting token of the flag.
- """
- statetracker.DocFlag.__init__(self, flag_token)
-
class JavaScriptStateTracker(statetracker.StateTracker):
"""JavaScript state tracker.
@@ -70,25 +62,15 @@ class JavaScriptStateTracker(statetracker.StateTracker):
functionality needed for JavaScript.
"""
- def __init__(self, closurized_namespaces=''):
- """Initializes a JavaScript token stream state tracker.
-
- Args:
- closurized_namespaces: An optional list of namespace prefixes used for
- testing of goog.provide/require.
- """
+ def __init__(self):
+ """Initializes a JavaScript token stream state tracker."""
statetracker.StateTracker.__init__(self, JsDocFlag)
- self.__closurized_namespaces = closurized_namespaces
def Reset(self):
- """Resets the state tracker to prepare for processing a new page."""
+ self._scope_depth = 0
+ self._block_stack = []
super(JavaScriptStateTracker, self).Reset()
- self.__goog_require_tokens = []
- self.__goog_provide_tokens = []
- self.__provided_namespaces = set()
- self.__used_namespaces = []
-
def InTopLevel(self):
"""Compute whether we are at the top level in the class.
@@ -100,23 +82,26 @@ class JavaScriptStateTracker(statetracker.StateTracker):
Returns:
Whether we are at the top level in the class.
"""
- return not self.InParentheses()
+ return self._scope_depth == self.ParenthesesDepth()
+
+ def InFunction(self):
+ """Returns true if the current token is within a function.
- def GetGoogRequireTokens(self):
- """Returns list of require tokens."""
- return self.__goog_require_tokens
+ This js-specific override ignores goog.scope functions.
- def GetGoogProvideTokens(self):
- """Returns list of provide tokens."""
- return self.__goog_provide_tokens
+ Returns:
+ True if the current token is within a function.
+ """
+ return self._scope_depth != self.FunctionDepth()
- def GetProvidedNamespaces(self):
- """Returns list of provided namespaces."""
- return self.__provided_namespaces
+ def InNonScopeBlock(self):
+ """Compute whether we are nested within a non-goog.scope block.
- def GetUsedNamespaces(self):
- """Returns list of used namespaces, is a list of sequences."""
- return self.__used_namespaces
+ Returns:
+ True if the token is not enclosed in a block that does not originate from
+ a goog.scope statement. False otherwise.
+ """
+ return self._scope_depth != self.BlockDepth()
def GetBlockType(self, token):
"""Determine the block type given a START_BLOCK token.
@@ -128,111 +113,38 @@ class JavaScriptStateTracker(statetracker.StateTracker):
Returns:
Code block type for current token.
"""
- last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
- True)
+ last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, reverse=True)
if last_code.type in (Type.END_PARAMETERS, Type.END_PAREN,
Type.KEYWORD) and not last_code.IsKeyword('return'):
return self.CODE
else:
return self.OBJECT_LITERAL
+ def GetCurrentBlockStart(self):
+ """Gets the start token of current block.
+
+ Returns:
+ Starting token of current block. None if not in block.
+ """
+ if self._block_stack:
+ return self._block_stack[-1]
+ else:
+ return None
+
def HandleToken(self, token, last_non_space_token):
"""Handles the given token and updates state.
Args:
token: The token to handle.
- last_non_space_token:
+ last_non_space_token: The last non space token encountered
"""
+ if token.type == Type.START_BLOCK:
+ self._block_stack.append(token)
+ if token.type == Type.IDENTIFIER and token.string == 'goog.scope':
+ self._scope_depth += 1
+ if token.type == Type.END_BLOCK:
+ start_token = self._block_stack.pop()
+ if tokenutil.GoogScopeOrNoneFromStartBlock(start_token):
+ self._scope_depth -= 1
super(JavaScriptStateTracker, self).HandleToken(token,
last_non_space_token)
-
- if token.IsType(Type.IDENTIFIER):
- if token.string == 'goog.require':
- class_token = tokenutil.Search(token, Type.STRING_TEXT)
- self.__goog_require_tokens.append(class_token)
-
- elif token.string == 'goog.provide':
- class_token = tokenutil.Search(token, Type.STRING_TEXT)
- self.__goog_provide_tokens.append(class_token)
-
- elif self.__closurized_namespaces:
- self.__AddUsedNamespace(token.string)
- if token.IsType(Type.SIMPLE_LVALUE) and not self.InFunction():
- identifier = token.values['identifier']
-
- if self.__closurized_namespaces:
- namespace = self.GetClosurizedNamespace(identifier)
- if namespace and identifier == namespace:
- self.__provided_namespaces.add(namespace)
- if (self.__closurized_namespaces and
- token.IsType(Type.DOC_FLAG) and
- token.attached_object.flag_type == 'implements'):
- # Interfaces should be goog.require'd.
- doc_start = tokenutil.Search(token, Type.DOC_START_BRACE)
- interface = tokenutil.Search(doc_start, Type.COMMENT)
- self.__AddUsedNamespace(interface.string)
-
- def __AddUsedNamespace(self, identifier):
- """Adds the namespace of an identifier to the list of used namespaces.
-
- Args:
- identifier: An identifier which has been used.
- """
- namespace = self.GetClosurizedNamespace(identifier)
-
- if namespace:
- # We add token.string as a 'namespace' as it is something that could
- # potentially be provided to satisfy this dependency.
- self.__used_namespaces.append([namespace, identifier])
-
- def GetClosurizedNamespace(self, identifier):
- """Given an identifier, returns the namespace that identifier is from.
-
- Args:
- identifier: The identifier to extract a namespace from.
-
- Returns:
- The namespace the given identifier resides in, or None if one could not
- be found.
- """
- parts = identifier.split('.')
- for part in parts:
- if part.endswith('_'):
- # Ignore private variables / inner classes.
- return None
-
- if identifier.startswith('goog.global'):
- # Ignore goog.global, since it is, by definition, global.
- return None
-
- for namespace in self.__closurized_namespaces:
- if identifier.startswith(namespace + '.'):
- last_part = parts[-1]
- if not last_part:
- # TODO(robbyw): Handle this: it's a multi-line identifier.
- return None
-
- if last_part in ('apply', 'inherits', 'call'):
- # Calling one of Function's methods usually indicates use of a
- # superclass.
- parts.pop()
- last_part = parts[-1]
-
- for i in xrange(1, len(parts)):
- part = parts[i]
- if part.isupper():
- # If an identifier is of the form foo.bar.BAZ.x or foo.bar.BAZ,
- # the namespace is foo.bar.
- return '.'.join(parts[:i])
- if part == 'prototype':
- # If an identifier is of the form foo.bar.prototype.x, the
- # namespace is foo.bar.
- return '.'.join(parts[:i])
-
- if last_part.isupper() or not last_part[0].isupper():
- # Strip off the last part of an enum or constant reference.
- parts.pop()
-
- return '.'.join(parts)
-
- return None
diff --git a/tools/closure_linter/closure_linter/javascriptstatetracker_test.py b/tools/closure_linter/closure_linter/javascriptstatetracker_test.py
index e4288b7b64..76dabd2c70 100755..100644
--- a/tools/closure_linter/closure_linter/javascriptstatetracker_test.py
+++ b/tools/closure_linter/closure_linter/javascriptstatetracker_test.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python
#
-# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
-#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -14,40 +13,266 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Unit tests for JavaScriptStateTracker."""
+"""Unit tests for the javascriptstatetracker module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+__author__ = ('nnaze@google.com (Nathan Naze)')
import unittest as googletest
-from closure_linter import javascriptstatetracker
-
-class JavaScriptStateTrackerTest(googletest.TestCase):
-
- __test_cases = {
- 'package.CONSTANT' : 'package',
- 'package.methodName' : 'package',
- 'package.subpackage.methodName' : 'package.subpackage',
- 'package.ClassName.something' : 'package.ClassName',
- 'package.ClassName.Enum.VALUE.methodName' : 'package.ClassName.Enum',
- 'package.ClassName.CONSTANT' : 'package.ClassName',
- 'package.ClassName.inherits' : 'package.ClassName',
- 'package.ClassName.apply' : 'package.ClassName',
- 'package.ClassName.methodName.apply' : 'package.ClassName',
- 'package.ClassName.methodName.call' : 'package.ClassName',
- 'package.ClassName.prototype.methodName' : 'package.ClassName',
- 'package.ClassName.privateMethod_' : None,
- 'package.ClassName.prototype.methodName.apply' : 'package.ClassName'
- }
-
- def testGetClosurizedNamespace(self):
- stateTracker = javascriptstatetracker.JavaScriptStateTracker(['package'])
- for identifier, expected_namespace in self.__test_cases.items():
- actual_namespace = stateTracker.GetClosurizedNamespace(identifier)
- self.assertEqual(expected_namespace, actual_namespace,
- 'expected namespace "' + str(expected_namespace) +
- '" for identifier "' + str(identifier) + '" but was "' +
- str(actual_namespace) + '"')
+
+from closure_linter import javascripttokens
+from closure_linter import testutil
+from closure_linter import tokenutil
+
+
+_FUNCTION_SCRIPT = """\
+var a = 3;
+
+function foo(aaa, bbb, ccc) {
+ var b = 4;
+}
+
+
+/**
+ * JSDoc comment.
+ */
+var bar = function(ddd, eee, fff) {
+
+};
+
+
+/**
+ * Verify that nested functions get their proper parameters recorded.
+ */
+var baz = function(ggg, hhh, iii) {
+ var qux = function(jjj, kkk, lll) {
+ };
+ // make sure that entering a new block does not change baz' parameters.
+ {};
+};
+
+"""
+
+
+class FunctionTest(googletest.TestCase):
+
+ def testFunctionParse(self):
+ functions, _ = testutil.ParseFunctionsAndComments(_FUNCTION_SCRIPT)
+ self.assertEquals(4, len(functions))
+
+ # First function
+ function = functions[0]
+ self.assertEquals(['aaa', 'bbb', 'ccc'], function.parameters)
+
+ start_token = function.start_token
+ end_token = function.end_token
+
+ self.assertEquals(
+ javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
+ function.start_token.type)
+
+ self.assertEquals('function', start_token.string)
+ self.assertEquals(3, start_token.line_number)
+ self.assertEquals(0, start_token.start_index)
+
+ self.assertEquals('}', end_token.string)
+ self.assertEquals(5, end_token.line_number)
+ self.assertEquals(0, end_token.start_index)
+
+ self.assertEquals('foo', function.name)
+
+ self.assertIsNone(function.doc)
+
+ # Second function
+ function = functions[1]
+ self.assertEquals(['ddd', 'eee', 'fff'], function.parameters)
+
+ start_token = function.start_token
+ end_token = function.end_token
+
+ self.assertEquals(
+ javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
+ function.start_token.type)
+
+ self.assertEquals('function', start_token.string)
+ self.assertEquals(11, start_token.line_number)
+ self.assertEquals(10, start_token.start_index)
+
+ self.assertEquals('}', end_token.string)
+ self.assertEquals(13, end_token.line_number)
+ self.assertEquals(0, end_token.start_index)
+
+ self.assertEquals('bar', function.name)
+
+ self.assertIsNotNone(function.doc)
+
+ # Check function JSDoc
+ doc = function.doc
+ doc_tokens = tokenutil.GetTokenRange(doc.start_token, doc.end_token)
+
+ comment_type = javascripttokens.JavaScriptTokenType.COMMENT
+ comment_tokens = filter(lambda t: t.type is comment_type, doc_tokens)
+
+ self.assertEquals('JSDoc comment.',
+ tokenutil.TokensToString(comment_tokens).strip())
+
+ # Third function
+ function = functions[2]
+ self.assertEquals(['ggg', 'hhh', 'iii'], function.parameters)
+
+ start_token = function.start_token
+ end_token = function.end_token
+
+ self.assertEquals(
+ javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
+ function.start_token.type)
+
+ self.assertEquals('function', start_token.string)
+ self.assertEquals(19, start_token.line_number)
+ self.assertEquals(10, start_token.start_index)
+
+ self.assertEquals('}', end_token.string)
+ self.assertEquals(24, end_token.line_number)
+ self.assertEquals(0, end_token.start_index)
+
+ self.assertEquals('baz', function.name)
+ self.assertIsNotNone(function.doc)
+
+ # Fourth function (inside third function)
+ function = functions[3]
+ self.assertEquals(['jjj', 'kkk', 'lll'], function.parameters)
+
+ start_token = function.start_token
+ end_token = function.end_token
+
+ self.assertEquals(
+ javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
+ function.start_token.type)
+
+ self.assertEquals('function', start_token.string)
+ self.assertEquals(20, start_token.line_number)
+ self.assertEquals(12, start_token.start_index)
+
+ self.assertEquals('}', end_token.string)
+ self.assertEquals(21, end_token.line_number)
+ self.assertEquals(2, end_token.start_index)
+
+ self.assertEquals('qux', function.name)
+ self.assertIsNone(function.doc)
+
+
+
+class CommentTest(googletest.TestCase):
+
+ def testGetDescription(self):
+ comment = self._ParseComment("""
+ /**
+ * Comment targeting goog.foo.
+ *
+ * This is the second line.
+ * @param {number} foo The count of foo.
+ */
+ target;""")
+
+ self.assertEqual(
+ 'Comment targeting goog.foo.\n\nThis is the second line.',
+ comment.description)
+
+ def testCommentGetTarget(self):
+ self.assertCommentTarget('goog.foo', """
+ /**
+ * Comment targeting goog.foo.
+ */
+ goog.foo = 6;
+ """)
+
+ self.assertCommentTarget('bar', """
+ /**
+ * Comment targeting bar.
+ */
+ var bar = "Karate!";
+ """)
+
+ self.assertCommentTarget('doThing', """
+ /**
+ * Comment targeting doThing.
+ */
+ function doThing() {};
+ """)
+
+ self.assertCommentTarget('this.targetProperty', """
+ goog.bar.Baz = function() {
+ /**
+ * Comment targeting targetProperty.
+ */
+ this.targetProperty = 3;
+ };
+ """)
+
+ self.assertCommentTarget('goog.bar.prop', """
+ /**
+ * Comment targeting goog.bar.prop.
+ */
+ goog.bar.prop;
+ """)
+
+ self.assertCommentTarget('goog.aaa.bbb', """
+ /**
+ * Comment targeting goog.aaa.bbb.
+ */
+ (goog.aaa.bbb)
+ """)
+
+ self.assertCommentTarget('theTarget', """
+ /**
+ * Comment targeting symbol preceded by newlines, whitespace,
+ * and parens -- things we ignore.
+ */
+ (theTarget)
+ """)
+
+ self.assertCommentTarget(None, """
+ /**
+ * @fileoverview File overview.
+ */
+ (notATarget)
+ """)
+
+ self.assertCommentTarget(None, """
+ /**
+ * Comment that doesn't find a target.
+ */
+ """)
+
+ self.assertCommentTarget('theTarget.is.split.across.lines', """
+ /**
+ * Comment that addresses a symbol split across lines.
+ */
+ (theTarget.is.split
+ .across.lines)
+ """)
+
+ self.assertCommentTarget('theTarget.is.split.across.lines', """
+ /**
+ * Comment that addresses a symbol split across lines.
+ */
+ (theTarget.is.split.
+ across.lines)
+ """)
+
+ def _ParseComment(self, script):
+ """Parse a script that contains one comment and return it."""
+ _, comments = testutil.ParseFunctionsAndComments(script)
+ self.assertEquals(1, len(comments))
+ return comments[0]
+
+ def assertCommentTarget(self, target, script):
+ comment = self._ParseComment(script)
+ self.assertEquals(target, comment.GetTargetIdentifier())
+
if __name__ == '__main__':
googletest.main()
-
diff --git a/tools/closure_linter/closure_linter/javascripttokenizer.py b/tools/closure_linter/closure_linter/javascripttokenizer.py
index 097d3fd116..2ee5b81ee1 100755
--- a/tools/closure_linter/closure_linter/javascripttokenizer.py
+++ b/tools/closure_linter/closure_linter/javascripttokenizer.py
@@ -51,7 +51,7 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
"""
# Useful patterns for JavaScript parsing.
- IDENTIFIER_CHAR = r'A-Za-z0-9_$.';
+ IDENTIFIER_CHAR = r'A-Za-z0-9_$'
# Number patterns based on:
# http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html
@@ -92,6 +92,9 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
# like in email addresses in the @author tag.
DOC_COMMENT_TEXT = re.compile(r'([^*{}\s]@|[^*{}@]|\*(?!/))+')
DOC_COMMENT_NO_SPACES_TEXT = re.compile(r'([^*{}\s]@|[^*{}@\s]|\*(?!/))+')
+ # Match anything that is allowed in a type definition, except for tokens
+ # needed to parse it (and the lookahead assertion for "*/").
+ DOC_COMMENT_TYPE_TEXT = re.compile(r'([^*|!?=<>(){}:,\s]|\*(?!/))+')
# Match the prefix ' * ' that starts every line of jsdoc. Want to include
# spaces after the '*', but nothing else that occurs after a '*', and don't
@@ -141,13 +144,25 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
# delete, in, instanceof, new, typeof - included as operators.
# this - included in identifiers.
# null, undefined - not included, should go in some "special constant" list.
- KEYWORD_LIST = ['break', 'case', 'catch', 'continue', 'default', 'do', 'else',
- 'finally', 'for', 'if', 'return', 'switch', 'throw', 'try', 'var',
- 'while', 'with']
- # Match a keyword string followed by a non-identifier character in order to
- # not match something like doSomething as do + Something.
- KEYWORD = re.compile('(%s)((?=[^%s])|$)' % (
- '|'.join(KEYWORD_LIST), IDENTIFIER_CHAR))
+ KEYWORD_LIST = [
+ 'break',
+ 'case',
+ 'catch',
+ 'continue',
+ 'default',
+ 'do',
+ 'else',
+ 'finally',
+ 'for',
+ 'if',
+ 'return',
+ 'switch',
+ 'throw',
+ 'try',
+ 'var',
+ 'while',
+ 'with',
+ ]
# List of regular expressions to match as operators. Some notes: for our
# purposes, the comma behaves similarly enough to a normal operator that we
@@ -155,19 +170,62 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
# characters - this may not match some very esoteric uses of the in operator.
# Operators that are subsets of larger operators must come later in this list
# for proper matching, e.g., '>>' must come AFTER '>>>'.
- OPERATOR_LIST = [',', r'\+\+', '===', '!==', '>>>=', '>>>', '==', '>=', '<=',
- '!=', '<<=', '>>=', '<<', '>>', '>', '<', r'\+=', r'\+',
- '--', '\^=', '-=', '-', '/=', '/', r'\*=', r'\*', '%=', '%',
- '&&', r'\|\|', '&=', '&', r'\|=', r'\|', '=', '!', ':', '\?',
- r'\bdelete\b', r'\bin\b', r'\binstanceof\b', r'\bnew\b',
- r'\btypeof\b', r'\bvoid\b']
+ OPERATOR_LIST = [
+ ',',
+ r'\+\+',
+ '===',
+ '!==',
+ '>>>=',
+ '>>>',
+ '==',
+ '>=',
+ '<=',
+ '!=',
+ '<<=',
+ '>>=',
+ '<<',
+ '>>',
+ '=>',
+ '>',
+ '<',
+ r'\+=',
+ r'\+',
+ '--',
+ r'\^=',
+ '-=',
+ '-',
+ '/=',
+ '/',
+ r'\*=',
+ r'\*',
+ '%=',
+ '%',
+ '&&',
+ r'\|\|',
+ '&=',
+ '&',
+ r'\|=',
+ r'\|',
+ '=',
+ '!',
+ ':',
+ r'\?',
+ r'\^',
+ r'\bdelete\b',
+ r'\bin\b',
+ r'\binstanceof\b',
+ r'\bnew\b',
+ r'\btypeof\b',
+ r'\bvoid\b',
+ r'\.',
+ ]
OPERATOR = re.compile('|'.join(OPERATOR_LIST))
WHITESPACE = re.compile(r'\s+')
SEMICOLON = re.compile(r';')
# Technically JavaScript identifiers can't contain '.', but we treat a set of
- # nested identifiers as a single identifier.
- NESTED_IDENTIFIER = r'[a-zA-Z_$][%s.]*' % IDENTIFIER_CHAR
+ # nested identifiers as a single identifier, except for trailing dots.
+ NESTED_IDENTIFIER = r'[a-zA-Z_$]([%s]|\.[a-zA-Z_$])*' % IDENTIFIER_CHAR
IDENTIFIER = re.compile(NESTED_IDENTIFIER)
SIMPLE_LVALUE = re.compile(r"""
@@ -181,13 +239,33 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
# beginning of the line, after whitespace, or after a '{'. The look-behind
# check is necessary to not match someone@google.com as a flag.
DOC_FLAG = re.compile(r'(^|(?<=\s))@(?P<name>[a-zA-Z]+)')
- # To properly parse parameter names, we need to tokenize whitespace into a
- # token.
- DOC_FLAG_LEX_SPACES = re.compile(r'(^|(?<=\s))@(?P<name>%s)\b' %
- '|'.join(['param']))
+ # To properly parse parameter names and complex doctypes containing
+ # whitespace, we need to tokenize whitespace into a token after certain
+  # doctags. All statetracker.HAS_TYPE flags that are not listed here must
+  # not contain any whitespace in their types.
+ DOC_FLAG_LEX_SPACES = re.compile(
+ r'(^|(?<=\s))@(?P<name>%s)\b' %
+ '|'.join([
+ 'const',
+ 'enum',
+ 'extends',
+ 'final',
+ 'implements',
+ 'param',
+ 'private',
+ 'protected',
+ 'public',
+ 'return',
+ 'type',
+ 'typedef'
+ ]))
DOC_INLINE_FLAG = re.compile(r'(?<={)@(?P<name>[a-zA-Z]+)')
+ DOC_TYPE_BLOCK_START = re.compile(r'[<(]')
+ DOC_TYPE_BLOCK_END = re.compile(r'[>)]')
+ DOC_TYPE_MODIFIERS = re.compile(r'[!?|,:=]')
+
# Star followed by non-slash, i.e a star that does not end a comment.
# This is used for TYPE_GROUP below.
SAFE_STAR = r'(\*(?!/))'
@@ -201,145 +279,165 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG),
Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG,
JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE),
- Matcher(DOC_FLAG, Type.DOC_FLAG),
+
+ # Encountering a doc flag should leave lex spaces mode.
+ Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE),
# Tokenize braces so we can find types.
Matcher(START_BLOCK, Type.DOC_START_BRACE),
Matcher(END_BLOCK, Type.DOC_END_BRACE),
- Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)]
-
-
- # The token matcher groups work as follows: it is an list of Matcher objects.
- # The matchers will be tried in this order, and the first to match will be
- # returned. Hence the order is important because the matchers that come first
- # overrule the matchers that come later.
- JAVASCRIPT_MATCHERS = {
- # Matchers for basic text mode.
- JavaScriptModes.TEXT_MODE: [
- # Check a big group - strings, starting comments, and regexes - all
- # of which could be intertwined. 'string with /regex/',
- # /regex with 'string'/, /* comment with /regex/ and string */ (and so on)
- Matcher(START_DOC_COMMENT, Type.START_DOC_COMMENT,
- JavaScriptModes.DOC_COMMENT_MODE),
- Matcher(START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
- JavaScriptModes.BLOCK_COMMENT_MODE),
- Matcher(END_OF_LINE_SINGLE_LINE_COMMENT,
- Type.START_SINGLE_LINE_COMMENT),
- Matcher(START_SINGLE_LINE_COMMENT, Type.START_SINGLE_LINE_COMMENT,
- JavaScriptModes.LINE_COMMENT_MODE),
- Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
- JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
- Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
- JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
- Matcher(REGEX, Type.REGEX),
-
- # Next we check for start blocks appearing outside any of the items above.
- Matcher(START_BLOCK, Type.START_BLOCK),
- Matcher(END_BLOCK, Type.END_BLOCK),
-
- # Then we search for function declarations.
- Matcher(FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
- JavaScriptModes.FUNCTION_MODE),
-
- # Next, we convert non-function related parens to tokens.
- Matcher(OPENING_PAREN, Type.START_PAREN),
- Matcher(CLOSING_PAREN, Type.END_PAREN),
-
- # Next, we convert brackets to tokens.
- Matcher(OPENING_BRACKET, Type.START_BRACKET),
- Matcher(CLOSING_BRACKET, Type.END_BRACKET),
-
- # Find numbers. This has to happen before operators because scientific
- # notation numbers can have + and - in them.
- Matcher(NUMBER, Type.NUMBER),
-
- # Find operators and simple assignments
- Matcher(SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
- Matcher(OPERATOR, Type.OPERATOR),
-
- # Find key words and whitespace
- Matcher(KEYWORD, Type.KEYWORD),
- Matcher(WHITESPACE, Type.WHITESPACE),
-
- # Find identifiers
- Matcher(IDENTIFIER, Type.IDENTIFIER),
-
- # Finally, we convert semicolons to tokens.
- Matcher(SEMICOLON, Type.SEMICOLON)],
-
-
- # Matchers for single quote strings.
- JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
- Matcher(SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
- Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
- JavaScriptModes.TEXT_MODE)],
-
-
- # Matchers for double quote strings.
- JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
- Matcher(DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
- Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
- JavaScriptModes.TEXT_MODE)],
-
-
- # Matchers for block comments.
- JavaScriptModes.BLOCK_COMMENT_MODE: [
- # First we check for exiting a block comment.
- Matcher(END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
- JavaScriptModes.TEXT_MODE),
-
- # Match non-comment-ending text..
- Matcher(BLOCK_COMMENT_TEXT, Type.COMMENT)],
-
-
- # Matchers for doc comments.
- JavaScriptModes.DOC_COMMENT_MODE: COMMON_DOC_MATCHERS + [
- Matcher(DOC_COMMENT_TEXT, Type.COMMENT)],
- JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: COMMON_DOC_MATCHERS + [
- Matcher(WHITESPACE, Type.COMMENT),
- Matcher(DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
+ # And some more to parse types.
+ Matcher(DOC_TYPE_BLOCK_START, Type.DOC_TYPE_START_BLOCK),
+ Matcher(DOC_TYPE_BLOCK_END, Type.DOC_TYPE_END_BLOCK),
- # Matchers for single line comments.
- JavaScriptModes.LINE_COMMENT_MODE: [
- # We greedy match until the end of the line in line comment mode.
- Matcher(ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
-
-
- # Matchers for code after the function keyword.
- JavaScriptModes.FUNCTION_MODE: [
- # Must match open paren before anything else and move into parameter mode,
- # otherwise everything inside the parameter list is parsed incorrectly.
- Matcher(OPENING_PAREN, Type.START_PARAMETERS,
- JavaScriptModes.PARAMETER_MODE),
- Matcher(WHITESPACE, Type.WHITESPACE),
- Matcher(IDENTIFIER, Type.FUNCTION_NAME)],
-
-
- # Matchers for function parameters
- JavaScriptModes.PARAMETER_MODE: [
- # When in function parameter mode, a closing paren is treated specially.
- # Everything else is treated as lines of parameters.
- Matcher(CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
- JavaScriptModes.TEXT_MODE),
- Matcher(PARAMETERS, Type.PARAMETERS, JavaScriptModes.PARAMETER_MODE)]}
+ Matcher(DOC_TYPE_MODIFIERS, Type.DOC_TYPE_MODIFIER),
+ Matcher(DOC_COMMENT_TYPE_TEXT, Type.COMMENT),
+ Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)]
# When text is not matched, it is given this default type based on mode.
# If unspecified in this map, the default default is Type.NORMAL.
JAVASCRIPT_DEFAULT_TYPES = {
- JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT,
- JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT
+ JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT,
+ JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT
}
- def __init__(self, parse_js_doc = True):
+ @classmethod
+ def BuildMatchers(cls):
+ """Builds the token matcher group.
+
+ The token matcher groups work as follows: it is a list of Matcher objects.
+ The matchers will be tried in this order, and the first to match will be
+ returned. Hence the order is important because the matchers that come first
+ overrule the matchers that come later.
+
+ Returns:
+ The completed token matcher group.
+ """
+ # Match a keyword string followed by a non-identifier character in order to
+ # not match something like doSomething as do + Something.
+ keyword = re.compile('(%s)((?=[^%s])|$)' % (
+ '|'.join(cls.KEYWORD_LIST), cls.IDENTIFIER_CHAR))
+ return {
+
+ # Matchers for basic text mode.
+ JavaScriptModes.TEXT_MODE: [
+ # Check a big group - strings, starting comments, and regexes - all
+ # of which could be intertwined. 'string with /regex/',
+ # /regex with 'string'/, /* comment with /regex/ and string */ (and
+ # so on)
+ Matcher(cls.START_DOC_COMMENT, Type.START_DOC_COMMENT,
+ JavaScriptModes.DOC_COMMENT_MODE),
+ Matcher(cls.START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
+ JavaScriptModes.BLOCK_COMMENT_MODE),
+ Matcher(cls.END_OF_LINE_SINGLE_LINE_COMMENT,
+ Type.START_SINGLE_LINE_COMMENT),
+ Matcher(cls.START_SINGLE_LINE_COMMENT,
+ Type.START_SINGLE_LINE_COMMENT,
+ JavaScriptModes.LINE_COMMENT_MODE),
+ Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
+ JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
+ Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
+ JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
+ Matcher(cls.REGEX, Type.REGEX),
+
+ # Next we check for start blocks appearing outside any of the items
+ # above.
+ Matcher(cls.START_BLOCK, Type.START_BLOCK),
+ Matcher(cls.END_BLOCK, Type.END_BLOCK),
+
+ # Then we search for function declarations.
+ Matcher(cls.FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
+ JavaScriptModes.FUNCTION_MODE),
+
+ # Next, we convert non-function related parens to tokens.
+ Matcher(cls.OPENING_PAREN, Type.START_PAREN),
+ Matcher(cls.CLOSING_PAREN, Type.END_PAREN),
+
+ # Next, we convert brackets to tokens.
+ Matcher(cls.OPENING_BRACKET, Type.START_BRACKET),
+ Matcher(cls.CLOSING_BRACKET, Type.END_BRACKET),
+
+ # Find numbers. This has to happen before operators because
+ # scientific notation numbers can have + and - in them.
+ Matcher(cls.NUMBER, Type.NUMBER),
+
+ # Find operators and simple assignments
+ Matcher(cls.SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
+ Matcher(cls.OPERATOR, Type.OPERATOR),
+
+ # Find key words and whitespace.
+ Matcher(keyword, Type.KEYWORD),
+ Matcher(cls.WHITESPACE, Type.WHITESPACE),
+
+ # Find identifiers.
+ Matcher(cls.IDENTIFIER, Type.IDENTIFIER),
+
+ # Finally, we convert semicolons to tokens.
+ Matcher(cls.SEMICOLON, Type.SEMICOLON)],
+
+ # Matchers for single quote strings.
+ JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
+ Matcher(cls.SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
+ Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
+ JavaScriptModes.TEXT_MODE)],
+
+ # Matchers for double quote strings.
+ JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
+ Matcher(cls.DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
+ Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
+ JavaScriptModes.TEXT_MODE)],
+
+ # Matchers for block comments.
+ JavaScriptModes.BLOCK_COMMENT_MODE: [
+ # First we check for exiting a block comment.
+ Matcher(cls.END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
+ JavaScriptModes.TEXT_MODE),
+
+                # Match non-comment-ending text.
+ Matcher(cls.BLOCK_COMMENT_TEXT, Type.COMMENT)],
+
+ # Matchers for doc comments.
+ JavaScriptModes.DOC_COMMENT_MODE: cls.COMMON_DOC_MATCHERS + [
+ Matcher(cls.DOC_COMMENT_TEXT, Type.COMMENT)],
+
+ JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: cls.COMMON_DOC_MATCHERS + [
+ Matcher(cls.WHITESPACE, Type.COMMENT),
+ Matcher(cls.DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
+
+ # Matchers for single line comments.
+ JavaScriptModes.LINE_COMMENT_MODE: [
+                # We greedily match until the end of the line in line comment
+                # mode.
+ Matcher(cls.ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
+
+ # Matchers for code after the function keyword.
+ JavaScriptModes.FUNCTION_MODE: [
+ # Must match open paren before anything else and move into parameter
+ # mode, otherwise everything inside the parameter list is parsed
+ # incorrectly.
+ Matcher(cls.OPENING_PAREN, Type.START_PARAMETERS,
+ JavaScriptModes.PARAMETER_MODE),
+ Matcher(cls.WHITESPACE, Type.WHITESPACE),
+ Matcher(cls.IDENTIFIER, Type.FUNCTION_NAME)],
+
+ # Matchers for function parameters
+ JavaScriptModes.PARAMETER_MODE: [
+ # When in function parameter mode, a closing paren is treated
+ # specially. Everything else is treated as lines of parameters.
+ Matcher(cls.CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
+ JavaScriptModes.TEXT_MODE),
+ Matcher(cls.PARAMETERS, Type.PARAMETERS,
+ JavaScriptModes.PARAMETER_MODE)]}
+
+ def __init__(self, parse_js_doc=True):
"""Create a tokenizer object.
Args:
parse_js_doc: Whether to do detailed parsing of javascript doc comments,
or simply treat them as normal comments. Defaults to parsing JsDoc.
"""
- matchers = self.JAVASCRIPT_MATCHERS
+ matchers = self.BuildMatchers()
if not parse_js_doc:
# Make a copy so the original doesn't get modified.
matchers = copy.deepcopy(matchers)
@@ -362,4 +460,4 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
name of the function.
"""
return javascripttokens.JavaScriptToken(string, token_type, line,
- line_number, values)
+ line_number, values, line_number)
diff --git a/tools/closure_linter/closure_linter/javascripttokens.py b/tools/closure_linter/closure_linter/javascripttokens.py
index f46d4e17bc..f5815d2bf8 100755
--- a/tools/closure_linter/closure_linter/javascripttokens.py
+++ b/tools/closure_linter/closure_linter/javascripttokens.py
@@ -53,6 +53,9 @@ class JavaScriptTokenType(tokens.TokenType):
DOC_START_BRACE = 'doc {'
DOC_END_BRACE = 'doc }'
DOC_PREFIX = 'comment prefix: * '
+ DOC_TYPE_START_BLOCK = 'Type <'
+ DOC_TYPE_END_BLOCK = 'Type >'
+ DOC_TYPE_MODIFIER = 'modifier'
SIMPLE_LVALUE = 'lvalue='
KEYWORD = 'keyword'
OPERATOR = 'operator'
@@ -62,14 +65,17 @@ class JavaScriptTokenType(tokens.TokenType):
SINGLE_QUOTE_STRING_START, SINGLE_QUOTE_STRING_END,
DOUBLE_QUOTE_STRING_START, DOUBLE_QUOTE_STRING_END, STRING_TEXT])
- COMMENT_TYPES = frozenset([START_SINGLE_LINE_COMMENT, COMMENT,
+ COMMENT_TYPES = frozenset([
+ START_SINGLE_LINE_COMMENT, COMMENT,
START_BLOCK_COMMENT, START_DOC_COMMENT,
END_BLOCK_COMMENT, END_DOC_COMMENT,
DOC_START_BRACE, DOC_END_BRACE,
- DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX])
+ DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX,
+ DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER])
FLAG_DESCRIPTION_TYPES = frozenset([
- DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE])
+ DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE,
+ DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER])
FLAG_ENDING_TYPES = frozenset([DOC_FLAG, END_DOC_COMMENT])
diff --git a/tools/closure_linter/closure_linter/not_strict_test.py b/tools/closure_linter/closure_linter/not_strict_test.py
new file mode 100755
index 0000000000..c92c13ee03
--- /dev/null
+++ b/tools/closure_linter/closure_linter/not_strict_test.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for gjslint --nostrict.
+
+Tests errors that can be thrown by gjslint when not in strict mode.
+"""
+
+
+
+import os
+import sys
+import unittest
+
+import gflags as flags
+import unittest as googletest
+
+from closure_linter import errors
+from closure_linter import runner
+from closure_linter.common import filetestcase
+
+_RESOURCE_PREFIX = 'closure_linter/testdata'
+
+flags.FLAGS.strict = False
+flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
+flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
+flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
+ 'limited_doc_checks.js')
+
+
+# List of files under testdata to test.
+# We need to list files explicitly since pyglib can't list directories.
+_TEST_FILES = [
+ 'not_strict.js'
+ ]
+
+
+class GJsLintTestSuite(unittest.TestSuite):
+ """Test suite to run a GJsLintTest for each of several files.
+
+ If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
+ testdata to test. Otherwise, _TEST_FILES is used.
+ """
+
+ def __init__(self, tests=()):
+ unittest.TestSuite.__init__(self, tests)
+
+ argv = sys.argv and sys.argv[1:] or []
+ if argv:
+ test_files = argv
+ else:
+ test_files = _TEST_FILES
+ for test_file in test_files:
+ resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
+ self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
+ runner.Run,
+ errors.ByName))
+
+if __name__ == '__main__':
+ # Don't let main parse args; it happens in the TestSuite.
+ googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
diff --git a/tools/closure_linter/closure_linter/requireprovidesorter.py b/tools/closure_linter/closure_linter/requireprovidesorter.py
new file mode 100755
index 0000000000..e7e08a13c2
--- /dev/null
+++ b/tools/closure_linter/closure_linter/requireprovidesorter.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Contains logic for sorting goog.provide and goog.require statements.
+
+Closurized JavaScript files use goog.provide and goog.require statements at the
+top of the file to manage dependencies. These statements should be sorted
+alphabetically, however, it is common for them to be accompanied by inline
+comments or suppression annotations. In order to sort these statements without
+disrupting their comments and annotations, the association between statements
+and comments/annotations must be maintained while sorting.
+
+ RequireProvideSorter: Handles checking/fixing of provide/require statements.
+"""
+
+
+
+from closure_linter import javascripttokens
+from closure_linter import tokenutil
+
+# Shorthand
+Type = javascripttokens.JavaScriptTokenType
+
+
+class RequireProvideSorter(object):
+ """Checks for and fixes alphabetization of provide and require statements.
+
+ When alphabetizing, comments on the same line or comments directly above a
+ goog.provide or goog.require statement are associated with that statement and
+ stay with the statement as it gets sorted.
+ """
+
+ def CheckProvides(self, token):
+ """Checks alphabetization of goog.provide statements.
+
+ Iterates over tokens in given token stream, identifies goog.provide tokens,
+ and checks that they occur in alphabetical order by the object being
+ provided.
+
+ Args:
+ token: A token in the token stream before any goog.provide tokens.
+
+ Returns:
+ The first provide token in the token stream.
+
+ None is returned if all goog.provide statements are already sorted.
+ """
+ provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
+ provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
+ sorted_provide_strings = sorted(provide_strings)
+ if provide_strings != sorted_provide_strings:
+ return provide_tokens[0]
+ return None
+
+ def CheckRequires(self, token):
+ """Checks alphabetization of goog.require statements.
+
+ Iterates over tokens in given token stream, identifies goog.require tokens,
+ and checks that they occur in alphabetical order by the dependency being
+ required.
+
+ Args:
+ token: A token in the token stream before any goog.require tokens.
+
+ Returns:
+ The first require token in the token stream.
+
+ None is returned if all goog.require statements are already sorted.
+ """
+ require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
+ require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
+ sorted_require_strings = sorted(require_strings)
+ if require_strings != sorted_require_strings:
+ return require_tokens[0]
+ return None
+
+ def FixProvides(self, token):
+ """Sorts goog.provide statements in the given token stream alphabetically.
+
+ Args:
+ token: The first token in the token stream.
+ """
+ self._FixProvidesOrRequires(
+ self._GetRequireOrProvideTokens(token, 'goog.provide'))
+
+ def FixRequires(self, token):
+ """Sorts goog.require statements in the given token stream alphabetically.
+
+ Args:
+ token: The first token in the token stream.
+ """
+ self._FixProvidesOrRequires(
+ self._GetRequireOrProvideTokens(token, 'goog.require'))
+
+ def _FixProvidesOrRequires(self, tokens):
+ """Sorts goog.provide or goog.require statements.
+
+ Args:
+ tokens: A list of goog.provide or goog.require tokens in the order they
+ appear in the token stream. i.e. the first token in this list must
+ be the first goog.provide or goog.require token.
+ """
+ strings = self._GetRequireOrProvideTokenStrings(tokens)
+ sorted_strings = sorted(strings)
+
+ # Make a separate pass to remove any blank lines between goog.require/
+ # goog.provide tokens.
+ first_token = tokens[0]
+ last_token = tokens[-1]
+ i = last_token
+ while i != first_token and i is not None:
+ if i.type is Type.BLANK_LINE:
+ tokenutil.DeleteToken(i)
+ i = i.previous
+
+ # A map from required/provided object name to tokens that make up the line
+ # it was on, including any comments immediately before it or after it on the
+ # same line.
+ tokens_map = self._GetTokensMap(tokens)
+
+ # Iterate over the map removing all tokens.
+ for name in tokens_map:
+ tokens_to_delete = tokens_map[name]
+ for i in tokens_to_delete:
+ tokenutil.DeleteToken(i)
+
+ # Save token to rest of file. Sorted token will be inserted before this.
+ rest_of_file = tokens_map[strings[-1]][-1].next
+
+ # Re-add all tokens in the map in alphabetical order.
+ insert_after = tokens[0].previous
+ for string in sorted_strings:
+ for i in tokens_map[string]:
+ if rest_of_file:
+ tokenutil.InsertTokenBefore(i, rest_of_file)
+ else:
+ tokenutil.InsertTokenAfter(i, insert_after)
+ insert_after = i
+
+ def _GetRequireOrProvideTokens(self, token, token_string):
+ """Gets all goog.provide or goog.require tokens in the given token stream.
+
+ Args:
+ token: The first token in the token stream.
+ token_string: One of 'goog.provide' or 'goog.require' to indicate which
+ tokens to find.
+
+ Returns:
+ A list of goog.provide or goog.require tokens in the order they appear in
+ the token stream.
+ """
+ tokens = []
+ while token:
+ if token.type == Type.IDENTIFIER:
+ if token.string == token_string:
+ tokens.append(token)
+ elif token.string not in [
+ 'goog.provide', 'goog.require', 'goog.setTestOnly']:
+ # These 3 identifiers are at the top of the file. So if any other
+ # identifier is encountered, return.
+ # TODO(user): Once it's decided what ordering goog.require
+ # should use, add 'goog.module' to the list above and implement the
+ # decision.
+ break
+ token = token.next
+
+ return tokens
+
+ def _GetRequireOrProvideTokenStrings(self, tokens):
+ """Gets a list of strings corresponding to the given list of tokens.
+
+ The string will be the next string in the token stream after each token in
+ tokens. This is used to find the object being provided/required by a given
+ goog.provide or goog.require token.
+
+ Args:
+ tokens: A list of goog.provide or goog.require tokens.
+
+ Returns:
+ A list of object names that are being provided or required by the given
+ list of tokens. For example:
+
+ ['object.a', 'object.c', 'object.b']
+ """
+ token_strings = []
+ for token in tokens:
+ if not token.is_deleted:
+ name = tokenutil.GetStringAfterToken(token)
+ token_strings.append(name)
+ return token_strings
+
+ def _GetTokensMap(self, tokens):
+ """Gets a map from object name to tokens associated with that object.
+
+ Starting from the goog.provide/goog.require token, searches backwards in the
+ token stream for any lines that start with a comment. These lines are
+ associated with the goog.provide/goog.require token. Also associates any
+ tokens on the same line as the goog.provide/goog.require token with that
+ token.
+
+ Args:
+ tokens: A list of goog.provide or goog.require tokens.
+
+ Returns:
+ A dictionary that maps object names to the tokens associated with the
+ goog.provide or goog.require of that object name. For example:
+
+ {
+ 'object.a': [JavaScriptToken, JavaScriptToken, ...],
+ 'object.b': [...]
+ }
+
+ The list of tokens includes any comment lines above the goog.provide or
+ goog.require statement and everything after the statement on the same
+ line. For example, all of the following would be associated with
+ 'object.a':
+
+ /** @suppress {extraRequire} */
+ goog.require('object.a'); // Some comment.
+ """
+ tokens_map = {}
+ for token in tokens:
+ object_name = tokenutil.GetStringAfterToken(token)
+ # If the previous line starts with a comment, presume that the comment
+ # relates to the goog.require or goog.provide and keep them together when
+ # sorting.
+ first_token = token
+ previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
+ while (previous_first_token and
+ previous_first_token.IsAnyType(Type.COMMENT_TYPES)):
+ first_token = previous_first_token
+ previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
+ first_token)
+
+ # Find the last token on the line.
+ last_token = tokenutil.GetLastTokenInSameLine(token)
+
+ all_tokens = self._GetTokenList(first_token, last_token)
+ tokens_map[object_name] = all_tokens
+ return tokens_map
+
+ def _GetTokenList(self, first_token, last_token):
+ """Gets a list of all tokens from first_token to last_token, inclusive.
+
+ Args:
+ first_token: The first token to get.
+ last_token: The last token to get.
+
+ Returns:
+ A list of all tokens between first_token and last_token, including both
+ first_token and last_token.
+
+ Raises:
+ Exception: If the token stream ends before last_token is reached.
+ """
+ token_list = []
+ token = first_token
+ while token != last_token:
+ if not token:
+ raise Exception('ran out of tokens')
+ token_list.append(token)
+ token = token.next
+ token_list.append(last_token)
+
+ return token_list
+
+ def GetFixedRequireString(self, token):
+ """Get fixed/sorted order of goog.require statements.
+
+ Args:
+ token: The first token in the token stream.
+
+ Returns:
+ A string for correct sorted order of goog.require.
+ """
+ return self._GetFixedRequireOrProvideString(
+ self._GetRequireOrProvideTokens(token, 'goog.require'))
+
+ def GetFixedProvideString(self, token):
+ """Get fixed/sorted order of goog.provide statements.
+
+ Args:
+ token: The first token in the token stream.
+
+ Returns:
+ A string for correct sorted order of goog.provide.
+ """
+ return self._GetFixedRequireOrProvideString(
+ self._GetRequireOrProvideTokens(token, 'goog.provide'))
+
+ def _GetFixedRequireOrProvideString(self, tokens):
+ """Sorts goog.provide or goog.require statements.
+
+ Args:
+ tokens: A list of goog.provide or goog.require tokens in the order they
+ appear in the token stream. i.e. the first token in this list must
+ be the first goog.provide or goog.require token.
+
+ Returns:
+ A string for sorted goog.require or goog.provide statements
+ """
+
+ # A map from required/provided object name to tokens that make up the line
+ # it was on, including any comments immediately before it or after it on the
+ # same line.
+ tokens_map = self._GetTokensMap(tokens)
+ sorted_strings = sorted(tokens_map.keys())
+
+ new_order = ''
+ for string in sorted_strings:
+ for i in tokens_map[string]:
+ new_order += i.string
+ if i.IsLastInLine():
+ new_order += '\n'
+
+ return new_order
diff --git a/tools/closure_linter/closure_linter/requireprovidesorter_test.py b/tools/closure_linter/closure_linter/requireprovidesorter_test.py
new file mode 100644
index 0000000000..fecb6d04da
--- /dev/null
+++ b/tools/closure_linter/closure_linter/requireprovidesorter_test.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for RequireProvideSorter."""
+
+
+
+import unittest as googletest
+from closure_linter import javascripttokens
+from closure_linter import requireprovidesorter
+from closure_linter import testutil
+
+# pylint: disable=g-bad-name
+TokenType = javascripttokens.JavaScriptTokenType
+
+
+class RequireProvideSorterTest(googletest.TestCase):
+ """Tests for RequireProvideSorter."""
+
+ def testGetFixedProvideString(self):
+ """Tests that fixed string constains proper comments also."""
+ input_lines = [
+ 'goog.provide(\'package.xyz\');',
+ '/** @suppress {extraprovide} **/',
+ 'goog.provide(\'package.abcd\');'
+ ]
+
+ expected_lines = [
+ '/** @suppress {extraprovide} **/',
+ 'goog.provide(\'package.abcd\');',
+ 'goog.provide(\'package.xyz\');'
+ ]
+
+ token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
+
+ sorter = requireprovidesorter.RequireProvideSorter()
+ fixed_provide_string = sorter.GetFixedProvideString(token)
+
+ self.assertEquals(expected_lines, fixed_provide_string.splitlines())
+
+ def testGetFixedRequireString(self):
+ """Tests that fixed string constains proper comments also."""
+ input_lines = [
+ 'goog.require(\'package.xyz\');',
+ '/** This is needed for scope. **/',
+ 'goog.require(\'package.abcd\');'
+ ]
+
+ expected_lines = [
+ '/** This is needed for scope. **/',
+ 'goog.require(\'package.abcd\');',
+ 'goog.require(\'package.xyz\');'
+ ]
+
+ token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
+
+ sorter = requireprovidesorter.RequireProvideSorter()
+ fixed_require_string = sorter.GetFixedRequireString(token)
+
+ self.assertEquals(expected_lines, fixed_require_string.splitlines())
+
+ def testFixRequires_removeBlankLines(self):
+ """Tests that blank lines are omitted in sorted goog.require statements."""
+ input_lines = [
+ 'goog.provide(\'package.subpackage.Whatever\');',
+ '',
+ 'goog.require(\'package.subpackage.ClassB\');',
+ '',
+ 'goog.require(\'package.subpackage.ClassA\');'
+ ]
+ expected_lines = [
+ 'goog.provide(\'package.subpackage.Whatever\');',
+ '',
+ 'goog.require(\'package.subpackage.ClassA\');',
+ 'goog.require(\'package.subpackage.ClassB\');'
+ ]
+ token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
+
+ sorter = requireprovidesorter.RequireProvideSorter()
+ sorter.FixRequires(token)
+
+ self.assertEquals(expected_lines, self._GetLines(token))
+
+ def fixRequiresTest_withTestOnly(self, position):
+ """Regression-tests sorting even with a goog.setTestOnly statement.
+
+ Args:
+ position: The position in the list where to insert the goog.setTestOnly
+ statement. Will be used to test all possible combinations for
+ this test.
+ """
+ input_lines = [
+ 'goog.provide(\'package.subpackage.Whatever\');',
+ '',
+ 'goog.require(\'package.subpackage.ClassB\');',
+ 'goog.require(\'package.subpackage.ClassA\');'
+ ]
+ expected_lines = [
+ 'goog.provide(\'package.subpackage.Whatever\');',
+ '',
+ 'goog.require(\'package.subpackage.ClassA\');',
+ 'goog.require(\'package.subpackage.ClassB\');'
+ ]
+ input_lines.insert(position, 'goog.setTestOnly();')
+ expected_lines.insert(position, 'goog.setTestOnly();')
+
+ token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
+
+ sorter = requireprovidesorter.RequireProvideSorter()
+ sorter.FixRequires(token)
+
+ self.assertEquals(expected_lines, self._GetLines(token))
+
+ def testFixRequires_withTestOnly(self):
+ """Regression-tests sorting even after a goog.setTestOnly statement."""
+
+ # goog.setTestOnly at first line.
+ self.fixRequiresTest_withTestOnly(position=0)
+
+ # goog.setTestOnly after goog.provide.
+ self.fixRequiresTest_withTestOnly(position=1)
+
+ # goog.setTestOnly before goog.require.
+ self.fixRequiresTest_withTestOnly(position=2)
+
+ # goog.setTestOnly after goog.require.
+ self.fixRequiresTest_withTestOnly(position=4)
+
+ def _GetLines(self, token):
+ """Returns an array of lines based on the specified token stream."""
+ lines = []
+ line = ''
+ while token:
+ line += token.string
+ if token.IsLastInLine():
+ lines.append(line)
+ line = ''
+ token = token.next
+ return lines
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/closure_linter/runner.py b/tools/closure_linter/closure_linter/runner.py
new file mode 100644
index 0000000000..04e7fa4ac8
--- /dev/null
+++ b/tools/closure_linter/closure_linter/runner.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Main lint function. Tokenizes file, runs passes, and feeds to checker."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = 'nnaze@google.com (Nathan Naze)'
+
+import traceback
+
+import gflags as flags
+
+from closure_linter import checker
+from closure_linter import ecmalintrules
+from closure_linter import ecmametadatapass
+from closure_linter import error_check
+from closure_linter import errors
+from closure_linter import javascriptstatetracker
+from closure_linter import javascripttokenizer
+
+from closure_linter.common import error
+from closure_linter.common import htmlutil
+from closure_linter.common import tokens
+
+flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
+ 'List of files with relaxed documentation checks. Will not '
+ 'report errors for missing documentation, some missing '
+ 'descriptions, or methods whose @return tags don\'t have a '
+ 'matching return statement.')
+flags.DEFINE_boolean('error_trace', False,
+ 'Whether to show error exceptions.')
+flags.ADOPT_module_key_flags(checker)
+flags.ADOPT_module_key_flags(ecmalintrules)
+flags.ADOPT_module_key_flags(error_check)
+
+
def _GetLastNonWhiteSpaceToken(start_token):
  """Returns the final non-whitespace token of a token stream.

  Args:
    start_token: First token of the stream; the stream is iterable.

  Returns:
    The last token whose type is neither WHITESPACE nor BLANK_LINE, or None
    if the stream contains only whitespace tokens.
  """
  skip_types = frozenset([
      tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])

  last_seen = None
  for candidate in start_token:
    if candidate.type not in skip_types:
      last_seen = candidate
  return last_seen
+
+
+def _IsHtml(filename):
+ return filename.endswith('.html') or filename.endswith('.htm')
+
+
def _Tokenize(fileobj):
  """Tokenize a file.

  Args:
    fileobj: file-like object (or iterable lines) with the source.

  Returns:
    A (start_token, mode) tuple: the first token in the token stream and the
    ending mode of the tokenizer (used by callers to detect an unterminated
    comment or block).
  """
  tokenizer = javascripttokenizer.JavaScriptTokenizer()
  start_token = tokenizer.TokenizeFile(fileobj)
  return start_token, tokenizer.mode
+
+
+def _IsLimitedDocCheck(filename, limited_doc_files):
+ """Whether this this a limited-doc file.
+
+ Args:
+ filename: The filename.
+ limited_doc_files: Iterable of strings. Suffixes of filenames that should
+ be limited doc check.
+
+ Returns:
+ Whether the file should be limited check.
+ """
+ for limited_doc_filename in limited_doc_files:
+ if filename.endswith(limited_doc_filename):
+ return True
+ return False
+
+
def Run(filename, error_handler, source=None):
  """Tokenize, run passes, and check the given file.

  Args:
    filename: The path of the file to check
    error_handler: The error handler to report errors to.
    source: A file-like object with the file source. If omitted, the file will
        be read from the filename path.
  """
  # Track whether we opened the file ourselves; if so we are responsible for
  # closing it (the previous version leaked the handle).
  opened_here = False
  if not source:
    try:
      source = open(filename)
      opened_here = True
    except IOError:
      error_handler.HandleFile(filename, None)
      error_handler.HandleError(
          error.Error(errors.FILE_NOT_FOUND, 'File not found'))
      error_handler.FinishFile()
      return

  try:
    if _IsHtml(filename):
      source_file = htmlutil.GetScriptLines(source)
    else:
      source_file = source

    # The tokenizer consumes the whole stream here, so the handle can be
    # released as soon as tokenization finishes.
    token, tokenizer_mode = _Tokenize(source_file)
  finally:
    if opened_here:
      source.close()

  error_handler.HandleFile(filename, token)

  # If we did not end in the basic mode, this a failed parse.
  if tokenizer_mode is not javascripttokenizer.JavaScriptModes.TEXT_MODE:
    error_handler.HandleError(
        error.Error(errors.FILE_IN_BLOCK,
                    'File ended in mode "%s".' % tokenizer_mode,
                    _GetLastNonWhiteSpaceToken(token)))

  # Run the ECMA pass
  error_token = None

  ecma_pass = ecmametadatapass.EcmaMetaDataPass()
  error_token = RunMetaDataPass(token, ecma_pass, error_handler, filename)

  is_limited_doc_check = (
      _IsLimitedDocCheck(filename, flags.FLAGS.limited_doc_files))

  _RunChecker(token, error_handler,
              is_limited_doc_check,
              is_html=_IsHtml(filename),
              stop_token=error_token)

  error_handler.FinishFile()
+
+
def RunMetaDataPass(start_token, metadata_pass, error_handler, filename=''):
  """Run a metadata pass over a token stream.

  Args:
    start_token: The first token in a token stream.
    metadata_pass: Metadata pass to run.
    error_handler: The error handler to report errors to.
    filename: Filename of the source.

  Returns:
    The token where a parse error occurred, or None when the pass succeeded
    (or failed with an internal, non-parse error).
  """

  try:
    metadata_pass.Process(start_token)
  except ecmametadatapass.ParseError, parse_err:
    # A parse error means the rest of the file cannot be checked; return the
    # offending token so the checker knows where to stop.
    if flags.FLAGS.error_trace:
      traceback.print_exc()
    error_token = parse_err.token
    error_msg = str(parse_err)
    error_handler.HandleError(
        error.Error(errors.FILE_DOES_NOT_PARSE,
                    ('Error parsing file at token "%s". Unable to '
                     'check the rest of file.'
                     '\nError "%s"' % (error_token, error_msg)), error_token))
    return error_token
  except Exception:  # pylint: disable=broad-except
    # Unexpected failures are always traced (unlike parse errors, which only
    # trace under --error_trace) and yield no stop token.
    traceback.print_exc()
    error_handler.HandleError(
        error.Error(
            errors.FILE_DOES_NOT_PARSE,
            'Internal error in %s' % filename))
+
+
def _RunChecker(start_token, error_handler,
                limited_doc_checks, is_html,
                stop_token=None):
  """Runs the JavaScript style checker over a token stream.

  Args:
    start_token: The first token of the stream to check.
    error_handler: The error handler to report errors to.
    limited_doc_checks: Whether documentation checks should be relaxed.
    is_html: Whether the tokens came from an HTML file's script blocks.
    stop_token: Optional token at which checking stops (e.g. where the
        metadata pass hit a parse error).
  """

  state_tracker = javascriptstatetracker.JavaScriptStateTracker()

  style_checker = checker.JavaScriptStyleChecker(
      state_tracker=state_tracker,
      error_handler=error_handler)

  style_checker.Check(start_token,
                      is_html=is_html,
                      limited_doc_checks=limited_doc_checks,
                      stop_token=stop_token)
diff --git a/tools/closure_linter/closure_linter/runner_test.py b/tools/closure_linter/closure_linter/runner_test.py
new file mode 100644
index 0000000000..da5857d309
--- /dev/null
+++ b/tools/closure_linter/closure_linter/runner_test.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the runner module."""
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import StringIO
+
+
+import mox
+
+
+import unittest as googletest
+
+from closure_linter import errors
+from closure_linter import runner
+from closure_linter.common import error
+from closure_linter.common import errorhandler
+from closure_linter.common import tokens
+
+
class LimitedDocTest(googletest.TestCase):
  """Tests suffix matching in runner._IsLimitedDocCheck."""

  def testIsLimitedDocCheck(self):
    self.assertTrue(runner._IsLimitedDocCheck('foo_test.js', ['_test.js']))
    self.assertFalse(runner._IsLimitedDocCheck('foo_bar.js', ['_test.js']))

    self.assertTrue(runner._IsLimitedDocCheck(
        'foo_moo.js', ['moo.js', 'quack.js']))
    self.assertFalse(runner._IsLimitedDocCheck(
        'foo_moo.js', ['woof.js', 'quack.js']))
+
+
class RunnerTest(googletest.TestCase):
  """Tests runner.Run error reporting via a mocked ErrorHandler.

  Uses mox record/replay: expectations are recorded on the mock, ReplayAll
  switches to replay mode, and VerifyAll asserts every expected call was made.
  """

  def setUp(self):
    self.mox = mox.Mox()

  def testRunOnMissingFile(self):
    mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler)

    def ValidateError(err):
      # Expect exactly a FILE_NOT_FOUND error with no associated token.
      return (isinstance(err, error.Error) and
              err.code is errors.FILE_NOT_FOUND and
              err.token is None)

    mock_error_handler.HandleFile('does_not_exist.js', None)
    mock_error_handler.HandleError(mox.Func(ValidateError))
    mock_error_handler.FinishFile()

    self.mox.ReplayAll()

    runner.Run('does_not_exist.js', mock_error_handler)

    self.mox.VerifyAll()

  def testBadTokenization(self):
    mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler)

    def ValidateError(err):
      # The unterminated comment should be reported at the trailing '}'.
      return (isinstance(err, error.Error) and
              err.code is errors.FILE_IN_BLOCK and
              err.token.string == '}')

    mock_error_handler.HandleFile('foo.js', mox.IsA(tokens.Token))
    mock_error_handler.HandleError(mox.Func(ValidateError))
    mock_error_handler.HandleError(mox.IsA(error.Error))
    mock_error_handler.FinishFile()

    self.mox.ReplayAll()

    source = StringIO.StringIO(_BAD_TOKENIZATION_SCRIPT)
    runner.Run('foo.js', mock_error_handler, source)

    self.mox.VerifyAll()
+
+
# Fixture whose block comment is never closed, forcing the tokenizer to end
# in a non-text mode (exercised by testBadTokenization).
_BAD_TOKENIZATION_SCRIPT = """
function foo () {
  var a = 3;
  var b = 2;
  return b + a; /* Comment not closed
}
"""
+
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/closure_linter/scopeutil.py b/tools/closure_linter/closure_linter/scopeutil.py
new file mode 100644
index 0000000000..a7ca9b630a
--- /dev/null
+++ b/tools/closure_linter/closure_linter/scopeutil.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools to match goog.scope alias statements."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import itertools
+
+from closure_linter import ecmametadatapass
+from closure_linter import tokenutil
+from closure_linter.javascripttokens import JavaScriptTokenType
+
+
+
def IsGoogScopeBlock(context):
  """Whether the given context is a goog.scope block.

  This function only checks that the block is a function block inside
  a goog.scope() call.

  TODO(nnaze): Implement goog.scope checks that verify the call is
  in the root context and contains only a single function literal.

  Args:
    context: An EcmaContext of type block.

  Returns:
    Whether the context is a goog.scope block.
  """

  if context.type != ecmametadatapass.EcmaContext.BLOCK:
    return False

  if not _IsFunctionLiteralBlock(context):
    return False

  # Check that this function is contained by a group
  # of form "goog.scope(...)".
  parent = context.parent
  if parent and parent.type is ecmametadatapass.EcmaContext.GROUP:

    # The last code token before the group's opening paren names the called
    # function; for a scope block it must be exactly "goog.scope".
    last_code_token = parent.start_token.metadata.last_code

    if (last_code_token and
        last_code_token.type is JavaScriptTokenType.IDENTIFIER and
        last_code_token.string == 'goog.scope'):
      return True

  return False
+
+
def _IsFunctionLiteralBlock(block_context):
  """Check if a context is a function literal block (without parameters).

  Example function literal block: 'function() {}'

  Args:
    block_context: An EcmaContext of type block.

  Returns:
    Whether this context is a function literal block.
  """

  # Filter on the token's *type*. The previous code compared the token object
  # itself against the set of type constants ('token not in NON_CODE_TYPES'),
  # which never matches, making the filter a no-op and breaking detection when
  # comments or whitespace precede the block (cf. the correct lambda in
  # _GetVarAssignmentTokens).
  previous_code_tokens_iter = itertools.ifilter(
      lambda token: token.type not in JavaScriptTokenType.NON_CODE_TYPES,
      reversed(block_context.start_token))

  # Ignore the current token
  next(previous_code_tokens_iter, None)

  # Grab the previous three tokens and put them in correct order.
  previous_code_tokens = list(itertools.islice(previous_code_tokens_iter, 3))
  previous_code_tokens.reverse()

  # There aren't three previous tokens.
  if len(previous_code_tokens) != 3:  # '!=' not 'is not': value comparison.
    return False

  # Check that the previous three code tokens are "function ()"
  previous_code_token_types = [token.type for token in previous_code_tokens]
  return previous_code_token_types == [
      JavaScriptTokenType.FUNCTION_DECLARATION,
      JavaScriptTokenType.START_PARAMETERS,
      JavaScriptTokenType.END_PARAMETERS]
+
+
def IsInClosurizedNamespace(symbol, closurized_namespaces):
  """Match a goog.scope alias.

  Args:
    symbol: An identifier like 'goog.events.Event'.
    closurized_namespaces: Iterable of valid Closurized namespaces (strings).

  Returns:
    True if symbol is an identifier in a Closurized namespace, otherwise False.
  """
  return any(
      symbol.startswith(namespace + '.')
      for namespace in closurized_namespaces)
+
+
def _GetVarAssignmentTokens(context):
  """Returns the tokens from context if it is a var assignment.

  Args:
    context: An EcmaContext.

  Returns:
    If a var assignment, the tokens contained within it w/o the trailing
    semicolon; otherwise None (implicitly, via a bare return).
  """
  if context.type != ecmametadatapass.EcmaContext.VAR:
    return

  # Get the tokens in this statement.
  if context.start_token and context.end_token:
    statement_tokens = tokenutil.GetTokenRange(context.start_token,
                                               context.end_token)
  else:
    return

  # And now just those tokens that are actually code.
  is_non_code_type = lambda t: t.type not in JavaScriptTokenType.NON_CODE_TYPES
  code_tokens = filter(is_non_code_type, statement_tokens)

  # Pop off the semicolon if present.
  if code_tokens and code_tokens[-1].IsType(JavaScriptTokenType.SEMICOLON):
    code_tokens.pop()

  # Shortest possible assignment is four tokens: "var", lvalue, "=", value.
  if len(code_tokens) < 4:
    return

  if (code_tokens[0].IsKeyword('var') and
      code_tokens[1].IsType(JavaScriptTokenType.SIMPLE_LVALUE) and
      code_tokens[2].IsOperator('=')):
    return code_tokens
+
+
def MatchAlias(context):
  """Match an alias statement (some identifier assigned to a variable).

  Example alias: var MyClass = proj.longNamespace.MyClass.

  Args:
    context: An EcmaContext of type EcmaContext.VAR.

  Returns:
    If a valid alias, returns a tuple of alias and symbol, otherwise None.
  """
  code_tokens = _GetVarAssignmentTokens(context)
  if code_tokens is None:
    return

  # Only accept a pure dotted-identifier right-hand side; anything else
  # (literals, function expressions, ternaries) is not an alias.
  if all(tokenutil.IsIdentifierOrDot(t) for t in code_tokens[3:]):
    # var Foo = bar.Foo;
    alias, symbol = code_tokens[1], code_tokens[3]
    # Mark both tokens as an alias definition to not count them as usages.
    alias.metadata.is_alias_definition = True
    symbol.metadata.is_alias_definition = True
    return alias.string, tokenutil.GetIdentifierForToken(symbol)
+
+
def MatchModuleAlias(context):
  """Match an alias statement in a goog.module style import.

  Example alias: var MyClass = goog.require('proj.longNamespace.MyClass').

  Args:
    context: An EcmaContext.

  Returns:
    If a valid alias, returns a tuple of alias and symbol, otherwise None.
  """
  code_tokens = _GetVarAssignmentTokens(context)
  if code_tokens is None:
    return

  # The right-hand side must be a call to the identifier "goog.require".
  requirer = code_tokens[3]
  if (requirer.IsType(JavaScriptTokenType.IDENTIFIER) and
      requirer.string == 'goog.require'):
    # var Foo = goog.require('bar.Foo');
    alias = code_tokens[1]
    symbol = tokenutil.GetStringAfterToken(requirer)
    if symbol:
      # Mark the alias so it is not counted as a usage.
      alias.metadata.is_alias_definition = True
      return alias.string, symbol
diff --git a/tools/closure_linter/closure_linter/scopeutil_test.py b/tools/closure_linter/closure_linter/scopeutil_test.py
new file mode 100644
index 0000000000..722a953900
--- /dev/null
+++ b/tools/closure_linter/closure_linter/scopeutil_test.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the scopeutil module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+
+import unittest as googletest
+
+from closure_linter import ecmametadatapass
+from closure_linter import scopeutil
+from closure_linter import testutil
+
+
def _FindContexts(start_token):
  """Depth first search of all contexts referenced by a token stream.

  Includes contexts' parents, which might not be directly referenced
  by any token in the stream.

  Args:
    start_token: First token in the token stream.

  Yields:
    All contexts referenced by this token stream, each exactly once.
  """

  # Tracks contexts already yielded so each is produced only once.
  seen_contexts = set()

  # For each token, yield the context if we haven't seen it before.
  for token in start_token:

    token_context = token.metadata.context
    contexts = [token_context]

    # Also grab all the context's ancestors.
    parent = token_context.parent
    while parent:
      contexts.append(parent)
      parent = parent.parent

    # Yield each of these contexts if we've not seen them.
    for context in contexts:
      if context not in seen_contexts:
        yield context

        seen_contexts.add(context)
+
+
def _FindFirstContextOfType(token, context_type):
  """Returns the first context of the given type, or None if none exists."""
  for context in _FindContexts(token):
    if context.type == context_type:
      return context
+
+
def _ParseAssignment(script):
  """Tokenizes script and returns its first VAR context, or None."""
  start_token = testutil.TokenizeSourceAndRunEcmaPass(script)
  statement = _FindFirstContextOfType(
      start_token, ecmametadatapass.EcmaContext.VAR)
  return statement
+
+
class StatementTest(googletest.TestCase):
  """Tests matching of scope-alias and module-alias var statements."""

  def assertAlias(self, expected_match, script):
    """Asserts MatchAlias on script's first var statement yields the match."""
    statement = _ParseAssignment(script)
    match = scopeutil.MatchAlias(statement)
    self.assertEquals(expected_match, match)

  def assertModuleAlias(self, expected_match, script):
    """Asserts MatchModuleAlias on script's first var statement matches."""
    statement = _ParseAssignment(script)
    match = scopeutil.MatchModuleAlias(statement)
    self.assertEquals(expected_match, match)

  def testSimpleAliases(self):
    self.assertAlias(
        ('foo', 'goog.foo'),
        'var foo = goog.foo;')

    self.assertAlias(
        ('foo', 'goog.foo'),
        'var foo = goog.foo')  # No semicolon

  def testAliasWithComment(self):
    self.assertAlias(
        ('Component', 'goog.ui.Component'),
        'var Component = /* comment */ goog.ui.Component;')

  def testMultilineAlias(self):
    self.assertAlias(
        ('Component', 'goog.ui.Component'),
        'var Component = \n  goog.ui.\n  Component;')

  def testNonSymbolAliasVarStatements(self):
    # None of these right-hand sides are plain dotted identifiers.
    self.assertAlias(None, 'var foo = 3;')
    self.assertAlias(None, 'var foo = function() {};')
    self.assertAlias(None, 'var foo = bar ? baz : qux;')

  def testModuleAlias(self):
    self.assertModuleAlias(
        ('foo', 'goog.foo'),
        'var foo = goog.require("goog.foo");')
    self.assertModuleAlias(
        None,
        'var foo = goog.require(notastring);')
+
+
class ScopeBlockTest(googletest.TestCase):
  """Tests block detection and goog.scope classification."""

  @staticmethod
  def _GetBlocks(source):
    """Yields every BLOCK context found in the tokenized source."""
    start_token = testutil.TokenizeSourceAndRunEcmaPass(source)
    for context in _FindContexts(start_token):
      if context.type is ecmametadatapass.EcmaContext.BLOCK:
        yield context

  def assertNoBlocks(self, script):
    blocks = list(self._GetBlocks(script))
    self.assertEquals([], blocks)

  def testNotBlocks(self):
    # Ensure these are not considered blocks.
    self.assertNoBlocks('goog.scope(if{});')
    self.assertNoBlocks('goog.scope(for{});')
    self.assertNoBlocks('goog.scope(switch{});')
    self.assertNoBlocks('goog.scope(function foo{});')

  def testNonScopeBlocks(self):

    blocks = list(self._GetBlocks('goog.scope(try{});'))
    self.assertEquals(1, len(blocks))
    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))

    # A parameterized function literal is not a goog.scope block.
    blocks = list(self._GetBlocks('goog.scope(function(a,b){});'))
    self.assertEquals(1, len(blocks))
    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))

    blocks = list(self._GetBlocks('goog.scope(try{} catch(){});'))
    # Two blocks: try and catch.
    self.assertEquals(2, len(blocks))
    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))

    blocks = list(self._GetBlocks('goog.scope(try{} catch(){} finally {});'))
    self.assertEquals(3, len(blocks))
    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
    self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
+
+
class AliasTest(googletest.TestCase):
  """Tests alias matching over the shared _TEST_SCRIPT fixture."""

  def setUp(self):
    self.start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)

  def testMatchAliasStatement(self):
    matches = set()
    for context in _FindContexts(self.start_token):
      match = scopeutil.MatchAlias(context)
      if match:
        matches.add(match)

    self.assertEquals(
        set([('bar', 'baz'),
             ('foo', 'this.foo_'),
             ('Component', 'goog.ui.Component'),
             ('MyClass', 'myproject.foo.MyClass'),
             ('NonClosurizedClass', 'aaa.bbb.NonClosurizedClass')]),
        matches)

  def testMatchAliasStatement_withClosurizedNamespaces(self):

    closurized_namepaces = frozenset(['goog', 'myproject'])

    # Only aliases whose symbol lives in a closurized namespace survive
    # the IsInClosurizedNamespace filter.
    matches = set()
    for context in _FindContexts(self.start_token):
      match = scopeutil.MatchAlias(context)
      if match:
        unused_alias, symbol = match
        if scopeutil.IsInClosurizedNamespace(symbol, closurized_namepaces):
          matches.add(match)

    self.assertEquals(
        set([('MyClass', 'myproject.foo.MyClass'),
             ('Component', 'goog.ui.Component')]),
        matches)
+
+_TEST_SCRIPT = """
+goog.scope(function() {
+ var Component = goog.ui.Component; // scope alias
+ var MyClass = myproject.foo.MyClass; // scope alias
+
+ // Scope alias of non-Closurized namespace.
+ var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
+
+ var foo = this.foo_; // non-scope object property alias
+ var bar = baz; // variable alias
+
+ var component = new Component();
+});
+
+"""
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/closure_linter/statetracker.py b/tools/closure_linter/closure_linter/statetracker.py
index 5630c17bd8..52e363972f 100755..100644
--- a/tools/closure_linter/closure_linter/statetracker.py
+++ b/tools/closure_linter/closure_linter/statetracker.py
@@ -24,6 +24,7 @@ import re
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import tokenutil
+from closure_linter import typeannotation
# Shorthand
Type = javascripttokens.JavaScriptTokenType
@@ -39,7 +40,8 @@ class DocFlag(object):
including braces.
type_end_token: The last token specifying the flag type,
including braces.
- type: The type spec.
+ type: The type spec string.
+ jstype: The type spec, a TypeAnnotation instance.
name_token: The token specifying the flag name.
name: The flag name
description_start_token: The first token in the description.
@@ -53,34 +55,47 @@ class DocFlag(object):
STANDARD_DOC = frozenset([
'author',
'bug',
+ 'classTemplate',
+ 'consistentIdGenerator',
'const',
'constructor',
'define',
'deprecated',
+ 'dict',
'enum',
'export',
+ 'expose',
'extends',
'externs',
'fileoverview',
+ 'idGenerator',
'implements',
'implicitCast',
'interface',
+ 'lends',
'license',
+ 'ngInject', # This annotation is specific to AngularJS.
'noalias',
'nocompile',
'nosideeffects',
'override',
'owner',
+ 'package',
'param',
'preserve',
'private',
+ 'protected',
+ 'public',
'return',
'see',
+ 'stableIdGenerator',
+ 'struct',
'supported',
'template',
'this',
'type',
'typedef',
+ 'unrestricted',
])
ANNOTATION = frozenset(['preserveTry', 'suppress'])
@@ -89,51 +104,127 @@ class DocFlag(object):
# Includes all Closure Compiler @suppress types.
# Not all of these annotations are interpreted by Closure Linter.
+ #
+ # Specific cases:
+ # - accessControls is supported by the compiler at the expression
+ # and method level to suppress warnings about private/protected
+ # access (method level applies to all references in the method).
+ # The linter mimics the compiler behavior.
SUPPRESS_TYPES = frozenset([
'accessControls',
+ 'ambiguousFunctionDecl',
+ 'checkDebuggerStatement',
'checkRegExp',
+ 'checkStructDictInheritance',
'checkTypes',
'checkVars',
+ 'const',
+ 'constantProperty',
'deprecated',
'duplicate',
+ 'es5Strict',
+ 'externsValidation',
+ 'extraProvide',
+ 'extraRequire',
'fileoverviewTags',
+ 'globalThis',
+ 'internetExplorerChecks',
'invalidCasts',
'missingProperties',
+ 'missingProvide',
+ 'missingRequire',
+ 'missingReturn',
'nonStandardJsDocs',
'strictModuleDepCheck',
+ 'suspiciousCode',
+ 'tweakValidation',
+ 'typeInvalidation',
+ 'undefinedNames',
'undefinedVars',
'underscore',
'unknownDefines',
+ 'unnecessaryCasts',
+ 'unusedPrivateMembers',
'uselessCode',
'visibility',
- 'with'])
+ 'with',
+ ])
HAS_DESCRIPTION = frozenset([
- 'define', 'deprecated', 'desc', 'fileoverview', 'license', 'param',
- 'preserve', 'return', 'supported'])
+ 'define',
+ 'deprecated',
+ 'desc',
+ 'fileoverview',
+ 'license',
+ 'param',
+ 'preserve',
+ 'return',
+ 'supported',
+ ])
+ # Docflags whose argument should be parsed using the typeannotation parser.
HAS_TYPE = frozenset([
- 'define', 'enum', 'extends', 'implements', 'param', 'return', 'type',
- 'suppress'])
+ 'const',
+ 'define',
+ 'enum',
+ 'extends',
+ 'final',
+ 'implements',
+ 'mods',
+ 'package',
+ 'param',
+ 'private',
+ 'protected',
+ 'public',
+ 'return',
+ 'suppress',
+ 'type',
+ 'typedef',
+ ])
- TYPE_ONLY = frozenset(['enum', 'extends', 'implements', 'suppress', 'type'])
+ # Docflags for which it's ok to omit the type (flag without an argument).
+ CAN_OMIT_TYPE = frozenset([
+ 'const',
+ 'enum',
+ 'final',
+ 'package',
+ 'private',
+ 'protected',
+ 'public',
+ 'suppress', # We'll raise a separate INCORRECT_SUPPRESS_SYNTAX instead.
+ ])
+
+ # Docflags that only take a type as an argument and should not parse a
+ # following description.
+ TYPE_ONLY = frozenset([
+ 'const',
+ 'enum',
+ 'extends',
+ 'implements',
+ 'package',
+ 'suppress',
+ 'type',
+ ])
HAS_NAME = frozenset(['param'])
EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
EMPTY_STRING = re.compile(r'^\s*$')
- def __init__(self, flag_token):
+ def __init__(self, flag_token, error_handler=None):
"""Creates the DocFlag object and attaches it to the given start token.
Args:
flag_token: The starting token of the flag.
+ error_handler: An optional error handler for errors occurring while
+ parsing the doctype.
"""
self.flag_token = flag_token
self.flag_type = flag_token.string.strip().lstrip('@')
# Extract type, if applicable.
self.type = None
+ self.jstype = None
self.type_start_token = None
self.type_end_token = None
if self.flag_type in self.HAS_TYPE:
@@ -142,28 +233,37 @@ class DocFlag(object):
if brace:
end_token, contents = _GetMatchingEndBraceAndContents(brace)
self.type = contents
+ self.jstype = typeannotation.Parse(brace, end_token,
+ error_handler)
self.type_start_token = brace
self.type_end_token = end_token
elif (self.flag_type in self.TYPE_ONLY and
- flag_token.next.type not in Type.FLAG_ENDING_TYPES):
+ flag_token.next.type not in Type.FLAG_ENDING_TYPES and
+ flag_token.line_number == flag_token.next.line_number):
+ # b/10407058. If the flag is expected to be followed by a type then
+ # search for type in same line only. If no token after flag in same
+ # line then conclude that no type is specified.
self.type_start_token = flag_token.next
self.type_end_token, self.type = _GetEndTokenAndContents(
self.type_start_token)
if self.type is not None:
self.type = self.type.strip()
+ self.jstype = typeannotation.Parse(flag_token, self.type_end_token,
+ error_handler)
# Extract name, if applicable.
self.name_token = None
self.name = None
if self.flag_type in self.HAS_NAME:
# Handle bad case, name could be immediately after flag token.
- self.name_token = _GetNextIdentifierToken(flag_token)
+ self.name_token = _GetNextPartialIdentifierToken(flag_token)
# Handle good case, if found token is after type start, look for
- # identifier after type end, since types contain identifiers.
+ # a identifier (substring to cover cases like [cnt] b/4197272) after
+ # type end, since types contain identifiers.
if (self.type and self.name_token and
tokenutil.Compare(self.name_token, self.type_start_token) > 0):
- self.name_token = _GetNextIdentifierToken(self.type_end_token)
+ self.name_token = _GetNextPartialIdentifierToken(self.type_end_token)
if self.name_token:
self.name = self.name_token.string
@@ -191,6 +291,13 @@ class DocFlag(object):
self.description_end_token, self.description = (
_GetEndTokenAndContents(interesting_token))
+ def HasType(self):
+ """Returns whether this flag should have a type annotation."""
+ return self.flag_type in self.HAS_TYPE
+
+ def __repr__(self):
+ return '<Flag: %s, type:%s>' % (self.flag_type, repr(self.jstype))
+
class DocComment(object):
"""JavaScript doc comment object.
@@ -207,14 +314,21 @@ class DocComment(object):
Args:
start_token: The first token in the doc comment.
"""
- self.__params = {}
- self.ordered_params = []
- self.__flags = {}
+ self.__flags = []
self.start_token = start_token
self.end_token = None
self.suppressions = {}
self.invalidated = False
+ @property
+ def ordered_params(self):
+ """Gives the list of parameter names as a list of strings."""
+ params = []
+ for flag in self.__flags:
+ if flag.flag_type == 'param' and flag.name:
+ params.append(flag.name)
+ return params
+
def Invalidate(self):
"""Indicate that the JSDoc is well-formed but we had problems parsing it.
@@ -228,28 +342,27 @@ class DocComment(object):
"""Test whether Invalidate() has been called."""
return self.invalidated
- def AddParam(self, name, param_type):
- """Add a new documented parameter.
-
- Args:
- name: The name of the parameter to document.
- param_type: The parameter's declared JavaScript type.
- """
- self.ordered_params.append(name)
- self.__params[name] = param_type
-
def AddSuppression(self, token):
"""Add a new error suppression flag.
Args:
token: The suppression flag token.
"""
- #TODO(user): Error if no braces
- brace = tokenutil.SearchUntil(token, [Type.DOC_START_BRACE],
- [Type.DOC_FLAG])
- if brace:
- end_token, contents = _GetMatchingEndBraceAndContents(brace)
- self.suppressions[contents] = token
+ flag = token and token.attached_object
+ if flag and flag.jstype:
+ for suppression in flag.jstype.IterIdentifiers():
+ self.suppressions[suppression] = token
+
+ def SuppressionOnly(self):
+ """Returns whether this comment contains only suppression flags."""
+ if not self.__flags:
+ return False
+
+ for flag in self.__flags:
+ if flag.flag_type != 'suppress':
+ return False
+
+ return True
def AddFlag(self, flag):
"""Add a new document flag.
@@ -257,7 +370,7 @@ class DocComment(object):
Args:
flag: DocFlag object.
"""
- self.__flags[flag.flag_type] = flag
+ self.__flags.append(flag)
def InheritsDocumentation(self):
"""Test if the jsdoc implies documentation inheritance.
@@ -265,10 +378,7 @@ class DocComment(object):
Returns:
True if documentation may be pulled off the superclass.
"""
- return (self.HasFlag('inheritDoc') or
- (self.HasFlag('override') and
- not self.HasFlag('return') and
- not self.HasFlag('param')))
+ return self.HasFlag('inheritDoc') or self.HasFlag('override')
def HasFlag(self, flag_type):
"""Test if the given flag has been set.
@@ -279,7 +389,10 @@ class DocComment(object):
Returns:
True if the flag is set.
"""
- return flag_type in self.__flags
+ for flag in self.__flags:
+ if flag.flag_type == flag_type:
+ return True
+ return False
def GetFlag(self, flag_type):
"""Gets the last flag of the given type.
@@ -290,7 +403,101 @@ class DocComment(object):
Returns:
The last instance of the given flag type in this doc comment.
"""
- return self.__flags[flag_type]
+ for flag in reversed(self.__flags):
+ if flag.flag_type == flag_type:
+ return flag
+
+ def GetDocFlags(self):
+ """Return the doc flags for this comment."""
+ return list(self.__flags)
+
+ def _YieldDescriptionTokens(self):
+ for token in self.start_token:
+
+ if (token is self.end_token or
+ token.type is javascripttokens.JavaScriptTokenType.DOC_FLAG or
+ token.type not in javascripttokens.JavaScriptTokenType.COMMENT_TYPES):
+ return
+
+ if token.type not in [
+ javascripttokens.JavaScriptTokenType.START_DOC_COMMENT,
+ javascripttokens.JavaScriptTokenType.END_DOC_COMMENT,
+ javascripttokens.JavaScriptTokenType.DOC_PREFIX]:
+ yield token
+
+ @property
+ def description(self):
+ return tokenutil.TokensToString(
+ self._YieldDescriptionTokens())
+
+ def GetTargetIdentifier(self):
+ """Returns the identifier (as a string) that this is a comment for.
+
+ Note that this uses method uses GetIdentifierForToken to get the full
+ identifier, even if broken up by whitespace, newlines, or comments,
+ and thus could be longer than GetTargetToken().string.
+
+ Returns:
+ The identifier for the token this comment is for.
+ """
+ token = self.GetTargetToken()
+ if token:
+ return tokenutil.GetIdentifierForToken(token)
+
+ def GetTargetToken(self):
+ """Get this comment's target token.
+
+ Returns:
+ The token that is the target of this comment, or None if there isn't one.
+ """
+
+ # File overviews describe the file, not a token.
+ if self.HasFlag('fileoverview'):
+ return
+
+ skip_types = frozenset([
+ Type.WHITESPACE,
+ Type.BLANK_LINE,
+ Type.START_PAREN])
+
+ target_types = frozenset([
+ Type.FUNCTION_NAME,
+ Type.IDENTIFIER,
+ Type.SIMPLE_LVALUE])
+
+ token = self.end_token.next
+ while token:
+ if token.type in target_types:
+ return token
+
+ # Handles the case of a comment on "var foo = ...'
+ if token.IsKeyword('var'):
+ next_code_token = tokenutil.CustomSearch(
+ token,
+ lambda t: t.type not in Type.NON_CODE_TYPES)
+
+ if (next_code_token and
+ next_code_token.IsType(Type.SIMPLE_LVALUE)):
+ return next_code_token
+
+ return
+
+ # Handles the case of a comment on "function foo () {}"
+ if token.type is Type.FUNCTION_DECLARATION:
+ next_code_token = tokenutil.CustomSearch(
+ token,
+ lambda t: t.type not in Type.NON_CODE_TYPES)
+
+ if next_code_token.IsType(Type.FUNCTION_NAME):
+ return next_code_token
+
+ return
+
+ # Skip types will end the search.
+ if token.type not in skip_types:
+ return
+
+ token = token.next
def CompareParameters(self, params):
"""Computes the edit distance and list from the function params to the docs.
@@ -360,7 +567,8 @@ class DocComment(object):
Returns:
A string representation of this object.
"""
- return '<DocComment: %s, %s>' % (str(self.__params), str(self.__flags))
+ return '<DocComment: %s, %s>' % (
+ str(self.ordered_params), str(self.__flags))
#
@@ -409,28 +617,25 @@ def _GetMatchingEndBraceAndContents(start_brace):
return token, ''.join(contents)
-def _GetNextIdentifierToken(start_token):
- """Searches for and returns the first identifier at the beginning of a token.
+def _GetNextPartialIdentifierToken(start_token):
+ """Returns the first token having identifier as substring after a token.
- Searches each token after the start to see if it starts with an identifier.
- If found, will split the token into at most 3 piecies: leading whitespace,
- identifier, rest of token, returning the identifier token. If no identifier is
- found returns None and changes no tokens. Search is abandoned when a
- FLAG_ENDING_TYPE token is found.
+ Searches each token after the start to see if it contains an identifier.
+ If found, token is returned. If no identifier is found returns None.
+ Search is abandoned when a FLAG_ENDING_TYPE token is found.
Args:
start_token: The token to start searching after.
Returns:
- The identifier token is found, None otherwise.
+ The token found containing identifier, None otherwise.
"""
token = start_token.next
- while token and not token.type in Type.FLAG_ENDING_TYPES:
- match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.match(
+ while token and token.type not in Type.FLAG_ENDING_TYPES:
+ match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search(
token.string)
- if (match is not None and token.type == Type.COMMENT and
- len(token.string) == len(match.group(0))):
+ if match is not None and token.type == Type.COMMENT:
return token
token = token.next
@@ -455,7 +660,8 @@ def _GetEndTokenAndContents(start_token):
last_line = iterator.line_number
last_token = None
contents = ''
- while not iterator.type in Type.FLAG_ENDING_TYPES:
+ doc_depth = 0
+ while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
if (iterator.IsFirstInLine() and
DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
# If we have a blank comment line, consider that an implicit
@@ -470,6 +676,17 @@ def _GetEndTokenAndContents(start_token):
# only a doc comment prefix or whitespace.
break
+ # b/2983692
+ # don't prematurely match against a @flag if inside a doc flag
+ # need to think about what is the correct behavior for unterminated
+ # inline doc flags
+ if (iterator.type == Type.DOC_START_BRACE and
+ iterator.next.type == Type.DOC_INLINE_FLAG):
+ doc_depth += 1
+ elif (iterator.type == Type.DOC_END_BRACE and
+ doc_depth > 0):
+ doc_depth -= 1
+
if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
contents += iterator.string
last_token = iterator
@@ -501,6 +718,9 @@ class Function(object):
is_constructor: If the function is a constructor.
name: The name of the function, whether given in the function keyword or
as the lvalue the function is assigned to.
+ start_token: First token of the function (the function' keyword token).
+ end_token: Last token of the function (the closing '}' token).
+ parameters: List of parameter names.
"""
def __init__(self, block_depth, is_assigned, doc, name):
@@ -509,9 +729,13 @@ class Function(object):
self.is_constructor = doc and doc.HasFlag('constructor')
self.is_interface = doc and doc.HasFlag('interface')
self.has_return = False
+ self.has_throw = False
self.has_this = False
self.name = name
self.doc = doc
+ self.start_token = None
+ self.end_token = None
+ self.parameters = None
class StateTracker(object):
@@ -538,7 +762,7 @@ class StateTracker(object):
self._block_depth = 0
self._is_block_close = False
self._paren_depth = 0
- self._functions = []
+ self._function_stack = []
self._functions_by_name = {}
self._last_comment = None
self._doc_comment = None
@@ -548,6 +772,24 @@ class StateTracker(object):
self._last_line = None
self._first_token = None
self._documented_identifiers = set()
+ self._variables_in_scope = []
+
+ def DocFlagPass(self, start_token, error_handler):
+ """Parses doc flags.
+
+ This pass needs to be executed before the aliaspass and we don't want to do
+ a full-blown statetracker dry run for these.
+
+ Args:
+ start_token: The token at which to start iterating
+ error_handler: An error handler for error reporting.
+ """
+ if not start_token:
+ return
+ doc_flag_types = (Type.DOC_FLAG, Type.DOC_INLINE_FLAG)
+ for token in start_token:
+ if token.type in doc_flag_types:
+ token.attached_object = self._doc_flag(token, error_handler)
def InFunction(self):
"""Returns true if the current token is within a function.
@@ -555,7 +797,7 @@ class StateTracker(object):
Returns:
True if the current token is within a function.
"""
- return bool(self._functions)
+ return bool(self._function_stack)
def InConstructor(self):
"""Returns true if the current token is within a constructor.
@@ -563,7 +805,7 @@ class StateTracker(object):
Returns:
True if the current token is within a constructor.
"""
- return self.InFunction() and self._functions[-1].is_constructor
+ return self.InFunction() and self._function_stack[-1].is_constructor
def InInterfaceMethod(self):
"""Returns true if the current token is within an interface method.
@@ -572,10 +814,10 @@ class StateTracker(object):
True if the current token is within an interface method.
"""
if self.InFunction():
- if self._functions[-1].is_interface:
+ if self._function_stack[-1].is_interface:
return True
else:
- name = self._functions[-1].name
+ name = self._function_stack[-1].name
prototype_index = name.find('.prototype.')
if prototype_index != -1:
class_function_name = name[0:prototype_index]
@@ -591,7 +833,7 @@ class StateTracker(object):
Returns:
True if the current token is within a top level function.
"""
- return len(self._functions) == 1 and self.InTopLevel()
+ return len(self._function_stack) == 1 and self.InTopLevel()
def InAssignedFunction(self):
"""Returns true if the current token is within a function variable.
@@ -599,7 +841,7 @@ class StateTracker(object):
Returns:
True if if the current token is within a function variable
"""
- return self.InFunction() and self._functions[-1].is_assigned
+ return self.InFunction() and self._function_stack[-1].is_assigned
def IsFunctionOpen(self):
"""Returns true if the current token is a function block open.
@@ -607,8 +849,8 @@ class StateTracker(object):
Returns:
True if the current token is a function block open.
"""
- return (self._functions and
- self._functions[-1].block_depth == self._block_depth - 1)
+ return (self._function_stack and
+ self._function_stack[-1].block_depth == self._block_depth - 1)
def IsFunctionClose(self):
"""Returns true if the current token is a function block close.
@@ -616,8 +858,8 @@ class StateTracker(object):
Returns:
True if the current token is a function block close.
"""
- return (self._functions and
- self._functions[-1].block_depth == self._block_depth)
+ return (self._function_stack and
+ self._function_stack[-1].block_depth == self._block_depth)
def InBlock(self):
"""Returns true if the current token is within a block.
@@ -659,6 +901,30 @@ class StateTracker(object):
"""
return bool(self._paren_depth)
+ def ParenthesesDepth(self):
+ """Returns the number of parens surrounding the token.
+
+ Returns:
+ The number of parenthesis surrounding the token.
+ """
+ return self._paren_depth
+
+ def BlockDepth(self):
+ """Returns the number of blocks in which the token is nested.
+
+ Returns:
+ The number of blocks in which the token is nested.
+ """
+ return self._block_depth
+
+ def FunctionDepth(self):
+ """Returns the number of functions in which the token is nested.
+
+ Returns:
+ The number of functions in which the token is nested.
+ """
+ return len(self._function_stack)
+
def InTopLevel(self):
"""Whether we are at the top level in the class.
@@ -752,7 +1018,8 @@ class StateTracker(object):
Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX):
f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT],
None, True)
- if f and f.attached_object.type_start_token is not None:
+ if (f and f.attached_object.type_start_token is not None and
+ f.attached_object.type_end_token is not None):
return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
return False
@@ -763,8 +1030,8 @@ class StateTracker(object):
Returns:
The current Function object.
"""
- if self._functions:
- return self._functions[-1]
+ if self._function_stack:
+ return self._function_stack[-1]
def GetBlockDepth(self):
"""Return the block depth.
@@ -786,6 +1053,29 @@ class StateTracker(object):
"""Return the very first token in the file."""
return self._first_token
+ def IsVariableInScope(self, token_string):
+ """Checks if string is variable in current scope.
+
+ For given string it checks whether the string is a defined variable
+ (including function param) in current state.
+
+ E.g. if variables defined (variables in current scope) is docs
+ then docs, docs.length etc will be considered as variable in current
+ scope. This will help in avoding extra goog.require for variables.
+
+ Args:
+ token_string: String to check if its is a variable in current scope.
+
+ Returns:
+ true if given string is a variable in current scope.
+ """
+ for variable in self._variables_in_scope:
+ if (token_string == variable
+ or token_string.startswith(variable + '.')):
+ return True
+
+ return False
+
def HandleToken(self, token, last_non_space_token):
"""Handles the given token and updates state.
@@ -808,6 +1098,12 @@ class StateTracker(object):
# by language.
self._block_types.append(self.GetBlockType(token))
+ # When entering a function body, record its parameters.
+ if self.InFunction():
+ function = self._function_stack[-1]
+ if self._block_depth == function.block_depth + 1:
+ function.parameters = self.GetParams()
+
# Track block depth.
elif type == Type.END_BLOCK:
self._is_block_close = not self.InObjectLiteral()
@@ -833,21 +1129,23 @@ class StateTracker(object):
self._doc_comment.end_token = token
elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
- flag = self._doc_flag(token)
- token.attached_object = flag
+ # Don't overwrite flags if they were already parsed in a previous pass.
+ if token.attached_object is None:
+ flag = self._doc_flag(token)
+ token.attached_object = flag
+ else:
+ flag = token.attached_object
self._doc_comment.AddFlag(flag)
- if flag.flag_type == 'param' and flag.name:
- self._doc_comment.AddParam(flag.name, flag.type)
- elif flag.flag_type == 'suppress':
+ if flag.flag_type == 'suppress':
self._doc_comment.AddSuppression(token)
elif type == Type.FUNCTION_DECLARATION:
last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
True)
doc = None
- # Only functions outside of parens are eligible for documentation.
- if not self._paren_depth:
+ # Only top-level functions are eligible for documentation.
+ if self.InTopLevel():
doc = self._doc_comment
name = ''
@@ -861,8 +1159,7 @@ class StateTracker(object):
# my.function.foo.
# bar = function() ...
identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True)
- while identifier and identifier.type in (
- Type.IDENTIFIER, Type.SIMPLE_LVALUE):
+ while identifier and tokenutil.IsIdentifierOrDot(identifier):
name = identifier.string + name
# Traverse behind us, skipping whitespace and comments.
while True:
@@ -877,14 +1174,22 @@ class StateTracker(object):
next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2)
function = Function(self._block_depth, is_assigned, doc, name)
- self._functions.append(function)
+ function.start_token = token
+
+ self._function_stack.append(function)
self._functions_by_name[name] = function
+ # Add a delimiter in stack for scope variables to define start of
+ # function. This helps in popping variables of this function when
+ # function declaration ends.
+ self._variables_in_scope.append('')
+
elif type == Type.START_PARAMETERS:
self._cumulative_params = ''
elif type == Type.PARAMETERS:
self._cumulative_params += token.string
+ self._variables_in_scope.extend(self.GetParams())
elif type == Type.KEYWORD and token.string == 'return':
next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
@@ -893,6 +1198,22 @@ class StateTracker(object):
if function:
function.has_return = True
+ elif type == Type.KEYWORD and token.string == 'throw':
+ function = self.GetFunction()
+ if function:
+ function.has_throw = True
+
+ elif type == Type.KEYWORD and token.string == 'var':
+ function = self.GetFunction()
+ next_token = tokenutil.Search(token, [Type.IDENTIFIER,
+ Type.SIMPLE_LVALUE])
+
+ if next_token:
+ if next_token.type == Type.SIMPLE_LVALUE:
+ self._variables_in_scope.append(next_token.values['identifier'])
+ else:
+ self._variables_in_scope.append(next_token.string)
+
elif type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
jsdoc = self.GetDocComment()
@@ -906,7 +1227,7 @@ class StateTracker(object):
# Detect documented non-assignments.
next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
- if next_token.IsType(Type.SEMICOLON):
+ if next_token and next_token.IsType(Type.SEMICOLON):
if (self._last_non_space_token and
self._last_non_space_token.IsType(Type.END_DOC_COMMENT)):
self._documented_identifiers.add(token.string)
@@ -926,7 +1247,6 @@ class StateTracker(object):
if function:
function.has_this = True
-
def HandleAfterToken(self, token):
"""Handle updating state after a token has been checked.
@@ -952,7 +1272,17 @@ class StateTracker(object):
if self.InFunction() and self.IsFunctionClose():
# TODO(robbyw): Detect the function's name for better errors.
- self._functions.pop()
+ function = self._function_stack.pop()
+ function.end_token = token
+
+ # Pop all variables till delimiter ('') those were defined in the
+ # function being closed so make them out of scope.
+ while self._variables_in_scope and self._variables_in_scope[-1]:
+ self._variables_in_scope.pop()
+
+ # Pop delimiter
+ if self._variables_in_scope:
+ self._variables_in_scope.pop()
elif type == Type.END_PARAMETERS and self._doc_comment:
self._doc_comment = None
diff --git a/tools/closure_linter/closure_linter/statetracker_test.py b/tools/closure_linter/closure_linter/statetracker_test.py
new file mode 100755
index 0000000000..494dc642fc
--- /dev/null
+++ b/tools/closure_linter/closure_linter/statetracker_test.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the statetracker module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+
+
+import unittest as googletest
+
+from closure_linter import javascripttokens
+from closure_linter import statetracker
+from closure_linter import testutil
+
+
+class _FakeDocFlag(object):
+
+ def __repr__(self):
+ return '@%s %s' % (self.flag_type, self.name)
+
+
+class IdentifierTest(googletest.TestCase):
+
+ def testJustIdentifier(self):
+ a = javascripttokens.JavaScriptToken(
+ 'abc', javascripttokens.JavaScriptTokenType.IDENTIFIER, 'abc', 1)
+
+ st = statetracker.StateTracker()
+ st.HandleToken(a, None)
+
+
+class DocCommentTest(googletest.TestCase):
+
+ @staticmethod
+ def _MakeDocFlagFake(flag_type, name=None):
+ flag = _FakeDocFlag()
+ flag.flag_type = flag_type
+ flag.name = name
+ return flag
+
+ def testDocFlags(self):
+ comment = statetracker.DocComment(None)
+
+ a = self._MakeDocFlagFake('param', 'foo')
+ comment.AddFlag(a)
+
+ b = self._MakeDocFlagFake('param', '')
+ comment.AddFlag(b)
+
+ c = self._MakeDocFlagFake('param', 'bar')
+ comment.AddFlag(c)
+
+ self.assertEquals(
+ ['foo', 'bar'],
+ comment.ordered_params)
+
+ self.assertEquals(
+ [a, b, c],
+ comment.GetDocFlags())
+
+ def testInvalidate(self):
+ comment = statetracker.DocComment(None)
+
+ self.assertFalse(comment.invalidated)
+ self.assertFalse(comment.IsInvalidated())
+
+ comment.Invalidate()
+
+ self.assertTrue(comment.invalidated)
+ self.assertTrue(comment.IsInvalidated())
+
+ def testSuppressionOnly(self):
+ comment = statetracker.DocComment(None)
+
+ self.assertFalse(comment.SuppressionOnly())
+ comment.AddFlag(self._MakeDocFlagFake('suppress'))
+ self.assertTrue(comment.SuppressionOnly())
+ comment.AddFlag(self._MakeDocFlagFake('foo'))
+ self.assertFalse(comment.SuppressionOnly())
+
+ def testRepr(self):
+ comment = statetracker.DocComment(None)
+ comment.AddFlag(self._MakeDocFlagFake('param', 'foo'))
+ comment.AddFlag(self._MakeDocFlagFake('param', 'bar'))
+
+ self.assertEquals(
+ '<DocComment: [\'foo\', \'bar\'], [@param foo, @param bar]>',
+ repr(comment))
+
+ def testDocFlagParam(self):
+ comment = self._ParseComment("""
+ /**
+ * @param {string} [name] Name of customer.
+ */""")
+ flag = comment.GetFlag('param')
+ self.assertEquals('string', flag.type)
+ self.assertEquals('string', flag.jstype.ToString())
+ self.assertEquals('[name]', flag.name)
+
+ def _ParseComment(self, script):
+ """Parse a script that contains one comment and return it."""
+ _, comments = testutil.ParseFunctionsAndComments(script)
+ self.assertEquals(1, len(comments))
+ return comments[0]
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/closure_linter/strict_test.py b/tools/closure_linter/closure_linter/strict_test.py
new file mode 100755
index 0000000000..2634456874
--- /dev/null
+++ b/tools/closure_linter/closure_linter/strict_test.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for gjslint --strict.
+
+Tests errors that can be thrown by gjslint when in strict mode.
+"""
+
+
+
+import unittest
+
+import gflags as flags
+import unittest as googletest
+
+from closure_linter import errors
+from closure_linter import runner
+from closure_linter.common import erroraccumulator
+
+flags.FLAGS.strict = True
+
+
+class StrictTest(unittest.TestCase):
+ """Tests scenarios where strict generates warnings."""
+
+ def testUnclosedString(self):
+ """Tests warnings are reported when nothing is disabled.
+
+ b/11450054.
+ """
+ original = [
+ 'bug = function() {',
+ ' (\'foo\'\');',
+ '};',
+ '',
+ ]
+
+ expected = [errors.FILE_DOES_NOT_PARSE, errors.MULTI_LINE_STRING,
+ errors.FILE_IN_BLOCK]
+ self._AssertErrors(original, expected)
+
+ def _AssertErrors(self, original, expected_errors):
+ """Asserts that the error fixer corrects original to expected."""
+
+ # Trap gjslint's output parse it to get messages added.
+ error_accumulator = erroraccumulator.ErrorAccumulator()
+ runner.Run('testing.js', error_accumulator, source=original)
+ error_nums = [e.code for e in error_accumulator.GetErrors()]
+
+ error_nums.sort()
+ expected_errors.sort()
+ self.assertListEqual(error_nums, expected_errors)
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/closure_linter/testdata/all_js_wrapped.js b/tools/closure_linter/closure_linter/testdata/all_js_wrapped.js
new file mode 100644
index 0000000000..6eb3b38f52
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/all_js_wrapped.js
@@ -0,0 +1,5 @@
+(function($) {
+ // My code goes here.
+ // linter should not throw random exceptions because the file starts with
+ // an open paren. Regression test for bug 2966755.
+})(jQuery);
diff --git a/tools/closure_linter/closure_linter/testdata/blank_lines.js b/tools/closure_linter/closure_linter/testdata/blank_lines.js
new file mode 100644
index 0000000000..1dc3da2905
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/blank_lines.js
@@ -0,0 +1,104 @@
+/**
+ * @fileoverview This is a file overview with no lines above it, at the top of
+ * the file (GOOD).
+ */
+/** // WRONG_BLANK_LINE_COUNT
+ * @fileoverview This is a file overview with no lines above it (BAD).
+ */
+
+/**
+ * @fileoverview This is a file overview with one line above it (GOOD).
+ */
+
+
+/**
+ * @fileoverview This is a file overview with two lines above it (GOOD).
+ */
+
+/** // WRONG_BLANK_LINE_COUNT
+ * A constructor with 1 line above it (BAD).
+ * @constructor
+ */
+function someFunction() {}
+
+
+/** // WRONG_BLANK_LINE_COUNT
+ * A constructor with 2 lines above it (BAD).
+ * @constructor
+ */
+function someFunction() {}
+
+
+
+/**
+ * A constructor with 3 lines above it (GOOD).
+ * @constructor
+ */
+function someFunction() {}
+
+
+
+
+/** // WRONG_BLANK_LINE_COUNT
+ * A constructor with 4 lines above it (BAD).
+ * @constructor
+ */
+function someFunction() {}
+
+/** // WRONG_BLANK_LINE_COUNT
+ * Top level block with 1 line above it (BAD).
+ */
+function someFunction() {}
+
+
+/**
+ * Top level block with 2 lines above it (GOOD).
+ */
+function someFunction() {}
+
+
+
+/** // WRONG_BLANK_LINE_COUNT
+ * Top level block with 3 lines above it (BAD).
+ */
+function someFunction() {}
+
+
+// -1: EXTRA_SPACE
+/**
+ * Top level block with 2 lines above it, one contains whitespace (GOOD).
+ */
+function someFunction() {}
+
+
+// This comment should be ignored.
+/**
+ * Top level block with 2 lines above it (GOOD).
+ */
+function someFunction() {}
+
+// Should not check jsdocs which are inside a block.
+var x = {
+ /**
+ * @constructor
+ */
+};
+
+/**
+ * This jsdoc-style comment should not be required to have two lines above it
+ * since it does not immediately precede any code.
+ */
+// This is a comment.
+
+/**
+ * This jsdoc-style comment should not be required to have two lines above it
+ * since it does not immediately precede any code.
+ */
+/**
+ * This is a comment.
+ */
+
+/**
+ * This jsdoc-style comment should not be required to have two lines above it
+ * since it does not immediately precede any code.
+ */
diff --git a/tools/closure_linter/closure_linter/testdata/bugs.js b/tools/closure_linter/closure_linter/testdata/bugs.js
new file mode 100644
index 0000000000..735200545d
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/bugs.js
@@ -0,0 +1,43 @@
+// Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A file full of known bugs - this file serves only as a reference and is not
+// tested in any way.
+
+/**
+ * @param {{foo} x This is a bad record type.
+ * @param {{foo}} y This is a good record type with bad spacing.
+ * @param {{foo}} This is a good record type with no parameter name.
+ */
+function f(x, y, z) {
+}
+
+
+// Should report extra space errors.
+var magicProps = { renderRow: 0 };
+
+// No error reported here for missing space before {.
+if (x){
+}
+
+// Should have a "brace on wrong line" error.
+if (x)
+{
+}
+
+// We could consider not reporting it when wrapping makes it necessary, as in:
+if (aLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongLongCondition)
+ {
+ // Code here.
+}
diff --git a/tools/closure_linter/closure_linter/testdata/empty_file.js b/tools/closure_linter/closure_linter/testdata/empty_file.js
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/empty_file.js
diff --git a/tools/closure_linter/closure_linter/testdata/ends_with_block.js b/tools/closure_linter/closure_linter/testdata/ends_with_block.js
new file mode 100644
index 0000000000..40aa872e30
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/ends_with_block.js
@@ -0,0 +1,19 @@
+// Copyright 2009 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Regression test for an old crasher.
+
+if (x) {
+ alert('Hello world');
+}
diff --git a/tools/closure_linter/closure_linter/testdata/externs.js b/tools/closure_linter/closure_linter/testdata/externs.js
new file mode 100644
index 0000000000..94e2ad38a4
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/externs.js
@@ -0,0 +1,34 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * Externs files are treated specially. We don't require documentation or
+ * return statements in functions when they are documented.
+ *
+ * @externs
+ */
+
+
+function VXMLBaseElement() {}
+
+
+/**
+ * Should not complain about return tag with no return statement in
+ * externs.js file.
+ * @param {string} attrName The name of the attribute.
+ * @return {string}
+ */
+VXMLBaseElement.prototype.getAttribute = function(attrName) {};
+
+VXMLBaseElement.prototype.undocumentedMethod = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/externs_jsdoc.js b/tools/closure_linter/closure_linter/testdata/externs_jsdoc.js
new file mode 100644
index 0000000000..926593f7c4
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/externs_jsdoc.js
@@ -0,0 +1,37 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Tests that the externs tag within a fileoverview comment is
+ * sufficient to identify an externs file.
+ * @externs
+ *
+ * Externs files are treated specially. We don't require documentation or
+ * return statements in functions when they are documented.
+ *
+ */
+
+
+function VXMLBaseElement() {}
+
+
+/**
+ * Should not complain about return tag with no return statement in
+ * an externs file.
+ * @param {string} attrName The name of the attribute.
+ * @return {string}
+ */
+VXMLBaseElement.prototype.getAttribute = function(attrName) {};
+
+VXMLBaseElement.prototype.undocumentedMethod = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/file_level_comment.js b/tools/closure_linter/closure_linter/testdata/file_level_comment.js
new file mode 100644
index 0000000000..86581155a0
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/file_level_comment.js
@@ -0,0 +1,13 @@
+/**
+ * File level comment as the first thing in a file (GOOD).
+ * @license Copyright 2009 SomeThirdParty.
+ */
+/** // WRONG_BLANK_LINE_COUNT
+ * Comment block that is not the first thing in a file (BAD).
+ * @license Copyright 2009 SomeThirdParty.
+ */
+
+/** // WRONG_BLANK_LINE_COUNT
+ * Top level comment with a single line above it (BAD).
+ * @license Copyright 2009 SomeThirdParty.
+ */
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.in.html b/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.in.html
new file mode 100644
index 0000000000..c341bb9a39
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.in.html
@@ -0,0 +1,52 @@
+<!DOCTYPE HTML>
+<html>
+<!--
+ Copyright 2012 Google Inc. All rights reserved.
+-->
+<head>
+<meta http-equiv="pragma" content="no-cache">
+<title>Tests</title>
+</head>
+<body>
+<!-- An empty script tag with src.-->
+<script type="text/javascript" src="../../deps_html.js"></script>
+<!-- Missing a require statement and unsorted.-->
+<script type="text/javascript">
+goog.require('goog.testing.MockControl');
+goog.require('goog.SomeTestVar');
+ goog.require('goog.SomeMoreExtra');
+goog.require('goog.SomeExtra');
+</script>
+<script type="text/javascript">
+
+var mc;
+var objectToTest;
+
+function setUp() {
+ mc = new goog.testing.MockControl();
+
+ objectToTest = new goog.AMissingObject();
+}
+<!-- some extra blank lines here-->
+
+
+
+
+function testEnterMeetingId_startsMeeting() {
+ var someTestVar = new goog.SomeTestVar();
+ objectToTest.test();
+}
+
+
+
+/** @private */
+function somePrivateHelper_() {
+
+}
+
+
+</script>
+<!-- An empty script tag with no src.-->
+<script type="text/javascript"></script>
+</body>
+</html>
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.out.html b/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.out.html
new file mode 100644
index 0000000000..bb9a16f003
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.html.out.html
@@ -0,0 +1,51 @@
+<!DOCTYPE HTML>
+<html>
+<!--
+ Copyright 2012 Google Inc. All rights reserved.
+-->
+<head>
+<meta http-equiv="pragma" content="no-cache">
+<title>Tests</title>
+</head>
+<body>
+<!-- An empty script tag with src.-->
+<script type="text/javascript" src="../../deps_html.js"></script>
+<!-- Missing a require statement and unsorted.-->
+<script type="text/javascript">
+goog.require('goog.AMissingObject');
+goog.require('goog.SomeTestVar');
+goog.require('goog.testing.MockControl');
+</script>
+<script type="text/javascript">
+
+var mc;
+var objectToTest;
+
+function setUp() {
+ mc = new goog.testing.MockControl();
+
+ objectToTest = new goog.AMissingObject();
+}
+<!-- some extra blank lines here-->
+
+
+
+
+function testEnterMeetingId_startsMeeting() {
+ var someTestVar = new goog.SomeTestVar();
+ objectToTest.test();
+}
+
+
+
+/** @private */
+function somePrivateHelper_() {
+
+}
+
+
+</script>
+<!-- An empty script tag with no src.-->
+<script type="text/javascript"></script>
+</body>
+</html>
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.in.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.in.js
new file mode 100644
index 0000000000..3a1ccb1f1f
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.in.js
@@ -0,0 +1,293 @@
+// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+/**
+ * @fileoverview Autofix test script.
+ *
+ * @author robbyw@google.com (Robby Walker)
+ * @author robbyw@google.com (Robby Walker)
+ * @author robbyw@google.com(Robby Walker)
+ * @author robbyw@google.com
+ * @author robbyw@google.com Robby
+ */
+
+goog.provide('w');
+goog.provide('Y');
+goog.provide('X');
+goog.provide('Z');
+
+// Some comment about why this is suppressed top.
+/** @suppress {extraRequire} */
+goog.require('dummy.NotUsedTop'); // Comment top.
+goog.require('dummy.Bb');
+/** @suppress {extraRequire} */
+// Some comment about why this is suppressed different.
+goog.require('dummy.NotUsedDifferentComment');
+goog.require('dummy.Cc');
+// Some comment about why this is suppressed middle.
+/** @suppress {extraRequire} */
+goog.require('dummy.NotUsedMiddle'); // Comment middle.
+goog.require('dummy.Dd');
+goog.require('dummy.aa');
+// Some comment about why this is suppressed bottom.
+/** @suppress {extraRequire} */
+goog.require('dummy.NotUsedBottom'); // Comment bottom.
+
+var x = new dummy.Bb();
+dummy.Cc.someMethod();
+dummy.aa.someMethod();
+
+
+/**
+ * @param {number|null} badTypeWithExtraSpace |null -> ?.
+ * @returns {number} returns -> return.
+ */
+x.y = function( badTypeWithExtraSpace) {
+}
+
+
+/** @type {function():null|Array.<string|null>} only 2nd |null -> ? */
+x.badType;
+
+
+/** @type {Array.<number|string|null>|null} only 2nd |null -> ? */
+x.wickedType;
+
+
+/** @type { string | null } null -> ? */
+x.nullWithSpace;
+
+spaceBeforeSemicolon = 10 ;
+spaceBeforeParen = 10 +(5 * 2);
+arrayNoSpace =[10];
+arrayExtraSpace [10] = 10;
+spaceBeforeClose = ([10 ] );
+spaceAfterStart = ( [ 10]);
+extraSpaceAfterPlus = 10 + 20;
+extraSpaceBeforeOperator = x ++;
+extraSpaceBeforeOperator = x --;
+extraSpaceBeforeComma = x(y , z);
+missingSpaceBeforeOperator = x+ y;
+missingSpaceAfterOperator = x +y;
+missingBothSpaces = x+y;
+equalsSpacing= 10;
+equalsSpacing =10;
+equalsSpacing=10;
+equalsSpacing=[10];
+reallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongName=1000;
+
+"string should be single quotes";
+
+// Regression test for interaction between space fixing and semicolon fixing -
+// previously the fix for the missing space caused the function to be seen as
+// a non-assigned function and then its semicolon was being stripped.
+x=function() {
+};
+
+/**
+ * Missing a newline.
+ * @constructor
+ * @extends {a.b.c}
+ */
+x.y.z = function() {
+};goog.inherits(x.y.z, a.b.c);
+
+/**
+ * Extra blank line.
+ * @constructor
+ * @extends {a.b.c}
+ */
+x.y.z = function() {
+};
+
+goog.inherits(x.y.z, a.b.c);
+
+/**
+ * Perfect!
+ * @constructor
+ * @extends {a.b.c}
+ */
+x.y.z = function() {
+};
+goog.inherits(x.y.z, a.b.c);
+
+// Whitespace at end of comment.
+var removeWhiteSpaceAtEndOfLine;
+
+/**
+ * Whitespace at EOL (here and the line of code and the one below it).
+ * @type {string}
+ * @param {string} Description with whitespace at EOL.
+ */
+x = 10;
+
+/**
+ * @type number
+ */
+foo.bar = 3;
+
+/**
+ * @enum {boolean
+ */
+bar.baz = true;
+
+/**
+ * @extends Object}
+ */
+bar.foo = x;
+
+/**
+ * @type function(string, boolean) : void
+ */
+baz.bar = goog.nullFunction;
+
+/** {@inheritDoc} */
+baz.baz = function() {
+};
+
+TR_Node.splitDomTreeAt(splitNode, clone, /** @type Node */ (quoteNode));
+
+x = [1, 2, 3,];
+x = {
+ a: 1,
+};
+
+if (x) {
+};
+
+for (i = 0;i < 10; i++) {
+}
+for (i = 0; i < 10;i++) {
+}
+for ( i = 0; i < 10; i++) {
+}
+for (i = 0 ; i < 10; i++) {
+}
+for (i = 0; i < 10 ; i++) {
+}
+for (i = 0; i < 10; i++ ) {
+}
+for (i = 0; i < 10; i++) {
+}
+for (i = 0; i < 10; i++) {
+}
+for (i = 0 ;i < 10; i++) {
+}
+
+var x = 10
+var y = 100;
+
+
+/**
+ * This is to test the ability to add or remove a = in type to mark optional
+ * parameters.
+ * @param {number=} firstArg Incorrect the name should start with opt_. Don't
+ * handle the fix (yet).
+ * @param {function(string=):number} opt_function This should end with a =.
+ * @param {function(number)} opt_otherFunc This should end with a =.
+ * @param {string} opt_otherArg Incorrect this should be string=.
+ * @param {{string, number}} opt_recordArg Incorrect this should
+ * be {string, number}=.
+ */
+function someFunction(firstArg, opt_function, opt_otherFunc, opt_otherArg,
+ opt_recordArg) {
+}
+
+
+/**
+ * This is to test the ability to add '...' in type with variable arguments.
+ * @param {number} firstArg First argument.
+ * @param {string} var_args This should start with '...'.
+ */
+function varArgFunction(firstArg, var_args) {
+}
+
+
+/**
+ * This is to test the ability to add '...' in type with variable arguments.
+ * @param {number} firstArg First argument.
+ * @param {{a, b}} var_args This should start with '...'.
+ */
+function varArgRecordTypeFunction(firstArg, var_args) {
+}
+
+var indent = 'correct';
+ indent = 'too far';
+if (indent) {
+indent = 'too short';
+}
+indent = function() {
+ return a +
+ b;
+};
+
+
+/**
+ * Regression test, must insert whitespace before the 'b' when fixing
+ * indentation. Its different from below case of bug 3473113 as has spaces
+ * before parameter which was not working in part of the bug fix.
+ */
+indentWrongSpaces = function(
+ b) {
+};
+
+
+/**
+ * Regression test, must insert whitespace before the 'b' when fixing
+ * indentation.
+ * @bug 3473113
+ */
+indent = function(
+b) {
+};
+
+
+
+/**
+ * This is to test the ability to remove multiple extra lines before a top-level
+ * block.
+ */
+function someFunction() {}
+/**
+ * This is to test the ability to add multiple extra lines before a top-level
+ * block.
+ */
+function someFunction() {}
+
+
+// This is a comment.
+/**
+ * This is to test that blank lines removed before a top level block skips any
+ * comments above the block.
+ */
+function someFunction() {}
+// This is a comment.
+/**
+ * This is to test that blank lines added before a top level block skips any
+ * comments above the block.
+ */
+function someFunction() {}
+
+
+/**
+ * Parameters don't have proper spaces.
+ * @param {number} a
+ * @param {number} b
+ * @param {number} d
+ * @param {number} e
+ * @param {number} f
+ */
+function someFunction(a, b,d, e, f) {
+}
+
+// File does not end with newline \ No newline at end of file
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.indentation.out.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.indentation.out.js
new file mode 100644
index 0000000000..4d7c3853c8
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.indentation.out.js
@@ -0,0 +1,465 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Test file for indentation.
+ * @author robbyw@google.com (Robert Walker)
+ */
+
+goog.provide('goog.editor.SeamlessField');
+goog.provide('goog.something');
+
+goog.require('goog.events.KeyCodes');
+goog.require('goog.userAgent');
+
+// Some good indentation examples.
+
+var x = 10;
+var y = 'some really really really really really really really long string',
+ z = 14;
+if (x == 10) {
+ x = 12;
+}
+if (x == 10 ||
+ x == 12) {
+ x = 14;
+}
+if (x == 14) {
+ if (z >= x) {
+ y = 'test';
+ }
+}
+x = x +
+ 10 + (
+ 14
+ );
+something =
+ 5;
+var arr = [
+ 1, 2, 3];
+var arr2 = [
+ 1,
+ 2,
+ 3];
+var obj = {
+ a: 10,
+ b: 20
+};
+callAFunction(10, [100, 200],
+ 300);
+callAFunction([
+ 100,
+ 200
+],
+300);
+callAFunction('abc' +
+ 'def' +
+ 'ghi');
+
+x.reallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongName
+ .someMember = 10;
+
+
+// confused on allowed indentation in continued function assignments vs overlong
+// wrapped function calls.
+some.sample() // LINE_ENDS_WITH_DOT
+ .then(function(response) {
+ return 1;
+ });
+
+
+/**
+ * Some function.
+ * @return {number} The number ten.
+ */
+goog.something.x = function() {
+ return 10 +
+ 20;
+};
+
+
+/**
+ * Some function.
+ * @param {number} longParameterName1 Some number.
+ * @param {number} longParameterName2 Some number.
+ * @param {number} longParameterName3 Some number.
+ * @return {number} Sum number.
+ */
+goog.something.y = function(longParameterName1, longParameterName2,
+ longParameterName3) {
+ return longParameterName1 + longParameterName2 + longParameterName3;
+};
+
+
+/**
+ * Some function.
+ * @param {number} longParameterName1 Some number.
+ * @param {number} longParameterName2 Some number.
+ * @param {number} longParameterName3 Some number.
+ * @return {number} Sum number.
+ */
+goog.something.z = function(longParameterName1, longParameterName2,
+ longParameterName3) {
+ return longParameterName1 + longParameterName2 + longParameterName3;
+};
+
+if (opt_rootTagName) {
+ doc.appendChild(doc.createNode(3,
+ opt_rootTagName,
+ opt_namespaceUri || ''));
+}
+
+
+/**
+ * For a while this errored because the function call parens were overriding
+ * the other opening paren.
+ */
+goog.something.q = function() {
+ goog.something.x(a.getStartNode(),
+ a.getStartOffset(), a.getEndNode(), a.getEndOffset());
+};
+
+function doSomething() {
+ var titleElement = goog.something(x, // UNUSED_LOCAL_VARIABLE
+ y);
+}
+
+switch (x) {
+ case 10:
+ y = 100;
+ break;
+
+ // This should be allowed.
+ case 20:
+ if (y) {
+ z = 0;
+ }
+ break;
+
+ // This should be allowed,
+ // even with mutliple lines.
+ case 30:
+ if (y) {
+ z = 0;
+ }
+ break;
+
+ case SadThatYouSwitch
+ .onSomethingLikeThis:
+ z = 10;
+
+ case 40:
+ z = 20;
+
+ default:
+ break;
+}
+
+// Description of if case.
+if (x) {
+
+// Description of else case should be allowed at this indent.
+// Multiple lines is ok.
+} else {
+
+}
+
+
+/** @inheritDoc */
+goog.editor.SeamlessField.prototype.setupMutationEventHandlersGecko =
+ function() {
+ var x = 10;
+ x++;
+};
+
+
+// Regression test for '.' at the end confusing the indentation checker if it is
+// not considered to be part of the identifier.
+/** @inheritDoc */
+goog.editor.SeamlessField.prototype
+ .setupMutationEventHandlersGecko = function() {
+ // -2: LINE_ENDS_WITH_DOT
+ var x = 10;
+ x++;
+};
+
+var someReallyReallyLongVariableName =
+ y ? /veryVeryVeryVeryVeryVeryVeryVeryLongRegex1/gi :
+ /slightlyLessLongRegex2/gi;
+
+var somethingOrOther = z ?
+ a :
+ b;
+
+var z = x ? y :
+ 'bar';
+
+var z = x ?
+ y :
+ a;
+
+var z = z ?
+ a ? b : c :
+ d ? e : f;
+
+var z = z ?
+ a ? b :
+ c :
+ d ? e : f;
+
+var z = z ?
+ a ?
+ b :
+ c :
+ d ? e : f;
+
+var z = z ?
+ a ? b : c :
+ d ? e :
+ f ? g : h;
+
+var z = z ?
+ a +
+ i ?
+ b +
+ j : c :
+ d ? e :
+ f ? g : h;
+
+
+if (x) {
+ var block =
+ // some comment
+ // and some more comment
+ (e.keyCode == goog.events.KeyCodes.TAB && !this.dispatchBeforeTab_(e)) ||
+ // #2: to block a Firefox-specific bug where Macs try to navigate
+ // back a page when you hit command+left arrow or comamnd-right arrow.
+ // See https://bugzilla.mozilla.org/show_bug.cgi?id=341886
+ // get Firefox to fix this.
+ (goog.userAgent.GECKO && e.metaKey &&
+ (e.keyCode == goog.events.KeyCodes.LEFT ||
+ e.keyCode == goog.events.KeyCodes.RIGHT));
+}
+
+if (x) {
+}
+
+var somethingElse = {
+ HAS_W3C_RANGES: goog.userAgent.GECKO || goog.userAgent.WEBKIT ||
+ goog.userAgent.OPERA,
+
+ // A reasonably placed comment.
+ SOME_KEY: goog.userAgent.IE
+};
+
+var x = {
+ ySomethingReallyReallyLong:
+ 'foo',
+ z: 'bar'
+};
+
+// Some bad indentation.
+
+var a = 10; // WRONG_INDENTATION
+var b = 10,
+ c = 12; // WRONG_INDENTATION
+x = x +
+ 10; // WRONG_INDENTATION
+if (x == 14) {
+ x = 15; // WRONG_INDENTATION
+ x = 16; // WRONG_INDENTATION
+}
+
+var longFunctionName = function(opt_element) {
+ return opt_element ?
+ new z(q(opt_element)) : 100;
+ // -1: WRONG_INDENTATION
+};
+
+longFunctionName(a, b, c,
+ d, e, f); // WRONG_INDENTATION
+longFunctionName(a, b,
+ c, // WRONG_INDENTATION
+ d); // WRONG_INDENTATION
+
+x = a ? b :
+ c; // WRONG_INDENTATION
+y = a ?
+ b : c; // WRONG_INDENTATION
+
+switch (x) {
+ case 10:
+ break; // WRONG_INDENTATION
+ case 20: // WRONG_INDENTATION
+ break;
+ default: // WRONG_INDENTATION
+ break;
+}
+
+while (true) {
+ x = 10; // WRONG_INDENTATION
+ break; // WRONG_INDENTATION
+}
+
+function foo() {
+ return entryUrlTemplate
+ .replace(
+ '${authorResourceId}',
+ this.sanitizer_.sanitize(authorResourceId));
+}
+
+return [new x(
+ 10)];
+return [
+ new x(10)];
+
+return [new x(
+ 10)]; // WRONG_INDENTATION
+return [new x(
+ 10)]; // WRONG_INDENTATION
+
+return {x: y(
+ z)};
+return {
+ x: y(z)
+};
+
+return {x: y(
+ z)}; // WRONG_INDENTATION
+return {x: y(
+ z)}; // WRONG_INDENTATION
+
+return /** @type {Window} */ (x(
+ 'javascript:"' + encodeURI(loadingMessage) + '"')); // WRONG_INDENTATION
+
+x = {
+ y: function() {}
+};
+
+x = {
+ y: foo,
+ z: bar +
+ baz // WRONG_INDENTATION
+};
+
+x({
+ a: b
+},
+10);
+
+z = function(arr, f, val, opt_obj) {
+ x(arr, function(val, index) {
+ rval = f.call(opt_obj, rval, val, index, arr);
+ });
+};
+
+var xyz = [100,
+ 200,
+ 300];
+
+var def = [100,
+ 200]; // WRONG_INDENTATION
+
+var ghi = [100,
+ 200]; // WRONG_INDENTATION
+
+var abcdefg = ('a' +
+ 'b');
+
+var x9 = z('7: ' +
+ x(x)); // WRONG_INDENTATION
+
+function abc() {
+ var z = d('div', // UNUSED_LOCAL_VARIABLE
+ {
+ a: 'b'
+ });
+}
+
+abcdefg('p', {x: 10},
+ 'Para 1');
+
+function bar1() {
+ return 3 +
+ 4; // WRONG_INDENTATION
+}
+
+function bar2() {
+ return 3 + // WRONG_INDENTATION
+ 4; // WRONG_INDENTATION
+}
+
+function bar3() {
+ return 3 + // WRONG_INDENTATION
+ 4;
+}
+
+// Regression test for unfiled bug. Wrongly going into implied block after else
+// when there was an explicit block (was an else if) caused false positive
+// indentation errors.
+if (true) {
+} else if (doc.foo(
+ doc.getBar(baz))) {
+ var x = 3;
+}
+
+// Regression tests for function indent + 4.
+// (The first example is from the styleguide.)
+if (veryLongFunctionNameA(
+ veryLongArgumentName) ||
+ veryLongFunctionNameB(
+ veryLongArgumentName)) {
+ veryLongFunctionNameC(veryLongFunctionNameD(
+ veryLongFunctioNameE(
+ veryLongFunctionNameF)));
+}
+
+if (outer(middle(
+ inner(first)))) {}
+if (outer(middle(
+ inner(second)),
+ outer_second)) {}
+if (nested.outer(
+ first)) {}
+if (nested.outer(nested.middle(
+ first))) {}
+if (nested
+ .outer(nested.middle(
+ first))) {}
+if (nested.outer(first
+ .middle(
+ second),
+ third)) {}
+
+// goog.scope should not increase indentation.
+goog.scope(function() {
+var x = 5;
+while (x > 0) {
+ --x;
+}
+}); // goog.scope
+
+
+goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE
+// +1: UNUSED_LOCAL_VARIABLE
+var x = 5; // WRONG_INDENTATION
+}); // goog.scope
+
+goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE
+var x = 5; // UNUSED_LOCAL_VARIABLE
+}); // goog.scope
+
+goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE
+var x = 5; // UNUSED_LOCAL_VARIABLE
+}); // goog.scope
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.in.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.in.js
new file mode 100644
index 0000000000..974af915d3
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.in.js
@@ -0,0 +1,21 @@
+// Correct dot placement:
+var x = window.some()
+ .method()
+ .calls();
+
+// Wrong dots:
+window.
+ some().
+ // With a comment in between.
+ method().
+ calls();
+
+// Wrong plus operator:
+var y = 'hello'
+ + 'world'
+ // With a comment in between.
+ + '!';
+
+// Correct plus operator (untouched):
+var y = 'hello' +
+ 'world';
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.out.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.out.js
new file mode 100644
index 0000000000..c03e11730a
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.oplineend.out.js
@@ -0,0 +1,21 @@
+// Correct dot placement:
+var x = window.some()
+ .method()
+ .calls();
+
+// Wrong dots:
+window
+ .some()
+ // With a comment in between.
+ .method()
+ .calls();
+
+// Wrong plus operator:
+var y = 'hello' +
+ 'world' +
+ // With a comment in between.
+ '!';
+
+// Correct plus operator (untouched):
+var y = 'hello' +
+ 'world';
diff --git a/tools/closure_linter/closure_linter/testdata/fixjsstyle.out.js b/tools/closure_linter/closure_linter/testdata/fixjsstyle.out.js
new file mode 100644
index 0000000000..37fe2b8b3c
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/fixjsstyle.out.js
@@ -0,0 +1,310 @@
+// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Autofix test script.
+ *
+ * @author robbyw@google.com (Robby Walker)
+ * @author robbyw@google.com (Robby Walker)
+ * @author robbyw@google.com (Robby Walker)
+ * @author robbyw@google.com (Robby Walker)
+ * @author robbyw@google.com (Robby)
+ * @author robbyw@google.com
+ * @author robbyw@google.com Robby
+ */
+
+goog.provide('X');
+goog.provide('Y');
+goog.provide('Z');
+goog.provide('w');
+
+goog.require('dummy.Bb');
+goog.require('dummy.Cc');
+// Some comment about why this is suppressed bottom.
+/** @suppress {extraRequire} */
+goog.require('dummy.NotUsedBottom'); // Comment bottom.
+/** @suppress {extraRequire} */
+// Some comment about why this is suppressed different.
+goog.require('dummy.NotUsedDifferentComment');
+// Some comment about why this is suppressed middle.
+/** @suppress {extraRequire} */
+goog.require('dummy.NotUsedMiddle'); // Comment middle.
+// Some comment about why this is suppressed top.
+/** @suppress {extraRequire} */
+goog.require('dummy.NotUsedTop'); // Comment top.
+goog.require('dummy.aa');
+
+var x = new dummy.Bb();
+dummy.Cc.someMethod();
+dummy.aa.someMethod();
+
+
+/**
+ * @param {?number} badTypeWithExtraSpace |null -> ?.
+ * @return {number} returns -> return.
+ */
+x.y = function(badTypeWithExtraSpace) {
+};
+
+
+/** @type {function():null|Array.<?string>} only 2nd |null -> ? */
+x.badType;
+
+
+/** @type {?Array.<number|string|null>} only 2nd |null -> ? */
+x.wickedType;
+
+
+/** @type {? string } null -> ? */
+x.nullWithSpace;
+
+spaceBeforeSemicolon = 10;
+spaceBeforeParen = 10 + (5 * 2);
+arrayNoSpace = [10];
+arrayExtraSpace[10] = 10;
+spaceBeforeClose = ([10]);
+spaceAfterStart = ([10]);
+extraSpaceAfterPlus = 10 + 20;
+extraSpaceBeforeOperator = x++;
+extraSpaceBeforeOperator = x--;
+extraSpaceBeforeComma = x(y, z);
+missingSpaceBeforeOperator = x + y;
+missingSpaceAfterOperator = x + y;
+missingBothSpaces = x + y;
+equalsSpacing = 10;
+equalsSpacing = 10;
+equalsSpacing = 10;
+equalsSpacing = [10];
+reallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongName = 1000;
+
+'string should be single quotes';
+
+// Regression test for interaction between space fixing and semicolon fixing -
+// previously the fix for the missing space caused the function to be seen as
+// a non-assigned function and then its semicolon was being stripped.
+x = function() {
+};
+
+
+
+/**
+ * Missing a newline.
+ * @constructor
+ * @extends {a.b.c}
+ */
+x.y.z = function() {
+};
+goog.inherits(x.y.z, a.b.c);
+
+
+
+/**
+ * Extra blank line.
+ * @constructor
+ * @extends {a.b.c}
+ */
+x.y.z = function() {
+};
+goog.inherits(x.y.z, a.b.c);
+
+
+
+/**
+ * Perfect!
+ * @constructor
+ * @extends {a.b.c}
+ */
+x.y.z = function() {
+};
+goog.inherits(x.y.z, a.b.c);
+
+// Whitespace at end of comment.
+var removeWhiteSpaceAtEndOfLine;
+
+
+/**
+ * Whitespace at EOL (here and the line of code and the one below it).
+ * @type {string}
+ * @param {string} Description with whitespace at EOL.
+ */
+x = 10;
+
+
+/**
+ * @type {number}
+ */
+foo.bar = 3;
+
+
+/**
+ * @enum {boolean}
+ */
+bar.baz = true;
+
+
+/**
+ * @extends {Object}
+ */
+bar.foo = x;
+
+
+/**
+ * @type {function(string, boolean) : void}
+ */
+baz.bar = goog.nullFunction;
+
+
+/** @inheritDoc */
+baz.baz = function() {
+};
+
+TR_Node.splitDomTreeAt(splitNode, clone, /** @type {Node} */ (quoteNode));
+
+x = [1, 2, 3,];
+x = {
+ a: 1,
+};
+
+if (x) {
+}
+
+for (i = 0; i < 10; i++) {
+}
+for (i = 0; i < 10; i++) {
+}
+for (i = 0; i < 10; i++) {
+}
+for (i = 0; i < 10; i++) {
+}
+for (i = 0; i < 10; i++) {
+}
+for (i = 0; i < 10; i++) {
+}
+for (i = 0; i < 10; i++) {
+}
+for (i = 0; i < 10; i++) {
+}
+for (i = 0; i < 10; i++) {
+}
+
+var x = 10;
+var y = 100;
+
+
+/**
+ * This is to test the ability to add or remove a = in type to mark optional
+ * parameters.
+ * @param {number=} firstArg Incorrect the name should start with opt_. Don't
+ * handle the fix (yet).
+ * @param {function(string=):number=} opt_function This should end with a =.
+ * @param {function(number)=} opt_otherFunc This should end with a =.
+ * @param {string=} opt_otherArg Incorrect this should be string=.
+ * @param {{string, number}=} opt_recordArg Incorrect this should
+ * be {string, number}=.
+ */
+function someFunction(firstArg, opt_function, opt_otherFunc, opt_otherArg,
+ opt_recordArg) {
+}
+
+
+/**
+ * This is to test the ability to add '...' in type with variable arguments.
+ * @param {number} firstArg First argument.
+ * @param {...string} var_args This should start with '...'.
+ */
+function varArgFunction(firstArg, var_args) {
+}
+
+
+/**
+ * This is to test the ability to add '...' in type with variable arguments.
+ * @param {number} firstArg First argument.
+ * @param {...{a, b}} var_args This should start with '...'.
+ */
+function varArgRecordTypeFunction(firstArg, var_args) {
+}
+
+var indent = 'correct';
+indent = 'too far';
+if (indent) {
+ indent = 'too short';
+}
+indent = function() {
+ return a +
+ b;
+};
+
+
+/**
+ * Regression test, must insert whitespace before the 'b' when fixing
+ * indentation. Its different from below case of bug 3473113 as has spaces
+ * before parameter which was not working in part of the bug fix.
+ */
+indentWrongSpaces = function(
+ b) {
+};
+
+
+/**
+ * Regression test, must insert whitespace before the 'b' when fixing
+ * indentation.
+ * @bug 3473113
+ */
+indent = function(
+ b) {
+};
+
+
+/**
+ * This is to test the ability to remove multiple extra lines before a top-level
+ * block.
+ */
+function someFunction() {}
+
+
+/**
+ * This is to test the ability to add multiple extra lines before a top-level
+ * block.
+ */
+function someFunction() {}
+
+
+// This is a comment.
+/**
+ * This is to test that blank lines removed before a top level block skips any
+ * comments above the block.
+ */
+function someFunction() {}
+
+
+// This is a comment.
+/**
+ * This is to test that blank lines added before a top level block skips any
+ * comments above the block.
+ */
+function someFunction() {}
+
+
+/**
+ * Parameters don't have proper spaces.
+ * @param {number} a
+ * @param {number} b
+ * @param {number} d
+ * @param {number} e
+ * @param {number} f
+ */
+function someFunction(a, b, d, e, f) {
+}
+
+// File does not end with newline
diff --git a/tools/closure_linter/closure_linter/testdata/goog_scope.js b/tools/closure_linter/closure_linter/testdata/goog_scope.js
new file mode 100644
index 0000000000..aa655d8e6d
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/goog_scope.js
@@ -0,0 +1,63 @@
+// Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Tests provides/requires in the presence of goog.scope.
+ * There should be no errors for missing provides or requires.
+ *
+ * @author nicksantos@google.com (Nick Santos)
+ */
+
+goog.provide('goog.something.Something');
+
+goog.require('goog.util.Else');
+
+goog.scope(function() {
+var Else = goog.util.Else;
+var something = goog.something;
+
+/** // WRONG_BLANK_LINE_COUNT
+ * This is a something.
+ * @constructor
+ */
+something.Something = function() {
+ /**
+ * This is an else.
+ * @type {Else}
+ */
+ this.myElse = new Else();
+
+ /** @type {boolean} */
+ this.private_ = false; // MISSING_PRIVATE, UNUSED_PRIVATE_MEMBER
+};
+
+/** // WRONG_BLANK_LINE_COUNT
+ * // +3: MISSING_PRIVATE
+ * Missing private.
+ */
+something.withTrailingUnderscore_ = 'should be declared @private';
+
+/** // WRONG_BLANK_LINE_COUNT
+ * Does nothing.
+ */
+something.Something.prototype.noOp = function() {};
+
+
+/**
+ * Does something.
+ * Tests for included semicolon in function expression in goog.scope.
+ */
+something.Something.prototype.someOp = function() {
+} // MISSING_SEMICOLON_AFTER_FUNCTION
+}); // goog.scope
diff --git a/tools/closure_linter/closure_linter/testdata/html_parse_error.html b/tools/closure_linter/closure_linter/testdata/html_parse_error.html
new file mode 100644
index 0000000000..df61da10c7
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/html_parse_error.html
@@ -0,0 +1,32 @@
+<!DOCTYPE html>
+<html lang="en" dir="ltr">
+<head>
+ <!--
+ Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS-IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+ <title>GJsLint HTML JavaScript extraction tests</title>
+</head>
+<body>
+ Text outside the script tag should not be linted as JavaScript.
+ Stray closing brace: }
+ <script type="text/javascript">
+ /**
+ * @type {!Array.<!Array.<string>>}
+ */
+ var badParse = [['a']];
+ </script>
+ <div>Some more non-JavaScript text with missing whitespace: (a+b).</div>
+</body>
+</html>
diff --git a/tools/closure_linter/closure_linter/testdata/indentation.js b/tools/closure_linter/closure_linter/testdata/indentation.js
new file mode 100644
index 0000000000..10d2ad0174
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/indentation.js
@@ -0,0 +1,465 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Test file for indentation.
+ * @author robbyw@google.com (Robert Walker)
+ */
+
+goog.provide('goog.editor.SeamlessField');
+goog.provide('goog.something');
+
+goog.require('goog.events.KeyCodes');
+goog.require('goog.userAgent');
+
+// Some good indentation examples.
+
+var x = 10;
+var y = 'some really really really really really really really long string',
+ z = 14;
+if (x == 10) {
+ x = 12;
+}
+if (x == 10 ||
+ x == 12) {
+ x = 14;
+}
+if (x == 14) {
+ if (z >= x) {
+ y = 'test';
+ }
+}
+x = x +
+ 10 + (
+ 14
+ );
+something =
+ 5;
+var arr = [
+ 1, 2, 3];
+var arr2 = [
+ 1,
+ 2,
+ 3];
+var obj = {
+ a: 10,
+ b: 20
+};
+callAFunction(10, [100, 200],
+ 300);
+callAFunction([
+ 100,
+ 200
+],
+300);
+callAFunction('abc' +
+ 'def' +
+ 'ghi');
+
+x.reallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongName
+ .someMember = 10;
+
+
+// confused on allowed indentation in continued function assignments vs overlong
+// wrapped function calls.
+some.sample(). // LINE_ENDS_WITH_DOT
+ then(function(response) {
+ return 1;
+ });
+
+
+/**
+ * Some function.
+ * @return {number} The number ten.
+ */
+goog.something.x = function() {
+ return 10 +
+ 20;
+};
+
+
+/**
+ * Some function.
+ * @param {number} longParameterName1 Some number.
+ * @param {number} longParameterName2 Some number.
+ * @param {number} longParameterName3 Some number.
+ * @return {number} Sum number.
+ */
+goog.something.y = function(longParameterName1, longParameterName2,
+ longParameterName3) {
+ return longParameterName1 + longParameterName2 + longParameterName3;
+};
+
+
+/**
+ * Some function.
+ * @param {number} longParameterName1 Some number.
+ * @param {number} longParameterName2 Some number.
+ * @param {number} longParameterName3 Some number.
+ * @return {number} Sum number.
+ */
+goog.something.z = function(longParameterName1, longParameterName2,
+ longParameterName3) {
+ return longParameterName1 + longParameterName2 + longParameterName3;
+};
+
+if (opt_rootTagName) {
+ doc.appendChild(doc.createNode(3,
+ opt_rootTagName,
+ opt_namespaceUri || ''));
+}
+
+
+/**
+ * For a while this errored because the function call parens were overriding
+ * the other opening paren.
+ */
+goog.something.q = function() {
+ goog.something.x(a.getStartNode(),
+ a.getStartOffset(), a.getEndNode(), a.getEndOffset());
+};
+
+function doSomething() {
+ var titleElement = goog.something(x, // UNUSED_LOCAL_VARIABLE
+ y);
+}
+
+switch (x) {
+ case 10:
+ y = 100;
+ break;
+
+ // This should be allowed.
+ case 20:
+ if (y) {
+ z = 0;
+ }
+ break;
+
+ // This should be allowed,
+ // even with mutliple lines.
+ case 30:
+ if (y) {
+ z = 0;
+ }
+ break;
+
+ case SadThatYouSwitch
+ .onSomethingLikeThis:
+ z = 10;
+
+ case 40:
+ z = 20;
+
+ default:
+ break;
+}
+
+// Description of if case.
+if (x) {
+
+// Description of else case should be allowed at this indent.
+// Multiple lines is ok.
+} else {
+
+}
+
+
+/** @inheritDoc */
+goog.editor.SeamlessField.prototype.setupMutationEventHandlersGecko =
+ function() {
+ var x = 10;
+ x++;
+};
+
+
+// Regression test for '.' at the end confusing the indentation checker if it is
+// not considered to be part of the identifier.
+/** @inheritDoc */
+goog.editor.SeamlessField.prototype.
+ setupMutationEventHandlersGecko = function() {
+ // -2: LINE_ENDS_WITH_DOT
+ var x = 10;
+ x++;
+};
+
+var someReallyReallyLongVariableName =
+ y ? /veryVeryVeryVeryVeryVeryVeryVeryLongRegex1/gi :
+ /slightlyLessLongRegex2/gi;
+
+var somethingOrOther = z ?
+ a :
+ b;
+
+var z = x ? y :
+ 'bar';
+
+var z = x ?
+ y :
+ a;
+
+var z = z ?
+ a ? b : c :
+ d ? e : f;
+
+var z = z ?
+ a ? b :
+ c :
+ d ? e : f;
+
+var z = z ?
+ a ?
+ b :
+ c :
+ d ? e : f;
+
+var z = z ?
+ a ? b : c :
+ d ? e :
+ f ? g : h;
+
+var z = z ?
+ a +
+ i ?
+ b +
+ j : c :
+ d ? e :
+ f ? g : h;
+
+
+if (x) {
+ var block =
+ // some comment
+ // and some more comment
+ (e.keyCode == goog.events.KeyCodes.TAB && !this.dispatchBeforeTab_(e)) ||
+ // #2: to block a Firefox-specific bug where Macs try to navigate
+ // back a page when you hit command+left arrow or comamnd-right arrow.
+ // See https://bugzilla.mozilla.org/show_bug.cgi?id=341886
+ // get Firefox to fix this.
+ (goog.userAgent.GECKO && e.metaKey &&
+ (e.keyCode == goog.events.KeyCodes.LEFT ||
+ e.keyCode == goog.events.KeyCodes.RIGHT));
+}
+
+if (x) {
+}
+
+var somethingElse = {
+ HAS_W3C_RANGES: goog.userAgent.GECKO || goog.userAgent.WEBKIT ||
+ goog.userAgent.OPERA,
+
+ // A reasonably placed comment.
+ SOME_KEY: goog.userAgent.IE
+};
+
+var x = {
+ ySomethingReallyReallyLong:
+ 'foo',
+ z: 'bar'
+};
+
+// Some bad indentation.
+
+ var a = 10; // WRONG_INDENTATION
+var b = 10,
+ c = 12; // WRONG_INDENTATION
+x = x +
+ 10; // WRONG_INDENTATION
+if (x == 14) {
+ x = 15; // WRONG_INDENTATION
+ x = 16; // WRONG_INDENTATION
+}
+
+var longFunctionName = function(opt_element) {
+ return opt_element ?
+ new z(q(opt_element)) : 100;
+ // -1: WRONG_INDENTATION
+};
+
+longFunctionName(a, b, c,
+ d, e, f); // WRONG_INDENTATION
+longFunctionName(a, b,
+ c, // WRONG_INDENTATION
+ d); // WRONG_INDENTATION
+
+x = a ? b :
+ c; // WRONG_INDENTATION
+y = a ?
+ b : c; // WRONG_INDENTATION
+
+switch (x) {
+ case 10:
+ break; // WRONG_INDENTATION
+ case 20: // WRONG_INDENTATION
+ break;
+default: // WRONG_INDENTATION
+ break;
+}
+
+while (true) {
+ x = 10; // WRONG_INDENTATION
+ break; // WRONG_INDENTATION
+}
+
+function foo() {
+ return entryUrlTemplate
+ .replace(
+ '${authorResourceId}',
+ this.sanitizer_.sanitize(authorResourceId));
+}
+
+return [new x(
+ 10)];
+return [
+ new x(10)];
+
+return [new x(
+ 10)]; // WRONG_INDENTATION
+return [new x(
+ 10)]; // WRONG_INDENTATION
+
+return {x: y(
+ z)};
+return {
+ x: y(z)
+};
+
+return {x: y(
+ z)}; // WRONG_INDENTATION
+return {x: y(
+ z)}; // WRONG_INDENTATION
+
+return /** @type {Window} */ (x(
+'javascript:"' + encodeURI(loadingMessage) + '"')); // WRONG_INDENTATION
+
+x = {
+ y: function() {}
+};
+
+x = {
+ y: foo,
+ z: bar +
+ baz // WRONG_INDENTATION
+};
+
+x({
+ a: b
+},
+10);
+
+z = function(arr, f, val, opt_obj) {
+ x(arr, function(val, index) {
+ rval = f.call(opt_obj, rval, val, index, arr);
+ });
+};
+
+var xyz = [100,
+ 200,
+ 300];
+
+var def = [100,
+ 200]; // WRONG_INDENTATION
+
+var ghi = [100,
+ 200]; // WRONG_INDENTATION
+
+var abcdefg = ('a' +
+ 'b');
+
+var x9 = z('7: ' +
+x(x)); // WRONG_INDENTATION
+
+function abc() {
+ var z = d('div', // UNUSED_LOCAL_VARIABLE
+ {
+ a: 'b'
+ });
+}
+
+abcdefg('p', {x: 10},
+ 'Para 1');
+
+function bar1() {
+ return 3 +
+ 4; // WRONG_INDENTATION
+}
+
+function bar2() {
+ return 3 + // WRONG_INDENTATION
+ 4; // WRONG_INDENTATION
+}
+
+function bar3() {
+ return 3 + // WRONG_INDENTATION
+ 4;
+}
+
+// Regression test for unfiled bug. Wrongly going into implied block after else
+// when there was an explicit block (was an else if) caused false positive
+// indentation errors.
+if (true) {
+} else if (doc.foo(
+ doc.getBar(baz))) {
+ var x = 3;
+}
+
+// Regression tests for function indent + 4.
+// (The first example is from the styleguide.)
+if (veryLongFunctionNameA(
+ veryLongArgumentName) ||
+ veryLongFunctionNameB(
+ veryLongArgumentName)) {
+ veryLongFunctionNameC(veryLongFunctionNameD(
+ veryLongFunctioNameE(
+ veryLongFunctionNameF)));
+}
+
+if (outer(middle(
+ inner(first)))) {}
+if (outer(middle(
+ inner(second)),
+ outer_second)) {}
+if (nested.outer(
+ first)) {}
+if (nested.outer(nested.middle(
+ first))) {}
+if (nested
+ .outer(nested.middle(
+ first))) {}
+if (nested.outer(first
+ .middle(
+ second),
+ third)) {}
+
+// goog.scope should not increase indentation.
+goog.scope(function() {
+var x = 5;
+while (x > 0) {
+ --x;
+}
+}); // goog.scope
+
+
+goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE
+// +1: UNUSED_LOCAL_VARIABLE
+ var x = 5; // WRONG_INDENTATION
+}); // goog.scope
+
+goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE
+var x = 5; // UNUSED_LOCAL_VARIABLE
+}); // MISSING_END_OF_SCOPE_COMMENT
+
+goog.scope(function() { // EXTRA_GOOG_SCOPE_USAGE
+var x = 5; // UNUSED_LOCAL_VARIABLE
+}); // malformed goog.scope comment // MALFORMED_END_OF_SCOPE_COMMENT
diff --git a/tools/closure_linter/closure_linter/testdata/interface.js b/tools/closure_linter/closure_linter/testdata/interface.js
new file mode 100644
index 0000000000..7daeee3ca8
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/interface.js
@@ -0,0 +1,89 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Test file for interfaces.
+ * @author robbyw@google.com (Robert Walker)
+ */
+
+goog.provide('sample.BadInterface');
+goog.provide('sample.GoodInterface');
+
+
+
+/**
+ * Sample interface to demonstrate correct style.
+ * @interface
+ */
+sample.GoodInterface = function() {
+};
+
+
+/**
+ * Legal methods can take parameters and have a return type.
+ * @param {string} param1 First parameter.
+ * @param {Object} param2 Second parameter.
+ * @return {number} Some return value.
+ */
+sample.GoodInterface.prototype.legalMethod = function(param1, param2) {
+};
+
+
+/**
+ * Legal methods can also take no parameters and return nothing.
+ */
+sample.GoodInterface.prototype.legalMethod2 = function() {
+ // Comments should be allowed.
+};
+
+
+/**
+ * Legal methods can also be omitted, even with params and return values.
+ * @param {string} param1 First parameter.
+ * @param {Object} param2 Second parameter.
+ * @return {number} Some return value.
+ */
+sample.GoodInterface.prototype.legalMethod3;
+
+
+/**
+ * Legal methods can also be set to abstract, even with params and return
+ * values.
+ * @param {string} param1 First parameter.
+ * @param {Object} param2 Second parameter.
+ * @return {number} Some return value.
+ */
+sample.GoodInterface.prototype.legalMethod4 = goog.abstractMethod;
+
+
+
+/**
+ * Sample interface to demonstrate style errors.
+ * @param {string} a This is illegal.
+ * @interface
+ */
+sample.BadInterface = function(a) { // INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS
+ this.x = a; // INTERFACE_METHOD_CANNOT_HAVE_CODE
+};
+
+
+/**
+ * It is illegal to include code in an interface method.
+ * @param {string} param1 First parameter.
+ * @param {Object} param2 Second parameter.
+ * @return {number} Some return value.
+ */
+sample.BadInterface.prototype.illegalMethod = function(param1, param2) {
+ return 10; // INTERFACE_METHOD_CANNOT_HAVE_CODE
+};
diff --git a/tools/closure_linter/closure_linter/testdata/jsdoc.js b/tools/closure_linter/closure_linter/testdata/jsdoc.js
new file mode 100644
index 0000000000..d62fd3c5fc
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/jsdoc.js
@@ -0,0 +1,1455 @@
+// Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Errors related to JsDoc.
+ *
+ * @author robbyw@google.com (Robby Walker)
+ *
+ * @author robbyw@google.com (Robby Walker) // EXTRA_SPACE, EXTRA_SPACE
+ * @author robbyw@google.com(Robby Walker) // MISSING_SPACE
+ *
+ * @author robbyw@google.com () // INVALID_AUTHOR_TAG_DESCRIPTION
+ * @author robbyw@google.com // INVALID_AUTHOR_TAG_DESCRIPTION
+ *
+ * @owner ajp@google.com (Andy Perelson)
+ * @badtag // INVALID_JSDOC_TAG
+ * @customtag This tag is passed as a flag in full_test.py
+ * @requires anotherCustomTagPassedInFromFullTestThatShouldAllowASingleWordLongerThan80Lines
+ * @requires firstWord, secondWordWhichShouldMakeThisLineTooLongSinceThereIsAFirstWord
+ * @wizmodule
+ * @wizModule // INVALID_JSDOC_TAG
+ */
+// -4: LINE_TOO_LONG
+
+goog.provide('MyClass');
+goog.provide('goog.NumberLike');
+goog.provide('goog.math.Vec2.sum');
+
+goog.require('goog.array');
+goog.require('goog.color');
+goog.require('goog.dom.Range');
+goog.require('goog.math.Matrix');
+goog.require('goog.math.Vec2');
+
+
+/**
+ * Test the "no compilation should be done after annotation processing" tag.
+ * @nocompile
+ */
+
+
+/**
+ * @returns // INVALID_JSDOC_TAG
+ * @params // INVALID_JSDOC_TAG
+ * @defines // INVALID_JSDOC_TAG
+ * @nginject // INVALID_JSDOC_TAG
+ * @wizAction // INVALID_JSDOC_TAG
+ */
+function badTags() {
+}
+
+
+// +4: MISSING_JSDOC_TAG_DESCRIPTION
+/**
+ * @license Description.
+ * @preserve Good tag, missing punctuation
+ * @preserve
+ */
+function goodTags() {
+ /** @preserveTry */
+ try {
+ hexColor = goog.color.parse(value).hex;
+ } catch (ext) {
+ // Regression test. The preserveTry tag was incorrectly causing a warning
+ // for a missing period at the end of tag description. Parsed as
+ // flag: preserve, description: Try.
+ }
+}
+
+
+/**
+ * Some documentation goes here.
+ *
+ * @param {Object} object Good docs.
+ * @ngInject
+ * @wizaction
+ */
+function good(object) {
+}
+
+
+/**
+ * Some documentation goes here.
+ * @param {function(string, string) : string} f A function.
+ */
+function setConcatFunc(f) {
+}
+
+
+/**
+ * Some docs.
+ */
+function missingParam(object) { // MISSING_PARAMETER_DOCUMENTATION
+}
+
+
+/**
+ * @return {number} Hiya.
+ * @override
+ */
+function missingParamButInherit(object) {
+ return 3;
+}
+
+
+/**
+ * @inheritDoc
+ */
+function missingParamButInherit(object) {
+}
+
+
+/**
+ * @override
+ */
+function missingParamButOverride(object) {
+}
+
+
+// +2: UNNECESSARY_BRACES_AROUND_INHERIT_DOC
+/**
+ * {@inheritDoc}
+ */
+function missingParamButInherit(object) {
+}
+
+
+/**
+ * Some docs.
+ *
+ * @param {Object} object Docs.
+ */
+function mismatchedParam(elem) { // WRONG_PARAMETER_DOCUMENTATION
+ /** @param {number} otherElem */
+ function nestedFunction(elem) { // WRONG_PARAMETER_DOCUMENTATION
+ };
+}
+
+
+/**
+ * @return {boolean} A boolean primitive.
+ */
+function goodReturn() {
+ return something;
+}
+
+
+/**
+ * @return {some.long.type.that.will.make.the.description.start.on.next.line}
+ * An object.
+ */
+function anotherGoodReturn() {
+ return something;
+}
+
+
+// +2: MISSING_JSDOC_TAG_TYPE
+/**
+ * @return false.
+ */
+function missingReturnType() {
+ return something;
+}
+
+
+// +2: MISSING_SPACE
+/**
+ * @return{type}
+ */
+function missingSpaceOnReturnType() {
+ return something;
+}
+
+
+// +2: MISSING_JSDOC_TAG_TYPE
+/**
+ * @return
+ */
+function missingReturnType() {
+ return something;
+}
+
+class.missingDocs = function() { // MISSING_MEMBER_DOCUMENTATION
+};
+
+
+/**
+ * No return doc needed.
+ */
+function okMissingReturnDoc() {
+ return;
+}
+
+
+// +2: UNNECESSARY_RETURN_DOCUMENTATION
+/**
+ * @return {number} Unnecessary return doc.
+ */
+function unnecessaryMissingReturnDoc() {
+}
+
+
+/**
+ * The "suppress" causes the compiler to ignore the 'debugger' statement.
+ * @suppress {checkDebuggerStatement}
+ */
+function checkDebuggerStatementWithSuppress() {
+ debugger;
+}
+
+
+/**
+ * Return doc is present, but the function doesn't have a 'return' statement.
+ * The "suppress" causes the compiler to ignore the error.
+ * @suppress {missingReturn}
+ * @return {string}
+ */
+function unnecessaryMissingReturnDocWithSuppress() {
+ if (false) {
+ return '';
+ } else {
+ // Missing return statement in this branch.
+ }
+}
+
+
+// +3: MISSING_JSDOC_TAG_TYPE
+// +2: UNNECESSARY_RETURN_DOCUMENTATION
+/**
+ * @return
+ */
+function unnecessaryMissingReturnNoType() {
+}
+
+
+/**
+ * @return {undefined} Ok unnecessary return doc.
+ */
+function okUnnecessaryMissingReturnDoc() {
+}
+
+
+/**
+ * @return {*} Ok unnecessary return doc.
+ */
+function okUnnecessaryMissingReturnDoc2() {
+}
+
+
+/**
+ * @return {void} Ok unnecessary return doc.
+ */
+function okUnnecessaryMissingReturnDoc3() {
+}
+
+
+/**
+ * This function doesn't return anything, but it does contain the string return.
+ */
+function makeSureReturnTokenizesRight() {
+ fn(returnIsNotSomethingHappeningHere);
+}
+
+
+/**
+ * @return {number|undefined} Ok unnecessary return doc.
+ */
+function okUnnecessaryMissingReturnDoc3() {
+}
+
+
+/**
+ * @return {number} Ok unnecessary return doc.
+ */
+function okUnnecessaryReturnWithThrow() {
+ throw 'foo';
+}
+
+
+/** @inheritDoc */
+function okNoReturnWithInheritDoc() {
+ return 10;
+}
+
+
+/** @override */
+function okNoReturnWithOverride() {
+ return 10;
+}
+
+
+/**
+ * No return doc.
+ */ // MISSING_RETURN_DOCUMENTATION
+function badMissingReturnDoc() {
+ return 10;
+}
+
+
+
+/**
+ * Constructor so we should not have a return jsdoc tag.
+ * @constructor
+ */
+function OkNoReturnWithConstructor() {
+ return this;
+}
+
+
+/**
+ * Type of array is known, so the cast is unnecessary.
+ * @suppress {unnecessaryCasts}
+ */
+function unnecessaryCastWithSuppress() {
+ var numberArray = /** @type {!Array.<number>} */ ([]);
+ /** @type {number} */ (goog.array.peek(numberArray));
+}
+
+
+
+/**
+ * Make sure the 'unrestricted' annotation is accepted.
+ * @constructor @unrestricted
+ */
+function UnrestrictedClass() {}
+
+
+
+/**
+ * Check definition of fields in constructors.
+ * @constructor
+ */
+function AConstructor() {
+ /**
+ * A field.
+ * @type {string}
+ * @private
+ */
+ this.isOk_ = 'ok';
+
+ // +5: MISSING_PRIVATE
+ /**
+ * Another field.
+ * @type {string}
+ */
+ this.isBad_ = 'missing private';
+
+ /**
+ * This is ok, but a little weird.
+ * @type {number}
+ * @private
+ */
+ var x = this.x_ = 10;
+
+ // At first, this block mis-attributed the first typecast as a member doc,
+ // and therefore expected it to contain @private.
+ if (goog.math.Matrix.isValidArray(/** @type {Array} */ (m))) {
+ this.array_ = goog.array.clone(/** @type {Array.<Array.<number>>} */ (m));
+ }
+
+ // Use the private and local variables we've defined so they don't generate a
+ // warning.
+ var y = [
+ this.isOk_,
+ this.isBad_,
+ this.array_,
+ this.x_,
+ y,
+ x
+ ];
+}
+
+
+/**
+ * @desc This message description is allowed.
+ */
+var MSG_YADDA_YADDA_YADDA = 'A great message!';
+
+
+/**
+ * @desc So is this one.
+ * @hidden
+ * @meaning Some unusual meaning.
+ */
+x.y.z.MSG_YADDA_YADDA_YADDA = 'A great message!';
+
+
+/**
+ * @desc But desc can only apply to messages.
+ */
+var x = 10; // INVALID_USE_OF_DESC_TAG
+
+
+/**
+ * Same with hidden.
+ * @hidden
+ */
+var x = 10; // INVALID_USE_OF_DESC_TAG
+
+
+/**
+ * Same with meaning.
+ * @meaning Some unusual meaning.
+ */
+var x = 10; // INVALID_USE_OF_DESC_TAG
+
+
+// +9: MISSING_SPACE
+// +9: MISSING_JSDOC_TAG_TYPE
+// +10: OUT_OF_ORDER_JSDOC_TAG_TYPE
+// +10: MISSING_JSDOC_TAG_TYPE, MISSING_SPACE
+/**
+ * Lots of problems in this documentation.
+ *
+ * @param {Object} q params b & d are missing descriptions.
+ * @param {Object} a param d is missing a type (oh my).
+ * @param {Object}b
+ * @param d
+ * @param {Object} x param desc.
+ * @param z {type} Out of order type.
+ * @param{} y Empty type and missing space.
+ * @param {Object} omega mis-matched param.
+ */
+function manyProblems(a, b, c, d, x, z, y, alpha) {
+ // -1: MISSING_PARAMETER_DOCUMENTATION, EXTRA_PARAMETER_DOCUMENTATION
+ // -2: WRONG_PARAMETER_DOCUMENTATION
+}
+
+
+/**
+ * Good docs
+ *
+ * @param {really.really.really.really.really.really.really.long.type} good
+ * My param description.
+ * @param {really.really.really.really.really.really.really.really.long.type}
+ * okay My param description.
+ * @param
+ * {really.really.really.really.really.really.really.really.really.really.long.type}
+ * fine Wow that's a lot of wrapping.
+ */
+function wrappedParams(good, okay, fine) {
+}
+
+
+// +4: MISSING_JSDOC_TAG_TYPE
+// +3: MISSING_JSDOC_PARAM_NAME
+/**
+ * Really bad
+ * @param
+ */
+function reallyBadParam(a) { // MISSING_PARAMETER_DOCUMENTATION
+}
+
+
+/**
+ * Some docs.
+ *
+ * @private
+ */
+class.goodPrivate_ = function() {
+};
+
+
+/**
+ * Some docs.
+ */
+class.missingPrivate_ = function() { // MISSING_PRIVATE
+};
+
+
+/**
+ * Some docs.
+ *
+ * @private
+ */
+class.extraPrivate = function() { // EXTRA_PRIVATE
+};
+
+
+/**
+ * Anything ending with two underscores is not treated as private.
+ */
+class.__iterator__ = function() {
+};
+
+
+/**
+ * Some docs.
+ * @package
+ */
+class.goodPackage = function() {
+};
+
+
+/**
+ * Some docs.
+ * @package
+ */
+class.badPackage_ = function() { // MISSING_PRIVATE
+};
+
+
+/**
+ * Some docs.
+ * @protected
+ */
+class.goodProtected = function() {
+};
+
+
+/**
+ * Some docs.
+ * @protected
+ */
+class.badProtected_ = function() { // MISSING_PRIVATE
+};
+
+
+/**
+ * Example of a legacy name.
+ * @protected
+ * @suppress {underscore}
+ */
+class.dom_ = function() {
+ /** @suppress {with} */
+ with ({}) {}
+};
+
+
+/**
+ * Legacy names must be protected.
+ * @suppress {underscore}
+ */
+class.dom_ = function() {
+};
+
+
+/**
+ * Allow compound suppression.
+ * @private
+ */
+class.dom_ = function() {
+ /** @suppress {visibility|with} */
+ with ({}) {}
+};
+
+
+/**
+ * Allow compound suppression.
+ * @private
+ */
+class.dom_ = function() {
+ /** @suppress {visibility,with} */
+ with ({}) {}
+};
+
+
+// +4: UNNECESSARY_SUPPRESS
+/**
+ * Some docs.
+ * @private
+ * @suppress {underscore}
+ */
+class.unnecessarySuppress_ = function() {
+};
+
+
+/**
+ * Some docs.
+ * @public
+ */
+class.goodProtected = function() {
+};
+
+
+/**
+ * Some docs.
+ * @public
+ */
+class.badProtected_ = function() { // MISSING_PRIVATE
+};
+
+
+/**
+ * Example of a legacy name.
+ * @public
+ * @suppress {underscore}
+ */
+class.dom_ = function() {
+};
+
+
+// +5: JSDOC_PREFER_QUESTION_TO_PIPE_NULL
+// +7: JSDOC_PREFER_QUESTION_TO_PIPE_NULL
+/**
+ * Check JsDoc type annotations.
+ * @param {Object?} good A good one.
+ * @param {Object|null} bad A bad one.
+ * @param {Object|Element?} ok1 This is acceptable.
+ * @param {Object|Element|null} right The right way to do the above.
+ * @param {null|Object} bad2 Another bad one.
+ * @param {Object?|Element} ok2 Not good but acceptable.
+ * @param {Array.<string|number>?} complicated A good one that was reported as
+ * bad. See bug 1154506.
+ */
+class.sampleFunction = function(good, bad, ok1, right, bad2, ok2,
+ complicated) {
+};
+
+
+/**
+ * @return {Object?} A good return.
+ */
+class.goodReturn = function() {
+ return something;
+};
+
+
+/** @type {Array.<Object|null>} // JSDOC_PREFER_QUESTION_TO_PIPE_NULL */
+class.badType;
+
+
+/**
+ * For template types, the ?TYPE notation is not parsed correctly by the
+ * compiler, so don't warn here.
+ * @type {Array.<TYPE|null>}
+ * @template TYPE
+ */
+class.goodTemplateType;
+
+
+// As the syntax may look ambivalent: The function returns just null.
+/** @type {function():null|Object} */
+class.goodType;
+
+
+/** @type {function():(null|Object)} // JSDOC_PREFER_QUESTION_TO_PIPE_NULL */
+class.badType;
+
+
+// As the syntax may look ambivalent: The function returns just Object.
+/** @type {function():Object|null} // JSDOC_PREFER_QUESTION_TO_PIPE_NULL */
+class.badType;
+
+
+/** @type {(function():Object)|null} // JSDOC_PREFER_QUESTION_TO_PIPE_NULL */
+class.badType;
+
+
+/** @type {function(null,Object)} */
+class.goodType;
+
+
+/** @type {{a:null,b:Object}} */
+class.goodType;
+
+
+// +2: JSDOC_PREFER_QUESTION_TO_PIPE_NULL
+/**
+ * @return {Object|null} A bad return.
+ */
+class.badReturn = function() {
+ return something;
+};
+
+
+/**
+ * @return {Object|Element?} An not so pretty return, but acceptable.
+ */
+class.uglyReturn = function() {
+ return something;
+};
+
+
+/**
+ * @return {Object|Element|null} The right way to do the above.
+ */
+class.okReturn = function() {
+ return something;
+};
+
+
+// +2: MISSING_SPACE, MISSING_SPACE
+/**
+ * @return{mytype}Something.
+ */
+class.missingSpacesReturn = function() {
+ return something;
+};
+
+
+/**
+ * A good type in the new notation.
+ * @type {Object?}
+ */
+class.otherGoodType = null;
+
+
+/**
+ * A complex type that should allow both ? and |.
+ * @bug 1570763
+ * @type {function(number?, Object|undefined):void}
+ */
+class.complexGoodType = goog.nullFunction;
+
+
+/**
+ * A complex bad type that we can catch, though there are many we can't.
+ * Its acceptable.
+ * @type {Array.<string>|string?}
+ */
+class.complexBadType = x || 'foo';
+
+
+/**
+ * A strange good type that caught a bad version of type checking from
+ * other.js, so I added it here too just because.
+ * @type {number|string|Object|Element|Array.<Object>|null}
+ */
+class.aStrangeGoodType = null;
+
+
+/**
+ * A type that includes spaces.
+ * @type {function() : void}
+ */
+class.assignedFunc = goog.nullFunction;
+
+
+// +4: JSDOC_PREFER_QUESTION_TO_PIPE_NULL
+// +3: MISSING_BRACES_AROUND_TYPE
+/**
+ * A bad type.
+ * @type Object|null
+ */
+class.badType = null;
+
+
+// +3: JSDOC_PREFER_QUESTION_TO_PIPE_NULL
+/**
+ * A bad type, in the new notation.
+ * @type {Object|null}
+ */
+class.badType = null;
+
+
+/**
+ * An not pretty type, but acceptable.
+ * @type {Object|Element?}
+ */
+class.uglyType = null;
+
+
+/**
+ * The right way to do the above.
+ * @type {Object|Element|null}
+ */
+class.okType = null;
+
+
+/**
+ * @type {boolean} Is it okay to have a description here?
+ */
+class.maybeOkType = null;
+
+
+/**
+ * A property whose type will be infered from the right hand side since it is
+ * constant.
+ * @const
+ */
+class.okWithoutType = 'stout';
+
+
+/**
+ * Const property without type and text in next line. b/10407058.
+ * @const
+ * TODO(user): Nothing to do, just for scenario.
+ */
+class.okWithoutType = 'string';
+
+
+/**
+ * Another constant property, but we should use the type tag if the type can't
+ * be inferred.
+ * @type {string}
+ * @const
+ */
+class.useTypeWithConst = functionWithUntypedReturnValue();
+
+
+/**
+ * Another constant property, but using type with const if the type can't
+ * be inferred.
+ * @const {string}
+ */
+class.useTypeWithConst = functionWithUntypedReturnValue();
+
+
+// +3: MISSING_BRACES_AROUND_TYPE
+/**
+ * Constant property without proper type.
+ * @const string
+ */
+class.useImproperTypeWithConst = functionWithUntypedReturnValue();
+
+
+/**
+ * @define {boolean} A define.
+ */
+var COMPILED = false;
+
+
+// +2: MISSING_JSDOC_TAG_TYPE
+/**
+ * @define A define without type info.
+ */
+var UNTYPED_DEFINE = false;
+
+
+// +4: MISSING_JSDOC_TAG_DESCRIPTION, MISSING_SPACE
+/**
+ * A define without a description and missing a space.
+ *
+ * @define{boolean}
+ */
+var UNDESCRIBED_DEFINE = false;
+
+
+// Test where to check for docs.
+/**
+ * Docs for member object.
+ * @type {Object}
+ */
+x.objectContainingFunctionNeedsNoDocs = {
+ x: function(params, params) {}
+};
+
+if (test) {
+ x.functionInIfBlockNeedsDocs = function() { // MISSING_MEMBER_DOCUMENTATION
+ x.functionInFunctionNeedsNoDocs = function() {
+ };
+ };
+} else {
+ x.functionInElseBlockNeedsDocs = function() { // MISSING_MEMBER_DOCUMENTATION
+ x.functionInFunctionNeedsNoDocs = function() {
+ };
+ };
+}
+
+
+/**
+ * Regression test.
+ * @param {goog.math.Vec2} a
+ * @param {goog.math.Vec2} b
+ * @return {goog.math.Vec2} The sum vector.
+ */
+goog.math.Vec2.sum = function(a, b) {
+ return new goog.math.Vec2(a.x + b.x, a.y + b.y);
+};
+
+
+// +6: JSDOC_MISSING_OPTIONAL_PREFIX
+// +8: JSDOC_MISSING_OPTIONAL_PREFIX
+// +8: JSDOC_MISSING_OPTIONAL_TYPE
+// +8: JSDOC_MISSING_OPTIONAL_TYPE
+/**
+ * Optional parameters test.
+ * @param {number=} numberOptional The name should be prefixed by opt_.
+ * @param {function(number=)} funcOk Ok.
+ * @param {number} numberOk The type is ok.
+ * @param {function(string=):number=} funcOpt Param name need opt_ prefix.
+ * @param {string} opt_stringMissing The type miss an ending =.
+ * @param {function(number=)} opt_func The type miss an ending =.
+ * @param {string=} opt_ok The type is ok.
+ * @param {function(string=):number=} opt_funcOk Type is ok.
+ */
+goog.math.Vec2.aFunction = function(
+ numberOptional, funcOk, numberOk, funcOpt, opt_stringMissing, opt_func,
+ opt_ok, opt_funcOk) {
+};
+
+
+/**
+ * Good documentation!
+ *
+ * @override
+ */
+class.goodOverrideDocs = function() {
+};
+
+
+/**
+ * Test that flags embedded in docs don't trigger ends with invalid character
+ * error.
+ * @bug 2983692
+ * @deprecated Please use the {@code @hidden} annotation.
+ */
+function goodEndChar() {
+}
+
+
+/**
+ * Test that previous case handles unballanced doc tags.
+ * @param {boolean} a Whether we should honor '{' characters in the string.
+ */
+function goodEndChar2(a) {
+}
+
+
+/**
+ * Regression test for braces in description invalidly being matched as types.
+ * This caused a false error for missing punctuation because the bad token
+ * caused us to incorrectly calculate the full description.
+ * @bug 1406513
+ * @return {Object|undefined} A hash containing the attributes for the found url
+ * as in: {url: "page1.html", title: "First page"}
+ * or undefined if no match was found.
+ */
+x.z.a = function() {
+ return a;
+};
+
+
+/**
+ * @bug 1492606 HTML parse error for JSDoc descriptions grashed gjslint.
+ * @param {string} description a long email or common name, e.g.,
+ * "John Doe <john.doe@gmail.com>" or "Birthdays Calendar"
+ */
+function calendar(description) {
+}
+
+
+/**
+ * @bug 1492606 HTML parse error for JSDoc descriptions grashed gjslint.
+ * @param {string} description a long email or common name, e.g.,
+ * "John Doe <john.doe@gmail.com>" or <b>"Birthdays Calendar".</b>
+ */
+function calendar(description) {
+}
+
+
+/**
+ * Regression test for invoked functions, this code used to report missing
+ * param and missing return errors.
+ * @type {number}
+ */
+x.y.z = (function(x) {
+ return x + 1;
+})();
+
+
+/**
+ * Test for invoked function as part of an expression. It should not return
+ * an error for missing docs for x.
+ */
+goog.currentTime = something.Else || (function(x) {
+ //...
+})(10);
+
+
+/**
+ * @type boolean //MISSING_BRACES_AROUND_TYPE
+ */
+foo.bar = true;
+
+
+/**
+ * @enum {null //MISSING_BRACES_AROUND_TYPE
+ */
+bar.foo = null;
+
+
+/**
+ * @extends Object} //MISSING_BRACES_AROUND_TYPE
+ */ // JSDOC_DOES_NOT_PARSE
+bar.baz = x;
+
+
+/** @inheritDoc */ // INVALID_INHERIT_DOC_PRIVATE
+x.privateFoo_ = function() { // MISSING_PRIVATE
+};
+
+
+/**
+ * Does bar.
+ * @override // INVALID_OVERRIDE_PRIVATE
+ */
+x.privateBar_ = function() { // MISSING_PRIVATE
+};
+
+
+/**
+ * Inherits private baz_ method (evil, wrong behavior, but we have no choice).
+ * @override
+ * @suppress {accessControls}
+ */
+x.prototype.privateBaz_ = function() {
+};
+
+
+/**
+ * This looks like a function but it's a function call.
+ * @type {number}
+ */
+test.x = function() {
+ return 3;
+}();
+
+
+/**
+ * Invalid reference to this.
+ */ // MISSING_JSDOC_TAG_THIS
+test.x.y = function() {
+ var x = this.x; // UNUSED_LOCAL_VARIABLE
+};
+
+
+/**
+ * Invalid write to this.
+ */ // MISSING_JSDOC_TAG_THIS
+test.x.y = function() {
+ this.x = 10;
+};
+
+
+/**
+ * Invalid standalone this.
+ */ // MISSING_JSDOC_TAG_THIS
+test.x.y = function() {
+ some.func.call(this);
+};
+
+
+/**
+ * Invalid reference to this.
+ */ // MISSING_JSDOC_TAG_THIS
+function a() {
+ var x = this.x; // UNUSED_LOCAL_VARIABLE
+}
+
+
+/**
+ * Invalid write to this.
+ */ // MISSING_JSDOC_TAG_THIS
+function b() {
+ this.x = 10;
+}
+
+
+/**
+ * Invalid standalone this.
+ */ // MISSING_JSDOC_TAG_THIS
+function c() {
+ some.func.call(this);
+}
+
+
+/**
+ * Ok to do any in a prototype.
+ */
+test.prototype.x = function() {
+ var x = this.x;
+ this.y = x;
+ some.func.call(this);
+};
+
+
+/**
+ * Ok to do any in a prototype that ends in a hex-like number.
+ */
+test.prototype.getColorX2 = function() {
+ var x = this.x;
+ this.y = x;
+ some.func.call(this);
+};
+
+
+/**
+ * Ok to do any in a function with documented this usage.
+ * @this {test.x.y} Object bound to this via goog.bind.
+ */
+function a() {
+ var x = this.x;
+ this.y = x;
+ some.func.call(this);
+}
+
+
+/**
+ * Ok to do any in a function with documented this usage.
+ * @this {test.x.y} Object bound to this via goog.bind.
+ */
+test.x.y = function() {
+ var x = this.x;
+ this.y = x;
+ some.func.call(this);
+};
+
+
+/**
+ * Regression test for bug 1220601. Wrapped function declarations shouldn't
+ * cause need for an (at)this flag, which I can't write out or it would get
+ * parsed as being here.
+ * @param {Event} e The event.
+ */
+detroit.commands.ChangeOwnerCommand
+ .prototype.handleDocumentStoreCompleteEvent = function(e) {
+ this.x = e.target;
+};
+
+
+
+/**
+ * Ok to do any in a constructor.
+ * @constructor
+ */
+test.x.y = function() {
+ this.y = x;
+ var x = this.y; // UNUSED_LOCAL_VARIABLE
+ some.func.call(this);
+};
+
+// Test that anonymous function doesn't throw an error.
+window.setTimeout(function() {
+ var x = 10; // UNUSED_LOCAL_VARIABLE
+}, 0);
+
+
+/**
+ * @bug 1234567
+ */
+function testGoodBug() {
+}
+
+
+/**
+ * @bug 1234567 Descriptions are allowed.
+ */
+function testGoodBugWithDescription() {
+}
+
+
+// +2: NO_BUG_NUMBER_AFTER_BUG_TAG
+/**
+ * @bug Wrong
+ */
+function testBadBugNumber() {
+}
+
+
+// +2: NO_BUG_NUMBER_AFTER_BUG_TAG
+/**
+ * @bug Wrong
+ */
+function testMissingBugNumber() {
+}
+
+
+
+/**
+ * @interface
+ */
+function testInterface() {
+}
+
+
+
+/**
+ * @implements {testInterface}
+ * @constructor
+ */
+function testImplements() {
+}
+
+
+/**
+ * Function that has an export jsdoc tag.
+ * @export
+ */
+function testExport() {
+}
+
+
+/**
+ * Declare and doc this member here, without assigning to it.
+ * @bug 1473402
+ * @type {number}
+ */
+x.declareOnly;
+
+if (!someCondition) {
+ x.declareOnly = 10;
+}
+
+
+/**
+ * JsDoc describing array x.y as an array of function(arg). The missing
+ * semicolon caused the original bug.
+ * @type {Array.<Function>}
+ */
+x.y = [] // MISSING_SEMICOLON
+x.y[0] = function(arg) {};
+x.y[1] = function(arg) {};
+
+
+/**
+ * Regression test for unfiled bug where descriptions didn't properly exclude
+ * the star-slash that must end doc comments.
+ * @return {Function} A factory method.
+ */
+x.y.foo = function() {
+ /** @return {goog.dom.Range} A range. */
+ return function() {
+ return goog.dom.Range.createRangeFromNothing();
+ };
+};
+
+
+// +4: INCORRECT_SUPPRESS_SYNTAX
+// +4: INVALID_SUPPRESS_TYPE
+/**
+ * Docs...
+ * @suppress
+ * @suppress {fake}
+ */
+class.x = 10;
+
+
+/**
+ * These docs are OK. They used to not parse the identifier due to the use of
+ * array indices.
+ * @bug 1640846
+ * @private
+ */
+window['goog']['forms']['Validation'].prototype.form_ = null;
+
+
+/**
+ * Check JsDoc multiline type annotations.
+ * @param {string|
+ * number} multiline description.
+ */
+function testMultiline(multiline) {
+}
+
+
+/**
+ * Check JsDoc nosideeffects annotations.
+ * @nosideeffects
+ */
+function testNoSideEffects() {
+}
+
+
+/**
+ * @enum {google.visualization.DateFormat|google.visualization.NumberFormat|
+ * google.visualization.PatternFormat}
+ */
+MultiLineEnumTypeTest = {
+ FOO: 1,
+ BAR: 2,
+ BAZ: 3
+};
+
+
+/**
+ * @enum {google.visualization.DateFormat|google.visualization.NumberFormat|google.visualization.PatternFormat}
+ */
+AllowedLongLineEnum = {
+ CAT: 1,
+ DOG: 2,
+ RAT: 3
+};
+
+
+/**
+ * Typeless enum test
+ * @enum
+ */
+TypelessEnumTest = {
+ OK: 0,
+ CHECKING: 1,
+ DOWNLOADING: 2,
+ FAILURE: 3
+};
+
+// Regression test for bug 1880803, shouldn't need to document assignments to
+// prototype.
+x.prototype = {};
+
+y
+ .prototype = {};
+
+x.y
+ .z.prototype = {};
+
+x.myprototype = {}; // MISSING_MEMBER_DOCUMENTATION
+
+x.prototype.y = 5; // MISSING_MEMBER_DOCUMENTATION
+
+x.prototype
+ .y.z = {}; // MISSING_MEMBER_DOCUMENTATION
+
+
+/** @typedef {(string|number)} */
+goog.NumberLike;
+
+
+/**
+ * Something from the html5 externs file.
+ * @type {string}
+ * @implicitCast
+ */
+CanvasRenderingContext2D.prototype.fillStyle;
+
+
+
+/**
+ * Regression test.
+ * @bug 2994247
+ * @inheritDoc
+ * @extends {Bar}
+ * @constructor
+ * @private
+ */
+Foo_ = function() {
+};
+
+
+/**
+ * @param {function(this:T,...)} fn The function.
+ * @param {T} obj The object.
+ * @template T
+ */
+function bind(fn, obj) {
+}
+
+
+
+/**
+ * @constructor
+ * @classTemplate T
+ */
+function MyClass() {
+}
+
+
+foo(/** @lends {T} */ ({foo: 'bar'}));
+
+
+
+/**
+ * @param {*} x .
+ * @constructor
+ * @struct
+ */
+function StructMaker(x) { this.x = x; }
+
+var structObjLit = /** @struct */ { x: 123 };
+
+
+
+/**
+ * @param {*} x .
+ * @constructor
+ * @dict
+ */
+function DictMaker(x) { this['x'] = x; }
+
+var dictObjLit = /** @dict */ { x: 123 };
+
+
+/**
+ * @idGenerator
+ * @param {string} x .
+ * @return {string} .
+ */
+function makeId(x) {
+ return '';
+}
+
+
+/**
+ * @consistentIdGenerator
+ * @param {string} x .
+ * @return {string} .
+ */
+function makeConsistentId(x) {
+ return '';
+}
+
+
+/**
+ * @stableIdGenerator
+ * @param {string} x .
+ * @return {string} .
+ */
+function makeStableId(x) {
+ return '';
+}
+
+
+/**
+ * Test to make sure defining object with object literal doest not produce
+ * doc warning for @this.
+ * Regression test for b/4073735.
+ */
+var Foo = function();
+Foo.prototype = {
+ /**
+ * @return {number} Never.
+ */
+ method: function() {
+ return this.method();
+ }
+};
+
+/** Regression tests for annotation types with spaces. */
+
+
+/** @enum {goog.events.Event<string, number>} */
+var Bar;
+
+
+
+/**
+ * @constructor
+ * @implements {goog.dom.Range<string, number>}
+ */
+var Foo = function() {
+ /** @final {goog.events.Event<string, number>} */
+ this.bar = null;
+};
+
+/* Regression tests for not ending block comments. Keep at end of file! **/
+/**
+ * When there are multiple asteriks. In the failure case we would get an
+ * error that the file ended mid comment, with no end comment token***/
+/**
+ * Was a separate bug 2950646 when the closing bit was on it's own line
+ * because the ending star was being put into a different token type: DOC_PREFIX
+ * rather than DOC_COMMENT.
+ **/
diff --git a/tools/closure_linter/closure_linter/testdata/limited_doc_checks.js b/tools/closure_linter/closure_linter/testdata/limited_doc_checks.js
new file mode 100644
index 0000000000..701cce9892
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/limited_doc_checks.js
@@ -0,0 +1,29 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Test file for limited doc checks.
+ */
+
+
+/**
+ * Don't require documentation of parameters.
+ * @param {boolean}
+ * @param {boolean} c
+ * @param {boolean} d No check for punctuation
+ * @bug 3259564
+ */
+x.y = function(a, b, c, d) {
+ return a;
+};
diff --git a/tools/closure_linter/closure_linter/testdata/minimal.js b/tools/closure_linter/closure_linter/testdata/minimal.js
new file mode 100644
index 0000000000..6dbe7333f4
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/minimal.js
@@ -0,0 +1 @@
+function f(x) {} // Regression test for old parsing bug.
diff --git a/tools/closure_linter/closure_linter/testdata/not_strict.js b/tools/closure_linter/closure_linter/testdata/not_strict.js
new file mode 100644
index 0000000000..f8ede3dc1d
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/not_strict.js
@@ -0,0 +1,42 @@
+/** // _WRONG_BLANK_LINE_COUNT
+ * @fileoverview This file has errors that could trigger both in strict and non
+ * strict mode. The errors beginning with _ should not be triggered when strict
+ * flag is false.
+ * // -1: _INVALID_AUTHOR_TAG_DESCRIPTION
+ */
+
+/** // _WRONG_BLANK_LINE_COUNT
+ * A constructor with 1 line above it (BAD).
+ * // +1: MISSING_JSDOC_TAG_TYPE
+ * @param a A parameter.
+ * @privtae // INVALID_JSDOC_TAG
+ * @constructor
+ */
+function someFunction(a) {
+ /** +1: _MISSING_BRACES_AROUND_TYPE
+ * @type number
+ */
+ this.a = 0;
+ someReallyReallyReallyReallyReallyReallyReallyReallyLongiName = quiteBigValue; // LINE_TOO_LONG
+ if (this.a == 0) {
+ // _WRONG_INDENTATION
+ return // MISSING_SEMICOLON
+ }
+}; // ILLEGAL_SEMICOLON_AFTER_FUNCTION
+
+
+// +1: _UNNECESSARY_BRACES_AROUND_INHERIT_DOC
+/** {@inheritDoc} */
+function someFunction.prototype.toString() {
+}
+
+
+/**
+ * When not strict, there is no problem with optional markers in types.
+ * @param {string=} a A string.
+ * @param {string} aOk An other string.
+ * @param {number} opt_b An optional number.
+ * @param {number=} opt_bOk An other optional number.
+ */
+someFunction.optionalParams = function(a, aOk, opt_b, opt_bOk) {
+};
diff --git a/tools/closure_linter/closure_linter/testdata/other.js b/tools/closure_linter/closure_linter/testdata/other.js
new file mode 100644
index 0000000000..1e424ce3f2
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/other.js
@@ -0,0 +1,459 @@
+// Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Miscellaneous style errors.
+ *
+ * @author robbyw@google.com (Robby Walker)
+ */
+
+goog.provide('goog.dom');
+
+goog.require('goog.events.EventHandler');
+
+var this_is_a_really_long_line = 100000000000000000000000000000000000000000000000; // LINE_TOO_LONG
+
+// Declaration in multiple lines.
+// Regression test for b/3009648
+var
+ a,
+ b = 10;
+
+// http://this.comment.should.be.allowed/because/it/is/a/URL/that/can't/be/broken/up
+
+
+/**
+ * Types are allowed to be long even though they contain spaces.
+ * @type {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType}
+ */
+x.z = 1000;
+
+
+/**
+ * Params are also allowed to be long even though they contain spaces.
+ * @param {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType} fn
+ * The function to call.
+ */
+x.z = function(fn) {
+};
+
+
+/**
+ * Visibility tags are allowed to have type, therefore they allowed to be long.
+ * @private {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType}
+ */
+x.z_ = 1000;
+
+
+/**
+ * Visibility tags are allowed to have type, therefore they allowed to be long.
+ * @public {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType}
+ */
+x.z = 1000;
+
+
+/**
+ * Visibility tags are allowed to have type, therefore they allowed to be long.
+ * @protected {function(ReallyReallyReallyReallyLongType, AnotherExtremelyLongType) : LongReturnType}
+ */
+x.z = 1000;
+
+
+/**
+ * Visibility tags are allowed to have type, therefore they allowed to be long.
+ * @package {function(ReallyReallyReallyReallyLongType,AnotherExtremelyLongType):LongReturnType}
+ */
+x.z = 1000;
+
+// +2: LINE_TOO_LONG
+var x =
+ a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.a.b.c.d.tooLongEvenThoughNoSpaces;
+
+// +1: LINE_TOO_LONG
+getSomeExtremelyLongNamedFunctionWowThisNameIsSoLongItIsAlmostUnbelievable().dispose();
+
+
+/**
+ * @param {number|string|Object|Element|Array.<Object>|null} aReallyReallyReallyStrangeParameter
+ * @param {number|string|Object|Element|goog.a.really.really.really.really.really.really.really.really.long.Type|null} shouldThisParameterWrap
+ * @return {goog.a.really.really.really.really.really.really.really.really.long.Type}
+ */
+x.y = function(aReallyReallyReallyStrangeParameter, shouldThisParameterWrap) {
+ return something;
+};
+
+
+/**
+ * @type {goog.a.really.really.really.really.really.really.really.really.long.Type?}
+ */
+x.y = null;
+
+function doesEndWithSemicolon() {
+}; // ILLEGAL_SEMICOLON_AFTER_FUNCTION
+
+function doesNotEndWithSemicolon() {
+}
+
+doesEndWithSemicolon = function() {
+ // +1: UNUSED_LOCAL_VARIABLE
+ var shouldEndWithSemicolon = function() {
+ } // MISSING_SEMICOLON_AFTER_FUNCTION
+};
+
+doesNotEndWithSemicolon = function() {
+} // MISSING_SEMICOLON_AFTER_FUNCTION
+
+doesEndWithSemicolon['100'] = function() {
+};
+
+doesNotEndWithSemicolon['100'] = function() {
+} // MISSING_SEMICOLON_AFTER_FUNCTION
+
+if (some_flag) {
+ function doesEndWithSemicolon() {
+ }; // ILLEGAL_SEMICOLON_AFTER_FUNCTION
+
+ function doesNotEndWithSemicolon() {
+ }
+
+ doesEndWithSemicolon = function() {
+ };
+
+ doesNotEndWithSemicolon = function() {
+ } // MISSING_SEMICOLON_AFTER_FUNCTION
+}
+
+// No semicolon for expressions that are immediately called.
+var immediatelyCalledFunctionReturnValue = function() {
+}();
+
+
+/**
+ * Regression test for function expressions treating semicolons wrong.
+ * @bug 1044052
+ */
+goog.now = Date.now || function() {
+ //...
+};
+
+
+/**
+ * Regression test for function expressions treating semicolons wrong.
+ * @bug 1044052
+ */
+goog.now = Date.now || function() {
+ //...
+} // MISSING_SEMICOLON_AFTER_FUNCTION
+
+
+/**
+ * Function defined in ternary operator
+ * @bug 1413743
+ * @param {string} id The ID of the element.
+ * @return {Element} The matching element.
+ */
+goog.dom.$ = document.getElementById ?
+ function(id) {
+ return document.getElementById(id);
+ } :
+ function(id) {
+ return document.all[id];
+ };
+
+
+/**
+ * Test function in object literal needs no semicolon.
+ * @type {Object}
+ */
+x.y = {
+ /**
+ * @return {number} Doc the inner function too.
+ */
+ a: function() {
+ return 10;
+ }
+};
+
+// Semicolon required at end of object literal.
+var throwObjectLiteral = function() {
+ throw {
+ x: 0,
+ y: 1
+ } // MISSING_SEMICOLON
+};
+
+var testRegex = /(\([^\)]*\))|(\[[^\]]*\])|({[^}]*})|(&lt;[^&]*&gt;)/g;
+var testRegex2 = /abc/gimsx;
+
+var x = 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100
+ + 20; // LINE_STARTS_WITH_OPERATOR
+
+var x = 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 + 100 +
+ -20; // unary minus is ok
+
+var x = z++
+ + 20; // LINE_STARTS_WITH_OPERATOR
+
+var x = z. // LINE_ENDS_WITH_DOT
+ y();
+
+// Regression test: This line was incorrectly not reporting an error
+var marginHeight = x.layout.getSpacing_(elem, 'marginTop')
+ + x.layout.getSpacing_(elem, 'marginBottom');
+// -1: LINE_STARTS_WITH_OPERATOR
+
+// Regression test: This line was correctly reporting an error
+x.layout.setHeight(elem, totalHeight - paddingHeight - borderHeight
+ - marginHeight); // LINE_STARTS_WITH_OPERATOR
+
+// Regression test: This line was incorrectly reporting spacing and binary
+// operator errors
+if (i == index) {
+}
+++i;
+
+var twoSemicolons = 10;; // REDUNDANT_SEMICOLON
+
+if (i == index) {
+} else; // REDUNDANT_SEMICOLON
+i++;
+
+do; // REDUNDANT_SEMICOLON
+{
+} while (i == index);
+
+twoSemicolons = 10;
+// A more interesting example of two semicolons
+ ; // EXTRA_SPACE, WRONG_INDENTATION, REDUNDANT_SEMICOLON
+
+
+/** @bug 1598895 */
+for (;;) {
+ // Do nothing.
+}
+
+for (var x = 0, foo = blah(), bar = {};; x = update(x)) {
+ // A ridiculous case that should probably never happen, but I suppose is
+ // valid.
+}
+
+var x = "allow'd double quoted string";
+var x = "unnecessary double quotes string"; // UNNECESSARY_DOUBLE_QUOTED_STRING
+// +1: MULTI_LINE_STRING, UNNECESSARY_DOUBLE_QUOTED_STRING,
+var x = "multi-line unnecessary double quoted \
+ string.";
+
+
+// Regression test: incorrectly reported missing doc for variable used in global
+// scope.
+/**
+ * Whether the "Your browser isn't fully supported..." warning should be shown
+ * to the user; defaults to false.
+ * @type {boolean}
+ * @private
+ */
+init.browserWarning_ = false;
+
+init.browserWarning_ = true;
+
+if (someCondition) {
+ delete this.foo_[bar];
+}
+
+// Commas at the end of literals used to be forbidden.
+x = [1, 2, 3,];
+x = [1, 2, 3, /* A comment */];
+x = [
+ 1,
+ 2,
+ 3,
+];
+x = {
+ a: 1,
+};
+
+// Make sure we don't screw up typing for Lvalues and think b:c is a type value
+// pair.
+x = a ? b : c = 34;
+x = a ? b:c; // MISSING_SPACE, MISSING_SPACE
+x = (a ? b:c = 34); // MISSING_SPACE, MISSING_SPACE
+
+if (x) {
+ x += 10;
+}; // REDUNDANT_SEMICOLON
+
+
+/**
+ * Bad assignment of array to prototype.
+ * @type {Array}
+ */
+x.prototype.badArray = []; // ILLEGAL_PROTOTYPE_MEMBER_VALUE
+
+
+/**
+ * Bad assignment of object to prototype.
+ * @type {Object}
+ */
+x.prototype.badObject = {}; // ILLEGAL_PROTOTYPE_MEMBER_VALUE
+
+
+/**
+ * Bad assignment of class instance to prototype.
+ * @type {goog.events.EventHandler}
+ */
+x.prototype.badInstance = new goog.events.EventHandler();
+// -1: ILLEGAL_PROTOTYPE_MEMBER_VALUE
+
+// Check that some basic structures cause no errors.
+x = function() {
+ try {
+ } finally {
+ y = 10;
+ }
+};
+
+switch (x) {
+ case 10:
+ break;
+ case 20:
+ // Fallthrough.
+ case 30:
+ break;
+ case 40: {
+ break;
+ }
+ default:
+ break;
+}
+
+do {
+ x += 10;
+} while (x < 100);
+
+do {
+ x += 10;
+} while (x < 100) // MISSING_SEMICOLON
+
+// Missing semicolon checks.
+x = 10 // MISSING_SEMICOLON
+x = someOtherVariable // MISSING_SEMICOLON
+x = fnCall() // MISSING_SEMICOLON
+x = {a: 10, b: 20} // MISSING_SEMICOLON
+x = [10, 20, 30] // MISSING_SEMICOLON
+x = (1 + 2) // MISSING_SEMICOLON
+x = {
+ a: [
+ 10, 20, (30 +
+ 40)
+ ]
+} // MISSING_SEMICOLON
+x = a
+ .b
+ .c(). // LINE_ENDS_WITH_DOT
+ d;
+
+// Test that blocks without braces don't generate incorrect semicolon and
+// indentation errors. TODO: consider disallowing blocks without braces.
+if (x)
+ y = 10;
+
+if (x)
+ y = 8 // MISSING_SEMICOLON
+
+// Regression test for bug 2973408, bad missing semi-colon error when else
+// is not followed by an opening brace.
+if (x)
+ y = 3;
+else
+ z = 4;
+
+// We used to erroneously report a missing semicolon error.
+if (x)
+{
+}
+
+while (x)
+ y = 10;
+
+for (x = 0; x < 10; x++)
+ y += 10;
+ z += 10; // WRONG_INDENTATION
+
+var x = 100 // MISSING_SEMICOLON
+
+// Also regression test for bug 2973407 Parse error on nested ternary statments.
+foo = bar ? baz ? 1 : 2 : 3 // MISSING_SEMICOLON
+foo = bar ? 1 : baz ? 2 : 3;
+bar ? 1 : baz ? 2 : bat ? 3 : 4;
+bar ? 1 : baz ? bat ? 3 : 4 : baq ? 5 : 6;
+foo = bar ? 1 : 2;
+
+foo = {
+ str: bar ? baz ? blah ? 1 : 2 : 3 : 4
+} // MISSING_SEMICOLON
+
+
+// Regression tests for bug 2969408 GJsLint doesn't like labeled statements.
+mainLoop: while (!y) {
+}
+
+myLabel1: myLabel2: var x;
+
+for (var i = 0; i < n; i++) {
+ myLabel3:
+ while (true) {
+ break myLabel3;
+ }
+}
+
+myLabelA : myLabelB : x > y ? 0 : 1; // EXTRA_SPACE, EXTRA_SPACE, EXTRA_SPACE
+
+// Regression test for bug 4269466.
+var a = new Scheme({default: 0});
+switch (foo) {
+ default:
+ var a = new Scheme({default: 0});
+ break;
+}
+
+
+/** @private Some text is allowed after tag */
+x.y_ = function() {
+};
+
+
+/** @private Some text is allowed after tag but not the long oneeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee.*/ // LINE_TOO_LONG
+x.y_ = function() {
+};
+
+
+/** @private {number} Some text is allowed after tag */
+x.z_ = 200;
+
+
+/** @private {number} Some text is allowed after tag but not the long oneeeeeeeeeeeeeeee. */ // LINE_TOO_LONG
+x.z_ = 200;
+
+// Regression tests for b/16298424.
+var z = function() {}.bind();
+window.alert(function() {}.bind());
+function() {
+}.bind();
+var y = function() {
+}.bind();
+var y = function() {
+ }
+ .bind();
+
+/* comment not closed // FILE_MISSING_NEWLINE, FILE_IN_BLOCK \ No newline at end of file
diff --git a/tools/closure_linter/closure_linter/testdata/provide_blank.js b/tools/closure_linter/closure_linter/testdata/provide_blank.js
new file mode 100644
index 0000000000..a4e0716419
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/provide_blank.js
@@ -0,0 +1,29 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Checks that missing provides are reported at the first require
+ * when there are no other provides in the file.
+ */
+
+goog.require('dummy.package.ClassName'); // MISSING_GOOG_PROVIDE
+
+
+
+/**
+ * @constructor
+ */
+dummy.Something = function() {};
+
+var x = new dummy.package.ClassName();
diff --git a/tools/closure_linter/closure_linter/testdata/provide_extra.js b/tools/closure_linter/closure_linter/testdata/provide_extra.js
new file mode 100644
index 0000000000..3370950e3b
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/provide_extra.js
@@ -0,0 +1,39 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed 2to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Checks for extra goog.provides.
+ *
+ */
+
+goog.provide(''); // EXTRA_GOOG_PROVIDE
+
+goog.provide('dummy.AnotherThingTest'); // ok since mentioned in setTestOnly
+goog.provide('dummy.AnotherTrulyLongNamespaceToMakeItExceedEightyCharactersThingTest');
+
+goog.provide('dummy.Something');
+goog.provide('dummy.Something'); // EXTRA_GOOG_PROVIDE
+goog.provide('dummy.SomethingElse'); // EXTRA_GOOG_PROVIDE
+
+goog.provide('dummy.YetAnotherThingTest'); // EXTRA_GOOG_PROVIDE
+
+goog.setTestOnly('dummy.AnotherThingTest');
+goog.setTestOnly('dummy.AnotherTrulyLongNamespaceToMakeItExceedEightyCharactersThingTest');
+
+
+
+/**
+ * @constructor
+ */
+dummy.Something = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/provide_missing.js b/tools/closure_linter/closure_linter/testdata/provide_missing.js
new file mode 100644
index 0000000000..42de489671
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/provide_missing.js
@@ -0,0 +1,40 @@
+// Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// We are missing a provide of goog.something.Else.
+// -15: MISSING_GOOG_PROVIDE
+
+/**
+ * @fileoverview Tests missing provides and the usage of the missing provide
+ * suppression annotation.
+ *
+ */
+
+
+
+/**
+ * Constructor for Something.
+ * @constructor
+ * @suppress {missingProvide}
+ */
+goog.something.Something = function() {};
+
+
+
+/**
+ * Constructor for Else. We should get an error about providing this, but not
+ * about the constructor for Something.
+ * @constructor
+ */
+goog.something.Else = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/require_alias.js b/tools/closure_linter/closure_linter/testdata/require_alias.js
new file mode 100644
index 0000000000..804b2ed142
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_alias.js
@@ -0,0 +1,14 @@
+// We are missing a require of goog.sample.UsedType
+goog.provide('goog.something.Else'); // +1: MISSING_GOOG_REQUIRE
+
+
+goog.scope(function() {
+var unused = goog.events.unused; // UNUSED_LOCAL_VARIABLE
+var used = goog.events.used; // ALIAS_STMT_NEEDS_GOOG_REQUIRE
+var UsedType = goog.sample.UsedType;
+var other = goog.sample.other;
+
+
+/** @type {used.UsedAlias|other.UsedAlias} */
+goog.something.Else = UsedType.create();
+}); // goog.scope
diff --git a/tools/closure_linter/closure_linter/testdata/require_all_caps.js b/tools/closure_linter/closure_linter/testdata/require_all_caps.js
new file mode 100644
index 0000000000..49344f2c18
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_all_caps.js
@@ -0,0 +1,30 @@
+// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview A should come before B.
+ *
+ */
+
+goog.provide('XX'); // GOOG_PROVIDES_NOT_ALPHABETIZED
+goog.provide('ZZ');
+goog.provide('YY');
+
+goog.require('dummy.AA'); // GOOG_REQUIRES_NOT_ALPHABETIZED
+goog.require('dummy.CC');
+goog.require('dummy.BB');
+
+dummy.AA();
+dummy.CC();
+dummy.BB();
diff --git a/tools/closure_linter/closure_linter/testdata/require_blank.js b/tools/closure_linter/closure_linter/testdata/require_blank.js
new file mode 100644
index 0000000000..060781ce7c
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_blank.js
@@ -0,0 +1,29 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Checks that missing requires are reported just after the last
+ * provide when there are no other requires in the file.
+ */
+
+goog.provide('dummy.Something'); // +1: MISSING_GOOG_REQUIRE
+
+
+
+/**
+ * @constructor
+ */
+dummy.Something = function() {};
+
+var x = new dummy.package.ClassName();
diff --git a/tools/closure_linter/closure_linter/testdata/require_extra.js b/tools/closure_linter/closure_linter/testdata/require_extra.js
new file mode 100644
index 0000000000..3ee39c73a8
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_extra.js
@@ -0,0 +1,35 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Checks for extra goog.requires.
+ *
+ */
+
+goog.require(''); // EXTRA_GOOG_REQUIRE
+goog.require('dummy.Aa');
+goog.require('dummy.Aa.CONSTANT'); // EXTRA_GOOG_REQUIRE
+goog.require('dummy.Aa.Enum'); // EXTRA_GOOG_REQUIRE
+goog.require('dummy.Bb');
+goog.require('dummy.Ff'); // EXTRA_GOOG_REQUIRE
+goog.require('dummy.Gg'); // EXTRA_GOOG_REQUIRE
+goog.require('dummy.cc');
+goog.require('dummy.cc'); // EXTRA_GOOG_REQUIRE
+goog.require('dummy.hh'); // EXTRA_GOOG_REQUIRE
+
+new dummy.Aa();
+dummy.Bb.someMethod();
+dummy.cc();
+var x = dummy.Aa.Enum.VALUE;
+var y = dummy.Aa.CONSTANT;
diff --git a/tools/closure_linter/closure_linter/testdata/require_function.js b/tools/closure_linter/closure_linter/testdata/require_function.js
new file mode 100644
index 0000000000..532bb67103
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_function.js
@@ -0,0 +1,22 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Description of this file.
+ */
+
+goog.require('goog.mobile.paging.getPage');
+
+
+goog.mobile.paging.getPage();
diff --git a/tools/closure_linter/closure_linter/testdata/require_function_missing.js b/tools/closure_linter/closure_linter/testdata/require_function_missing.js
new file mode 100644
index 0000000000..33bec21eab
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_function_missing.js
@@ -0,0 +1,24 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// -14: MISSING_GOOG_REQUIRE
+
+/**
+ * @fileoverview Description of this file.
+ */
+
+
+
+goog.mobile.paging.getPage();
+goog.mobile.paging.getOtherPage();
diff --git a/tools/closure_linter/closure_linter/testdata/require_function_through_both.js b/tools/closure_linter/closure_linter/testdata/require_function_through_both.js
new file mode 100644
index 0000000000..d9525ec4e1
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_function_through_both.js
@@ -0,0 +1,23 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Description of this file.
+ */
+
+goog.require('goog.mobile.paging');
+goog.require('goog.mobile.paging.getPage');
+
+
+goog.mobile.paging.getPage();
diff --git a/tools/closure_linter/closure_linter/testdata/require_function_through_namespace.js b/tools/closure_linter/closure_linter/testdata/require_function_through_namespace.js
new file mode 100644
index 0000000000..55628fccfb
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_function_through_namespace.js
@@ -0,0 +1,22 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Description of this file.
+ */
+
+goog.require('goog.mobile.paging');
+
+
+goog.mobile.paging.getPage();
diff --git a/tools/closure_linter/closure_linter/testdata/require_interface.js b/tools/closure_linter/closure_linter/testdata/require_interface.js
new file mode 100644
index 0000000000..d6e83024b4
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_interface.js
@@ -0,0 +1,31 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Contains a test to verify that interfaces implemented in a file
+ * are goog.require'd.
+ *
+ */
+
+// We're missing a goog.require of goog.something.SomeInterface.
+goog.provide('goog.something.SomeClass'); // +1: MISSING_GOOG_REQUIRE
+
+
+
+/**
+ * Constructor for SomeClass.
+ * @constructor
+ * @implements {goog.something.SomeInterface}
+ */
+goog.something.SomeClass = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/require_interface_alias.js b/tools/closure_linter/closure_linter/testdata/require_interface_alias.js
new file mode 100644
index 0000000000..c71b29ca3c
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_interface_alias.js
@@ -0,0 +1,34 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Contains a test to verify that aliased interfaces
+ * are goog.require'd.
+ */
+
+// We're missing a goog.require of goog.something.SomeInterface.
+goog.provide('goog.something.SomeClass'); // +1: MISSING_GOOG_REQUIRE
+
+goog.scope(function() {
+var something = goog.something;
+
+
+
+/**
+ * Constructor for SomeClass.
+ * @constructor
+ * @implements {something.SomeInterface}
+ */
+something.SomeClass = function() {};
+}); // goog.scope
diff --git a/tools/closure_linter/closure_linter/testdata/require_interface_base.js b/tools/closure_linter/closure_linter/testdata/require_interface_base.js
new file mode 100644
index 0000000000..c8bb1f67a5
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_interface_base.js
@@ -0,0 +1,31 @@
+// Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Contains a test to verify that parent interfaces
+ * implemented are goog.require'd.
+ *
+ */
+
+// We're missing a goog.require of goog.something.BaseInterface.
+goog.provide('goog.something.SomeInterface'); // +1: MISSING_GOOG_REQUIRE
+
+
+
+/**
+ * Constructor for SomeInterface.
+ * @interface
+ * @extends {goog.something.BaseInterface}
+ */
+goog.something.SomeInterface = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/require_lower_case.js b/tools/closure_linter/closure_linter/testdata/require_lower_case.js
new file mode 100644
index 0000000000..c1fff4a3ed
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_lower_case.js
@@ -0,0 +1,30 @@
+// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview The B should come before the b.
+ *
+ */
+
+goog.provide('x'); // GOOG_PROVIDES_NOT_ALPHABETIZED
+goog.provide('X');
+goog.provide('Y');
+
+goog.require('dummy.bb'); // GOOG_REQUIRES_NOT_ALPHABETIZED
+goog.require('dummy.Bb');
+goog.require('dummy.Cc');
+
+var x = dummy.bb.a();
+var y = dummy.Bb.a();
+var z = dummy.Cc.a();
diff --git a/tools/closure_linter/closure_linter/testdata/require_missing.js b/tools/closure_linter/closure_linter/testdata/require_missing.js
new file mode 100644
index 0000000000..3539c9489d
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_missing.js
@@ -0,0 +1,40 @@
+// Copyright 2011 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Tests missing requires around the usage of the require
+ * suppression annotation.
+ *
+ */
+
+// We are missing a require of goog.foo.
+goog.provide('goog.something.Else'); // +1: MISSING_GOOG_REQUIRE
+
+
+
+/**
+ * Constructor for Else.
+ * @constructor
+ */
+goog.something.Else = function() {
+ /** @suppress {missingRequire} */
+ this.control.createConstructorMock(
+ goog.foo.bar, 'Baz');
+
+ // Previous suppress should only be scoped to that statement.
+ this.control.createConstructorMock(
+ goog.foo.bar, 'Baz');
+
+ this.control.invoke(goog.foo.bar, 'Test');
+};
diff --git a/tools/closure_linter/closure_linter/testdata/require_numeric.js b/tools/closure_linter/closure_linter/testdata/require_numeric.js
new file mode 100644
index 0000000000..29d8377afa
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_numeric.js
@@ -0,0 +1,30 @@
+// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Numbers should come before letters.
+ *
+ */
+
+goog.provide('xa'); // GOOG_PROVIDES_NOT_ALPHABETIZED
+goog.provide('x1');
+goog.provide('xb');
+
+goog.require('dummy.aa'); // GOOG_REQUIRES_NOT_ALPHABETIZED
+goog.require('dummy.a1');
+goog.require('dummy.ab');
+
+dummy.aa.a;
+dummy.a1.a;
+dummy.ab.a;
diff --git a/tools/closure_linter/closure_linter/testdata/require_provide_blank.js b/tools/closure_linter/closure_linter/testdata/require_provide_blank.js
new file mode 100644
index 0000000000..0e0c188ca5
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_provide_blank.js
@@ -0,0 +1,31 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// -14: MISSING_GOOG_PROVIDE
+// -15: MISSING_GOOG_REQUIRE
+
+/**
+ * @fileoverview Checks that missing requires and provides are reported at the
+ * top of the file when there are no existing goog.requires or provides in the
+ * file.
+ */
+
+
+
+/**
+ * @constructor
+ */
+dummy.Something = function() {};
+
+var x = new dummy.package.ClassName();
diff --git a/tools/closure_linter/closure_linter/testdata/require_provide_missing.js b/tools/closure_linter/closure_linter/testdata/require_provide_missing.js
new file mode 100644
index 0000000000..a56f4d0007
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_provide_missing.js
@@ -0,0 +1,76 @@
+// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview The same code as require_provide_ok, but missing a provide
+ * and a require call.
+ *
+ */
+
+goog.provide('goog.something'); // +1: MISSING_GOOG_PROVIDE
+// Missing provide of goog.something.Else and goog.something.SomeTypeDef.
+
+goog.require('goog.Class');
+goog.require('goog.package'); // +1: MISSING_GOOG_REQUIRE
+// Missing requires of goog.Class.Enum and goog.otherThing.Class.Enum.
+
+
+var x = new goog.Class();
+goog.package.staticFunction();
+
+var y = goog.Class.Enum.VALUE;
+
+
+/**
+ * @typedef {string}
+ */
+goog.something.SomeTypeDef;
+
+
+/**
+ * Private variable.
+ * @type {number}
+ * @private
+ */
+goog.something.private_ = 10;
+
+
+/**
+ * Use private variables defined in this file so they don't cause a warning.
+ */
+goog.something.usePrivateVariables = function() {
+ var x = [
+ goog.something.private_,
+ x
+ ];
+};
+
+
+/**
+ * Static function.
+ */
+goog.something.staticFunction = function() {
+};
+
+
+
+/**
+ * Constructor for Else.
+ * @constructor
+ */
+goog.something.Else = function() {
+ // Bug 1801608: Provide goog.otherThing.Class.Enum isn't missing.
+ var enum = goog.otherThing.Class.Enum;
+ goog.otherThing.Class.Enum = enum;
+};
diff --git a/tools/closure_linter/closure_linter/testdata/require_provide_ok.js b/tools/closure_linter/closure_linter/testdata/require_provide_ok.js
new file mode 100644
index 0000000000..01ddafe490
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/require_provide_ok.js
@@ -0,0 +1,214 @@
+// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview There is nothing wrong w/ this javascript.
+ *
+ */
+goog.module('goog.super.long.DependencyNameThatForcesMethodDefinitionToSpanMultipleLinesFooBar');
+goog.provide('goog.something');
+goog.provide('goog.something.Else');
+goog.provide('goog.something.Else.Enum');
+/** @suppress {extraProvide} */
+goog.provide('goog.something.Extra');
+goog.provide('goog.something.SomeTypeDef');
+goog.provide('goog.somethingelse.someMethod');
+goog.provide('goog.super.long.DependencyNameThatForcesTheLineToBeOverEightyCharacters');
+goog.provide('notInClosurizedNamespacesSoNotExtra');
+
+goog.require('dummy.foo');
+goog.require('dummy.foo.someSpecificallyRequiredMethod');
+goog.require('goog.Class');
+/** @suppress {extraRequire} */
+goog.require('goog.extra.require');
+goog.require('goog.package');
+goog.require('goog.package.ClassName');
+goog.require('goog.package.OtherClassName');
+/** @suppress {extraRequire} Legacy dependency on enum */
+goog.require('goog.package.OuterClassName.InnerClassName');
+goog.require('goog.super.long.DependencyNameThatForcesMethodDefinitionToSpanMultipleLinesFooBar');
+goog.require('goog.super.long.DependencyNameThatForcesTheLineToBeOverEightyCharacters2');
+goog.require('goog.super.long.DependencyNameThatForcesTheLineToBeOverEightyCharacters3');
+goog.require('notInClosurizedNamespacesSoNotExtra');
+
+dummy.foo.someMethod();
+dummy.foo.someSpecificallyRequiredMethod();
+
+
+// Regression test for bug 3473189. Both of these 'goog.provide' tokens should
+// be completely ignored by alphabetization checks.
+if (typeof goog != 'undefined' && typeof goog.provide == 'function') {
+ goog.provide('goog.something.SomethingElse');
+}
+
+
+var x = new goog.Class();
+goog.package.staticFunction();
+
+var y = goog.Class.Enum.VALUE;
+
+
+// This should not trigger a goog.require.
+var somethingPrivate = goog.somethingPrivate.PrivateEnum_.VALUE;
+
+
+/**
+ * This method is provided directly, instead of its namespace.
+ */
+goog.somethingelse.someMethod = function() {};
+
+
+/**
+ * Defining a private property on a required namespace should not trigger a
+ * provide of that namespace. Yes, people actually do this.
+ * @private
+ */
+goog.Class.privateProperty_ = 1;
+
+
+/**
+ * @typedef {string}
+ */
+goog.something.SomeTypeDef;
+
+
+/**
+ * @typedef {string}
+ * @private
+ */
+goog.something.SomePrivateTypeDef_;
+
+
+/**
+ * Some variable that is declared but not initialized.
+ * @type {string|undefined}
+ * @private
+ */
+goog.something.somePrivateVariable_;
+
+
+/**
+ * Private variable.
+ * @type {number}
+ * @private
+ */
+goog.something.private_ = 10;
+
+
+/**
+ * Use private variables defined in this file so they don't cause a warning.
+ */
+goog.something.usePrivateVariables = function() {
+ var x = [
+ goog.something.private_,
+ goog.Class.privateProperty_,
+ x
+ ];
+};
+
+
+
+/**
+ * A really long class name to provide and usage of a really long class name to
+ * be required.
+ * @constructor
+ */
+goog.super.long.DependencyNameThatForcesTheLineToBeOverEightyCharacters =
+ function() {
+ var x = new goog.super.long. // LINE_ENDS_WITH_DOT
+ DependencyNameThatForcesTheLineToBeOverEightyCharacters2();
+ var x = new goog.super.long
+ .DependencyNameThatForcesTheLineToBeOverEightyCharacters3();
+ // Use x to avoid a warning.
+ var x = [x];
+};
+
+
+/**
+ * A really long class name to to force a method definition to be greater than
+ * 80 lines. We should be grabbing the whole identifier regardless of how many
+ * lines it is on.
+ */
+goog.super.long
+ .DependencyNameThatForcesMethodDefinitionToSpanMultipleLinesFooBar
+ .prototype.someMethod = function() {
+};
+
+
+/**
+ * Static function.
+ */
+goog.something.staticFunction = function() {
+ // Tests that namespace usages are identified using 'namespace.' not just
+ // 'namespace'.
+ googSomething.property;
+ dummySomething.property;
+ goog.package.ClassName // A comment in between the identifier pieces.
+ .IDENTIFIER_SPLIT_OVER_MULTIPLE_LINES;
+ goog.package.OtherClassName.property = 1;
+
+ // Test case where inner class needs to be required explicitly.
+ new goog.package.OuterClassName.InnerClassName();
+
+ // Don't just use goog.bar for missing namespace, hard coded to never require
+ // goog since it's never provided.
+ control.createConstructorMock(
+ /** @suppress {missingRequire} */ goog.foo.bar, 'Baz');
+
+ goog.require('goog.shouldBeIgnored');
+};
+
+
+
+/**
+ * Constructor for Else.
+ * @constructor
+ */
+goog.something.Else = function() {
+ /** @suppress {missingRequire} */
+ this.control.createConstructorMock(goog.foo.bar, 'Baz');
+};
+
+
+/**
+ * Enum attached to Else. Should not need to be provided explicitly, but
+ * should not generate an extra require warning either.
+ * @enum {number}
+ */
+goog.something.Else.Enum = {
+ 'key': 1
+};
+
+
+/**
+ * Sample of a typedef. This should not need a provide as it is an inner
+ * element like an enum.
+ *
+ * @typedef {{val1: string, val2: boolean, val3: number}}
+ */
+goog.something.Else.Typedef;
+
+
+
+/**
+ * Constructor for SomethingElse.
+ * @constructor
+ */
+goog.something.SomethingElse = function() {};
+
+
+/**
+ * @suppress {missingProvide}
+ */
+goog.suppress.someMethod = function() {};
diff --git a/tools/closure_linter/closure_linter/testdata/semicolon_missing.js b/tools/closure_linter/closure_linter/testdata/semicolon_missing.js
new file mode 100644
index 0000000000..5601db8dec
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/semicolon_missing.js
@@ -0,0 +1,18 @@
+/**
+ * @fileoverview This is for regression testing of scenario where semicolon is
+ * missing at EOF. b/10801776.
+ */
+
+goog.provide('dummy.foo.DATA');
+
+/**
+ * @type {string}
+ * @const
+ *
+ * For repeating the bug blank comment line above this is needed.
+ */
+
+// +3: MISSING_SEMICOLON
+dummy.foo.DATA =
+ 'SFSDFSDdfgdfgdftreterterterterterggsdfsrrwerwerwsfwerwerwere55454ss' +
+ 'SFSDFSDdfgdfgdftretertertertertergg'
diff --git a/tools/closure_linter/closure_linter/testdata/simple.html b/tools/closure_linter/closure_linter/testdata/simple.html
new file mode 100644
index 0000000000..42ebab97d0
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/simple.html
@@ -0,0 +1,33 @@
+<!DOCTYPE html>
+<html lang="en" dir="ltr">
+<head>
+ <!--
+ Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS-IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+ <title>GJsLint HTML JavaScript extraction tests</title>
+ <script src="some_file.js">Ignore this</script>
+ <script>
+ x =10; // MISSING_SPACE
+ </script>
+<body>
+ <script type="text/javascript">
+ // +1: EXTRA_SPACE
+ x = 10; </script>
+ <script type="text/javascript">
+ // Not extra space.
+ x = 10;
+ </script>
+</body>
+</html>
diff --git a/tools/closure_linter/closure_linter/testdata/spaces.js b/tools/closure_linter/closure_linter/testdata/spaces.js
new file mode 100644
index 0000000000..85a36e53bf
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/spaces.js
@@ -0,0 +1,354 @@
+// Copyright 2007 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Errors relating to whitespace.
+ *
+ * @author robbyw@google.com (Robby Walker)
+ */
+
+if(needs_space) { // MISSING_SPACE
+}
+
+if ( too_much_space) { // EXTRA_SPACE
+}
+
+if (different_extra_space ) { // EXTRA_SPACE
+}
+
+switch(needs_space) { // MISSING_SPACE
+}
+
+var x = 'if(not_an_error)';
+
+var y = afjkljkl + ajklasdflj + ajkadfjkasdfklj + aadskfasdjklf + jkasdfa + (
+ kasdfkjlasdfjkl / jklasdfjklasdfjkl);
+
+x = 5+ 8; // MISSING_SPACE
+x = 5 +8; // MISSING_SPACE
+x= 5; // MISSING_SPACE
+x = 6; // EXTRA_SPACE
+x = 7; // EXTRA_SPACE
+x = 6 + 2; // EXTRA_SPACE
+x += 10;
+
+throw Error('Selector not supported yet('+ opt_selector + ')'); // MISSING_SPACE
+throw Error('Selector not supported yet(' +opt_selector + ')'); // MISSING_SPACE
+throw Error(
+ 'Selector not supported yet' +
+ '(' +(opt_selector ? 'foo' : 'bar') + ')'); // MISSING_SPACE
+
+x++;
+x ++; // EXTRA_SPACE
+x++ ; // EXTRA_SPACE
+y = a + ++b;
+for (var i = 0; i < 10; ++i) {
+}
+
+// We omit the update section of the for loop to test that a space is allowed
+// in this special case.
+for (var part; part = parts.shift(); ) {
+}
+
+if (x == y) {
+}
+
+x = 10; // no error here
+x = -1;
+x++;
+++x;
+
+x = bool ? -1 : -1;
+
+x = {a: 10};
+x = {a:10}; // MISSING_SPACE
+
+x = !!y;
+
+x >>= 0;
+x <<= 10;
+
+x[100] = 10;
+x[ 100] = 10; // EXTRA_SPACE
+x[100 ] = 10; // EXTRA_SPACE
+x [100] = 10; // EXTRA_SPACE
+x[10]= 5; // MISSING_SPACE
+var x = [];
+x = [[]];
+x = [[x]];
+x = [[[x, y]]];
+var craziness = ([1, 2, 3])[1];
+var crazinessError = ([1, 2, 3]) [1]; // EXTRA_SPACE
+var multiArray = x[1][2][3][4];
+var multiArrayError = x[1] [2][3][4]; // EXTRA_SPACE
+
+array[aReallyLooooooooooooooooooooooooooooongIndex1][
+ anotherVeryLoooooooooooooooooooooooooooooooooooongIndex
+] = 10;
+
+if (x) {
+ array[aReallyLooooooooooooooooooooooooooooongIndex1][
+ anotherVeryLoooooooooooooooooooooooooooooooooooongIndex
+ ] = 10;
+}
+
+
+/**
+ * Docs.
+ * @param {Number} x desc.
+ * @return {boolean} Some boolean value.
+ */
+function functionName( x) { // EXTRA_SPACE
+ return !!x;
+}
+
+
+/**
+ * Docs.
+ * @param {Number} x desc.
+ */
+function functionName(x ) { // EXTRA_SPACE
+ return;
+}
+
+
+/**
+ * Docs.
+ * @param {Number} x desc.
+ * @param {Number} y desc.
+ */
+function functionName(x,y) { // MISSING_SPACE
+}
+
+
+/**
+ * Docs.
+ * @param {Number} x desc.
+ * @param {Number} y desc.
+ */
+function functionName(x, y) {
+}
+
+
+/**
+ * Docs.
+ */
+function functionName() { // EXTRA_SPACE
+}
+
+
+/**
+ * Docs.
+ */
+function functionName(){ // MISSING_SPACE
+}
+
+functionName (); // EXTRA_SPACE
+
+
+/**
+ * Docs.
+ */
+function functionName () { // EXTRA_SPACE
+}
+
+
+/**
+ * Docs.
+ */
+var foo = function () { // EXTRA_SPACE
+};
+
+
+
+/**
+ * Missing a newline.
+ * @constructor
+ * @extends {a.b.c}
+ */
+x.y.z = function() {
+};goog.inherits(x.y.z, a.b.c); // MISSING_LINE
+
+
+
+/**
+ * Extra space.
+ * @constructor
+ * @extends {a.b.c}
+ */
+x.y.z = function() {
+};
+ goog.inherits(x.y.z, a.b.c); // WRONG_INDENTATION
+
+
+
+/**
+ * Extra blank line.
+ * @constructor
+ * @extends {a.b.c}
+ */
+x.y.z = function() {
+};
+
+goog.inherits(x.y.z, a.b.c); // -1: EXTRA_LINE
+
+
+
+/**
+ * Perfect!
+ * @constructor
+ * @extends {a.b.c}
+ */
+x.y.z = function() {
+};
+goog.inherits(x.y.z, a.b.c);
+
+if (flag) {
+ /**
+ * Also ok!
+ * @constructor
+ * @extends {a.b.c}
+ */
+ x.y.z = function() {
+ };
+ goog.inherits(x.y.z, a.b.c);
+}
+
+
+/**
+ * Docs.
+ */
+x.finally = function() {
+};
+
+x.finally();
+x
+ .finally();
+x.finally (); // EXTRA_SPACE
+x
+ .finally (); // EXTRA_SPACE
+try {
+} finally (e) {
+}
+try {
+} finally(e) { // MISSING_SPACE
+}
+
+functionName(x , y); // EXTRA_SPACE
+functionName(x,y); // MISSING_SPACE
+functionName(x, y);
+
+var really_really_really_really_really_really_really_really_really_long_name =
+ 2;
+
+var current = arr[cursorRead++];
+
+var x = -(y + z);
+
+// Tab before +
+var foo + 3; // ILLEGAL_TAB
+if (something) {
+ var x = 4; // ILLEGAL_TAB
+}
+
+// +1: ILLEGAL_TAB
+// Tab <-- in a comment.
+
+
+// +3: ILLEGAL_TAB
+// +3: ILLEGAL_TAB
+/**
+ * An inline flag with a tab {@code asdfasd}.
+ * @return {string} Illegal <-- tab in a doc description.
+ */
+function x() {
+ return '';
+}
+
+
+// +2: ILLEGAL_TAB
+/**
+ * @type {tabBeforeMe}
+ */
+
+// +1: EXTRA_SPACE
+var whitespaceAtEndOfLine;
+
+// +1: EXTRA_SPACE
+// Whitespace at end of comment.
+
+
+// +4: EXTRA_SPACE
+// +4: EXTRA_SPACE
+// +4: EXTRA_SPACE
+// +4: EXTRA_SPACE
+/*
+ * Whitespace at EOL.
+ * @type {string}
+ * @param {string} Description with whitespace at EOL.
+ */
+x = 10;
+
+
+/**
+ * @param {?{foo, bar: number}} x This is a valid annotation.
+ * @return {{baz}} This is also a valid annotation.
+ */
+function recordTypeFunction(x) {
+ return x;
+}
+
+if (y) {
+ // Colons are difficult.
+ y = x ? 1 : 2;
+ y = x ? 1: 2; // MISSING_SPACE
+
+ x = {
+ b: 'Good',
+ d : 'Space before colon is bad', // EXTRA_SPACE
+ f: abc ? def : ghi // These colons should be treated differently
+ };
+
+ x = {language: langCode}; // EXTRA_SPACE
+}
+
+// 1094445 - should produce missing space error before +.
+// +1: MISSING_SPACE
+throw Error('Selector not supported yet ('+ opt_selector + ')');
+
+// This code is ok.
+for (i = 0; i < len; ++i) {
+}
+
+for (i = 0;i < 10; i++) { // MISSING_SPACE
+}
+for (i = 0; i < 10;i++) { // MISSING_SPACE
+}
+for ( i = 0; i < 10; i++) { // EXTRA_SPACE
+}
+for (i = 0 ; i < 10; i++) { // EXTRA_SPACE
+}
+for (i = 0; i < 10 ; i++) { // EXTRA_SPACE
+}
+for (i = 0; i < 10; i++ ) { // EXTRA_SPACE
+}
+for (i = 0; i < 10; i++) { // EXTRA_SPACE
+}
+for (i = 0; i < 10; i++) { // EXTRA_SPACE
+}
+for (i = 0 ;i < 10; i++) { // EXTRA_SPACE, MISSING_SPACE
+}
+
+// Regression test for bug 3508480, parse error when tab as last token.
+// +1: ILLEGAL_TAB, EXTRA_SPACE
diff --git a/tools/closure_linter/closure_linter/testdata/tokenizer.js b/tools/closure_linter/closure_linter/testdata/tokenizer.js
new file mode 100644
index 0000000000..1fbcf4bd41
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/tokenizer.js
@@ -0,0 +1,78 @@
+// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Errors relating to tokenizing.
+ *
+ * @author robbyw@google.com (Robby Walker)
+ */
+
+// Regression test: if regular expressions parse incorrectly this will emit an
+// error such as: Missing space after '/'
+x = /[^\']/; // and all the other chars
+
+// Regression test: if regular expressions parse incorrectly this will emit an
+// error such as: Missing space before +
+var regExp = fromStart ? / ^[\t\r\n]+/ : /[ \t\r\n]+$/;
+
+// Regression test for bug 1032312: test for correct parsing of multiline
+// strings
+// +2: MULTI_LINE_STRING
+var RG_MONTH_EVENT_TEMPLATE_SINGLE_QUOTE = new Template(
+ '\
+<div id="${divID}" class=month_event \
+ style="top:${top}px;left:${left}px;width:${width}px;height:${height}px;\
+ z-index:' + Z_INDEX_MONTH_EVENT);
+
+// +2: MULTI_LINE_STRING
+var RG_MONTH_EVENT_TEMPLATE_DOUBLE_QUOTE = new Template(
+ "\
+<div id='${divID}' class=month_event \
+ style='top:${top}px;left:${left}px;width:${width}px;height:${height}px;\
+ z-index:" + Z_INDEX_MONTH_EVENT);
+
+// Regression test for bug 1032312: test for correct parsing of single line
+// comment at end of line. If it's parsed incorrectly, it reads the entire next
+// line as a comment.
+//
+if (x) {
+ // If the above line is treated as a comment, the closing brace below will
+ // cause the linter to crash.
+}
+
+// Regression test for bitwise operators '^=', '>>>' and '>>>=' that weren't
+// recognized as operators.
+a -= b; a -= c; a ^= c >>> 13; a >>>= 1;
+
+// Regression test as xor was not allowed on the end of a line.
+x = 1000 ^
+ 45;
+
+// Regression test for proper number parsing. If parsed incorrectly, some of
+// these notations can lead to missing spaces errors.
+var x = 1e-6 + 1e+6 + 0. + .5 + 0.5 + 0.e-6 + .5e-6 + 0.5e-6 + 0x123abc +
+ 0X1Ab3 + 1E7;
+
+// Regression test for keyword parsing - making sure the fact that the "do"
+// keyword is a part of the identifier below doesn't break anything.
+this.undoRedoManager_.undo();
+
+// Regression test for regex as object value not matching.
+x = {x: /./};
+
+// Regression test for regex as last array element not matching.
+x = [/./];
+
+// Syntax tests for ES6:
+x = x => x;
diff --git a/tools/closure_linter/closure_linter/testdata/unparseable.js b/tools/closure_linter/closure_linter/testdata/unparseable.js
new file mode 100644
index 0000000000..e842614286
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/unparseable.js
@@ -0,0 +1,44 @@
+// Copyright 2008 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+
+
+/**
+ * Constructs and initializes a new good object.
+ * @constructor
+ */
+goog.good = function() {
+};
+
+
+/**
+ * Makes this good object go bad.
+ * @param {number} badnessLevel How bad this object is going.
+ */
+goog.good.prototype.goBad = function() { // EXTRA_PARAMETER_DOCUMENTATION
+};
+
+if (x)
+ // Cannot parse ending block because beginning block is missing.
+} // FILE_DOES_NOT_PARSE
+
+
+/**
+ * Unnecessary return documentation error is not reported because file checking
+ * stopped at token causing parse error.
+ *
+ * @return {boolean} Whether reform was successful.
+ */
+goog.good.prototype.reform = function() {
+};
diff --git a/tools/closure_linter/closure_linter/testdata/unused_local_variables.js b/tools/closure_linter/closure_linter/testdata/unused_local_variables.js
new file mode 100644
index 0000000000..e9e51a1164
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/unused_local_variables.js
@@ -0,0 +1,88 @@
+// Copyright 2013 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Checks that unused local variables result in an error.
+ */
+
+goog.provide('dummy.Something');
+
+
+
+/**
+ * @constructor
+ */
+dummy.Something = function() {
+ // This variable isn't really used, but we can't tell for sure.
+ var usedVariable = [];
+ usedVariable.length = 1;
+
+ var variableUsedInAClosure = [];
+ var functionUsedByInvoking = function() {
+ variableUsedInAClosure[1] = 'abc';
+ };
+ functionUsedByInvoking();
+
+ var variableUsedTwoLevelsDeep = [];
+ var firstLevelFunction = function() {
+ function() {
+ variableUsedTwoLevelsDeep.append(1);
+ }
+ };
+ firstLevelFunction();
+
+ // This variable isn't being declared so is unchecked.
+ undeclaredLocal = 1;
+
+ var unusedVariable;
+
+ // Check that using a variable as member name doesn't trigger
+ // usage.
+ this.unusedVariable = 0;
+ this.unusedVariable = this.unusedVariable + 1;
+
+ // Check that declaring a variable twice doesn't trigger
+ // usage.
+ var unusedVariable; // UNUSED_LOCAL_VARIABLE
+
+ var unusedVariableWithReassignment = []; // UNUSED_LOCAL_VARIABLE
+ unusedVariableWithReassignment = 'a';
+
+ var unusedFunction = function() {}; // UNUSED_LOCAL_VARIABLE
+
+ var unusedHiddenVariable = 1; // UNUSED_LOCAL_VARIABLE
+ firstLevelFunction = function() {
+ // This variable is actually used in the function below, but hides the outer
+ // variable with the same name.
+ var unusedHiddenVariable = 1;
+ function() {
+ delete unusedHiddenVariable;
+ }
+ };
+};
+
+
+goog.scope(function() {
+var unusedAlias = dummy.Something; // UNUSED_LOCAL_VARIABLE
+var UsedTypeAlias = dummy.Something;
+var AnotherUsedTypeAlias = dummy.Something;
+
+
+/** @protected {AnotherUsedTypeAlias.Something|UsedTypeAlias} */
+var usedAlias = dummy.Something;
+new usedAlias();
+}); // goog.scope
+
+// Unused top level variables are not checked.
+var unusedTopLevelVariable;
diff --git a/tools/closure_linter/closure_linter/testdata/unused_private_members.js b/tools/closure_linter/closure_linter/testdata/unused_private_members.js
new file mode 100644
index 0000000000..76c0865bcc
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/unused_private_members.js
@@ -0,0 +1,205 @@
+// Copyright 2010 The Closure Linter Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS-IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/**
+ * @fileoverview Checks that unused private members result in an error.
+ */
+
+goog.provide('dummy.Something');
+
+
+
+/**
+ * @constructor
+ */
+dummy.Something = function() {
+ /**
+ * @type {number}
+ * @private
+ */
+ this.normalVariable_ = 1;
+
+ // +5: UNUSED_PRIVATE_MEMBER
+ /**
+ * @type {number}
+ * @private
+ */
+ this.unusedVariable_ = 1;
+
+ /**
+ * @type {number}
+ * @private
+ * @suppress {unusedPrivateMembers}
+ */
+ this.suppressedUnusedVariable_ = 1;
+};
+
+
+/**
+ * @type {number}
+ * @private
+ */
+dummy.Something.NORMAL_CONSTANT_ = 1;
+
+
+// +5: UNUSED_PRIVATE_MEMBER
+/**
+ * @type {number}
+ * @private
+ */
+dummy.Something.UNUSED_CONSTANT_ = 1;
+
+
+/**
+ * @type {number}
+ * @private
+ * @suppress {unusedPrivateMembers}
+ */
+dummy.Something.SUPPRESSED_UNUSED_CONSTANT_ = 1;
+
+
+/**
+ * @type {number}
+ * @private
+ */
+dummy.Something.normalStaticVariable_ = 1;
+
+
+// +5: UNUSED_PRIVATE_MEMBER
+/**
+ * @type {number}
+ * @private
+ */
+dummy.Something.unusedStaticVariable_ = 1;
+
+
+/**
+ * @type {number}
+ * @private
+ * @suppress {unusedPrivateMembers}
+ */
+dummy.Something.suppressedUnusedStaticVariable_ = 1;
+
+
+/**
+ * @type {number}
+ * @private
+ */
+dummy.Something.prototype.normalVariableOnPrototype_ = 1;
+
+
+// +5: UNUSED_PRIVATE_MEMBER
+/**
+ * @type {number}
+ * @private
+ */
+dummy.Something.prototype.unusedVariableOnPrototype_ = 1;
+
+
+/**
+ * @type {number}
+ * @private
+ * @suppress {unusedPrivateMembers}
+ */
+dummy.Something.prototype.suppressedUnusedVariableOnPrototype_ = 1;
+
+
+/**
+ * Check edge cases that should not be reported.
+ */
+dummy.Something.prototype.checkFalsePositives = function() {
+ this.__iterator__ = 1;
+ this.normalVariable_.unknownChainedVariable_ = 1;
+ othernamespace.unusedVariable_ = 1;
+
+ this.element_ = 1;
+ this.element_.modifyPublicMember = 1;
+
+ /** @suppress {underscore} */
+ this.suppressedUnderscore_ = true;
+};
+
+
+/**
+ * Use all the normal variables.
+ */
+dummy.Something.prototype.useAllTheThings = function() {
+ var x = [
+ dummy.Something.NORMAL_CONSTANT_,
+ this.normalStaticVariable_,
+ this.normalVariable_,
+ this.normalVariableOnPrototype_,
+ dummy.Something.normalStaticMethod_(),
+ this.normalMethod_(),
+ x
+ ];
+};
+
+
+// +5: UNUSED_PRIVATE_MEMBER
+/**
+ * Unused static method.
+ * @private
+ */
+dummy.Something.unusedStaticMethod_ = function() {
+ // Do nothing.
+};
+
+
+/**
+ * Unused static method.
+ * @private
+ * @suppress {unusedPrivateMembers}
+ */
+dummy.Something.suppressedUnusedStaticMethod_ = function() {
+ // Do nothing.
+};
+
+
+/**
+ * Normal static method.
+ * @private
+ */
+dummy.Something.normalStaticMethod_ = function() {
+ // Do nothing.
+};
+
+
+// +5: UNUSED_PRIVATE_MEMBER
+/**
+ * Unused non-static method.
+ * @private
+ */
+dummy.Something.prototype.unusedMethod_ = function() {
+ // Do nothing.
+};
+
+
+/**
+ * Unused non-static method that is suppressed.
+ * @private
+ * @suppress {unusedPrivateMembers}
+ */
+dummy.Something.prototype.suppressedUnusedMethod_ = function() {
+ // Do nothing.
+};
+
+
+/**
+ * Normal non-static method.
+ * @private
+ */
+dummy.Something.prototype.normalMethod_ = function() {
+ // Do nothing.
+};
diff --git a/tools/closure_linter/closure_linter/testdata/utf8.html b/tools/closure_linter/closure_linter/testdata/utf8.html
new file mode 100644
index 0000000000..29517d0941
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testdata/utf8.html
@@ -0,0 +1,26 @@
+<!--
+ Copyright 2009 The Closure Linter Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS-IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+
+<meta http-equiv="Content-Type" content="charset=UTF-8"></meta>
+<script src=../somescript.js></script>
+<script>
+/** @bug 1735846 */
+window.onload = function() {
+ // +1: UNUSED_LOCAL_VARIABLE
+ var notFoundMsg = 'ぐーぐるぐるぐるという場所は見つかりませんでした。';
+};
+</script>
diff --git a/tools/closure_linter/closure_linter/testutil.py b/tools/closure_linter/closure_linter/testutil.py
new file mode 100644
index 0000000000..f7084ee37b
--- /dev/null
+++ b/tools/closure_linter/closure_linter/testutil.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility functions for testing gjslint components."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import StringIO
+
+from closure_linter import ecmametadatapass
+from closure_linter import javascriptstatetracker
+from closure_linter import javascripttokenizer
+
+
+def TokenizeSource(source):
+ """Convert a source into a string of tokens.
+
+ Args:
+ source: A source file as a string or file-like object (iterates lines).
+
+ Returns:
+ The first token of the resulting token stream.
+ """
+
+ if isinstance(source, basestring):
+ source = StringIO.StringIO(source)
+
+ tokenizer = javascripttokenizer.JavaScriptTokenizer()
+ return tokenizer.TokenizeFile(source)
+
+
+def TokenizeSourceAndRunEcmaPass(source):
+ """Tokenize a source and run the EcmaMetaDataPass on it.
+
+ Args:
+ source: A source file as a string or file-like object (iterates lines).
+
+ Returns:
+ The first token of the resulting token stream.
+ """
+ start_token = TokenizeSource(source)
+ ecma_pass = ecmametadatapass.EcmaMetaDataPass()
+ ecma_pass.Process(start_token)
+ return start_token
+
+
+def ParseFunctionsAndComments(source, error_handler=None):
+ """Run the tokenizer and tracker and return comments and functions found.
+
+ Args:
+ source: A source file as a string or file-like object (iterates lines).
+ error_handler: An error handler.
+
+ Returns:
+ The functions and comments as a tuple.
+ """
+ start_token = TokenizeSourceAndRunEcmaPass(source)
+
+ tracker = javascriptstatetracker.JavaScriptStateTracker()
+ if error_handler is not None:
+ tracker.DocFlagPass(start_token, error_handler)
+
+ functions = []
+ comments = []
+ for token in start_token:
+ tracker.HandleToken(token, tracker.GetLastNonSpaceToken())
+
+ function = tracker.GetFunction()
+ if function and function not in functions:
+ functions.append(function)
+
+ comment = tracker.GetDocComment()
+ if comment and comment not in comments:
+ comments.append(comment)
+
+ tracker.HandleAfterToken(token)
+
+ return functions, comments
diff --git a/tools/closure_linter/closure_linter/tokenutil.py b/tools/closure_linter/closure_linter/tokenutil.py
index 6ed5f7f81c..11e3ccc68b 100755
--- a/tools/closure_linter/closure_linter/tokenutil.py
+++ b/tools/closure_linter/closure_linter/tokenutil.py
@@ -19,15 +19,17 @@
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
-from closure_linter.common import tokens
-from closure_linter import javascripttokens
-
import copy
+import StringIO
+
+from closure_linter.common import tokens
+from closure_linter.javascripttokens import JavaScriptToken
+from closure_linter.javascripttokens import JavaScriptTokenType
# Shorthand
-JavaScriptToken = javascripttokens.JavaScriptToken
Type = tokens.TokenType
+
def GetFirstTokenInSameLine(token):
"""Returns the first token in the same line as token.
@@ -42,6 +44,58 @@ def GetFirstTokenInSameLine(token):
return token
+def GetFirstTokenInPreviousLine(token):
+  """Returns the first token in the line preceding the given token.
+
+ Args:
+ token: Any token in the line.
+
+ Returns:
+    The first token in the previous line, or None if the given token is on the
+ first line.
+ """
+ first_in_line = GetFirstTokenInSameLine(token)
+ if first_in_line.previous:
+ return GetFirstTokenInSameLine(first_in_line.previous)
+
+ return None
+
+
+def GetLastTokenInSameLine(token):
+ """Returns the last token in the same line as token.
+
+ Args:
+ token: Any token in the line.
+
+ Returns:
+ The last token in the same line as token.
+ """
+ while not token.IsLastInLine():
+ token = token.next
+ return token
+
+
+def GetAllTokensInSameLine(token):
+ """Returns all tokens in the same line as the given token.
+
+ Args:
+ token: Any token in the line.
+
+ Returns:
+ All tokens on the same line as the given token.
+ """
+ first_token = GetFirstTokenInSameLine(token)
+ last_token = GetLastTokenInSameLine(token)
+
+ tokens_in_line = []
+ while first_token != last_token:
+ tokens_in_line.append(first_token)
+ first_token = first_token.next
+ tokens_in_line.append(last_token)
+
+ return tokens_in_line
+
+
def CustomSearch(start_token, func, end_func=None, distance=None,
reverse=False):
"""Returns the first token where func is True within distance of this token.
@@ -77,14 +131,14 @@ def CustomSearch(start_token, func, end_func=None, distance=None,
else:
while token and (distance is None or distance > 0):
- next = token.next
- if next:
- if func(next):
- return next
- if end_func and end_func(next):
+ next_token = token.next
+ if next_token:
+ if func(next_token):
+ return next_token
+ if end_func and end_func(next_token):
return None
- token = next
+ token = next_token
if distance is not None:
distance -= 1
@@ -123,7 +177,6 @@ def SearchExcept(start_token, token_types, distance=None, reverse=False):
reverse: When true, search the tokens before this one instead of the tokens
after it
-
Returns:
The first token of any type in token_types within distance of this token, or
None if no such token is found.
@@ -162,6 +215,13 @@ def DeleteToken(token):
Args:
token: The token to delete
"""
+ # When deleting a token, we do not update the deleted token itself to make
+ # sure the previous and next pointers are still pointing to tokens which are
+ # not deleted. Also it is very hard to keep track of all previously deleted
+ # tokens to update them when their pointers become invalid. So we add this
+ # flag that any token linked list iteration logic can skip deleted node safely
+ # when its current token is deleted.
+ token.is_deleted = True
if token.previous:
token.previous.next = token.next
@@ -173,19 +233,62 @@ def DeleteToken(token):
following_token.metadata.last_code = token.metadata.last_code
following_token = following_token.next
-def DeleteTokens(token, tokenCount):
+
+def DeleteTokens(token, token_count):
"""Deletes the given number of tokens starting with the given token.
Args:
token: The token to start deleting at.
- tokenCount: The total number of tokens to delete.
+ token_count: The total number of tokens to delete.
"""
- for i in xrange(1, tokenCount):
+ for i in xrange(1, token_count):
DeleteToken(token.next)
DeleteToken(token)
+
+def InsertTokenBefore(new_token, token):
+ """Insert new_token before token.
+
+ Args:
+ new_token: A token to be added to the stream
+ token: A token already in the stream
+ """
+ new_token.next = token
+ new_token.previous = token.previous
+
+ new_token.metadata = copy.copy(token.metadata)
+
+ if new_token.IsCode():
+ old_last_code = token.metadata.last_code
+ following_token = token
+ while (following_token and
+ following_token.metadata.last_code == old_last_code):
+ following_token.metadata.last_code = new_token
+ following_token = following_token.next
+
+ token.previous = new_token
+ if new_token.previous:
+ new_token.previous.next = new_token
+
+ if new_token.start_index is None:
+ if new_token.line_number == token.line_number:
+ new_token.start_index = token.start_index
+ else:
+ previous_token = new_token.previous
+ if previous_token:
+ new_token.start_index = (previous_token.start_index +
+ len(previous_token.string))
+ else:
+ new_token.start_index = 0
+
+ iterator = new_token.next
+ while iterator and iterator.line_number == new_token.line_number:
+ iterator.start_index += len(new_token.string)
+ iterator = iterator.next
+
+
def InsertTokenAfter(new_token, token):
- """Insert new_token after token
+ """Insert new_token after token.
Args:
new_token: A token to be added to the stream
@@ -221,6 +324,21 @@ def InsertTokenAfter(new_token, token):
iterator = iterator.next
+def InsertTokensAfter(new_tokens, token):
+ """Insert multiple tokens after token.
+
+ Args:
+ new_tokens: An array of tokens to be added to the stream
+ token: A token already in the stream
+ """
+ # TODO(user): It would be nicer to have InsertTokenAfter defer to here
+ # instead of vice-versa.
+ current_token = token
+ for new_token in new_tokens:
+ InsertTokenAfter(new_token, current_token)
+ current_token = new_token
+
+
def InsertSpaceTokenAfter(token):
"""Inserts a space token after the given token.
@@ -228,28 +346,44 @@ def InsertSpaceTokenAfter(token):
token: The token to insert a space token after
Returns:
- A single space token"""
+ A single space token
+ """
space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
token.line_number)
InsertTokenAfter(space_token, token)
-def InsertLineAfter(token):
+def InsertBlankLineAfter(token):
"""Inserts a blank line after the given token.
Args:
token: The token to insert a blank line after
Returns:
- A single space token"""
+ A single space token
+ """
blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
token.line_number + 1)
- InsertTokenAfter(blank_token, token)
- # Update all subsequent ine numbers.
- blank_token = blank_token.next
- while blank_token:
- blank_token.line_number += 1
- blank_token = blank_token.next
+ InsertLineAfter(token, [blank_token])
+
+
+def InsertLineAfter(token, new_tokens):
+ """Inserts a new line consisting of new_tokens after the given token.
+
+ Args:
+ token: The token to insert after.
+ new_tokens: The tokens that will make up the new line.
+ """
+ insert_location = token
+ for new_token in new_tokens:
+ InsertTokenAfter(new_token, insert_location)
+ insert_location = new_token
+
+ # Update all subsequent line numbers.
+ next_token = new_tokens[-1].next
+ while next_token:
+ next_token.line_number += 1
+ next_token = next_token.next
def SplitToken(token, position):
@@ -275,6 +409,10 @@ def SplitToken(token, position):
def Compare(token1, token2):
"""Compares two tokens and determines their relative order.
+ Args:
+ token1: The first token to compare.
+ token2: The second token to compare.
+
Returns:
A negative integer, zero, or a positive integer as the first token is
before, equal, or after the second in the token stream.
@@ -283,3 +421,277 @@ def Compare(token1, token2):
return token1.line_number - token2.line_number
else:
return token1.start_index - token2.start_index
+
+
+def GoogScopeOrNoneFromStartBlock(token):
+ """Determines if the given START_BLOCK is part of a goog.scope statement.
+
+ Args:
+ token: A token of type START_BLOCK.
+
+ Returns:
+ The goog.scope function call token, or None if such call doesn't exist.
+ """
+ if token.type != JavaScriptTokenType.START_BLOCK:
+ return None
+
+ # Search for a goog.scope statement, which will be 5 tokens before the
+ # block. Illustration of the tokens found prior to the start block:
+ # goog.scope(function() {
+ # 5 4 3 21 ^
+
+ maybe_goog_scope = token
+ for unused_i in xrange(5):
+ maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and
+ maybe_goog_scope.previous else None)
+ if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
+ return maybe_goog_scope
+
+
+def GetTokenRange(start_token, end_token):
+ """Returns a list of tokens between the two given, inclusive.
+
+ Args:
+ start_token: Start token in the range.
+ end_token: End token in the range.
+
+ Returns:
+ A list of tokens, in order, from start_token to end_token (including start
+    and end). Returns None if the tokens do not describe a valid range.
+ """
+
+ token_range = []
+ token = start_token
+
+ while token:
+ token_range.append(token)
+
+ if token == end_token:
+ return token_range
+
+ token = token.next
+
+
+def TokensToString(token_iterable):
+ """Convert a number of tokens into a string.
+
+ Newlines will be inserted whenever the line_number of two neighboring
+ strings differ.
+
+ Args:
+ token_iterable: The tokens to turn to a string.
+
+ Returns:
+ A string representation of the given tokens.
+ """
+
+ buf = StringIO.StringIO()
+ token_list = list(token_iterable)
+ if not token_list:
+ return ''
+
+ line_number = token_list[0].line_number
+
+ for token in token_list:
+
+ while line_number < token.line_number:
+ line_number += 1
+ buf.write('\n')
+
+ if line_number > token.line_number:
+ line_number = token.line_number
+ buf.write('\n')
+
+ buf.write(token.string)
+
+ return buf.getvalue()
+
+
+def GetPreviousCodeToken(token):
+ """Returns the code token before the specified token.
+
+ Args:
+ token: A token.
+
+ Returns:
+ The code token before the specified token or None if no such token
+ exists.
+ """
+
+ return CustomSearch(
+ token,
+ lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
+ reverse=True)
+
+
+def GetNextCodeToken(token):
+ """Returns the next code token after the specified token.
+
+ Args:
+ token: A token.
+
+ Returns:
+ The next code token after the specified token or None if no such token
+ exists.
+ """
+
+ return CustomSearch(
+ token,
+ lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
+ reverse=False)
+
+
+def GetIdentifierStart(token):
+ """Returns the first token in an identifier.
+
+ Given a token which is part of an identifier, returns the token at the start
+ of the identifier.
+
+ Args:
+ token: A token which is part of an identifier.
+
+ Returns:
+ The token at the start of the identifier or None if the identifier was not
+ of the form 'a.b.c' (e.g. "['a']['b'].c").
+ """
+
+ start_token = token
+ previous_code_token = GetPreviousCodeToken(token)
+
+ while (previous_code_token and (
+ previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or
+ IsDot(previous_code_token))):
+ start_token = previous_code_token
+ previous_code_token = GetPreviousCodeToken(previous_code_token)
+
+ if IsDot(start_token):
+ return None
+
+ return start_token
+
+
+def GetIdentifierForToken(token):
+ """Get the symbol specified by a token.
+
+ Given a token, this function additionally concatenates any parts of an
+ identifying symbol being identified that are split by whitespace or a
+ newline.
+
+ The function will return None if the token is not the first token of an
+ identifier.
+
+ Args:
+ token: The first token of a symbol.
+
+ Returns:
+ The whole symbol, as a string.
+ """
+
+ # Search backward to determine if this token is the first token of the
+ # identifier. If it is not the first token, return None to signal that this
+ # token should be ignored.
+ prev_token = token.previous
+ while prev_token:
+ if (prev_token.IsType(JavaScriptTokenType.IDENTIFIER) or
+ IsDot(prev_token)):
+ return None
+
+ if (prev_token.IsType(tokens.TokenType.WHITESPACE) or
+ prev_token.IsAnyType(JavaScriptTokenType.COMMENT_TYPES)):
+ prev_token = prev_token.previous
+ else:
+ break
+
+ # A "function foo()" declaration.
+ if token.type is JavaScriptTokenType.FUNCTION_NAME:
+ return token.string
+
+ # A "var foo" declaration (if the previous token is 'var')
+ previous_code_token = GetPreviousCodeToken(token)
+
+ if previous_code_token and previous_code_token.IsKeyword('var'):
+ return token.string
+
+ # Otherwise, this is potentially a namespaced (goog.foo.bar) identifier that
+ # could span multiple lines or be broken up by whitespace. We need
+ # to concatenate.
+ identifier_types = set([
+ JavaScriptTokenType.IDENTIFIER,
+ JavaScriptTokenType.SIMPLE_LVALUE
+ ])
+
+ assert token.type in identifier_types
+
+ # Start with the first token
+ symbol_tokens = [token]
+
+ if token.next:
+ for t in token.next:
+ last_symbol_token = symbol_tokens[-1]
+
+ # A dot is part of the previous symbol.
+ if IsDot(t):
+ symbol_tokens.append(t)
+ continue
+
+ # An identifier is part of the previous symbol if the previous one was a
+ # dot.
+ if t.type in identifier_types:
+ if IsDot(last_symbol_token):
+ symbol_tokens.append(t)
+ continue
+ else:
+ break
+
+ # Skip any whitespace
+ if t.type in JavaScriptTokenType.NON_CODE_TYPES:
+ continue
+
+ # This is the end of the identifier. Stop iterating.
+ break
+
+ if symbol_tokens:
+ return ''.join([t.string for t in symbol_tokens])
+
+
+def GetStringAfterToken(token):
+ """Get string after token.
+
+ Args:
+ token: Search will be done after this token.
+
+ Returns:
+ String if found after token else None (empty string will also
+ return None).
+
+ Search until end of string as in case of empty string Type.STRING_TEXT is not
+ present/found and don't want to return next string.
+ E.g.
+ a = '';
+ b = 'test';
+ When searching for string after 'a' if search is not limited by end of string
+ then it will return 'test' which is not desirable as there is a empty string
+ before that.
+
+ This will return None for cases where string is empty or no string found
+ as in both cases there is no Type.STRING_TEXT.
+ """
+ string_token = SearchUntil(token, JavaScriptTokenType.STRING_TEXT,
+ [JavaScriptTokenType.SINGLE_QUOTE_STRING_END,
+ JavaScriptTokenType.DOUBLE_QUOTE_STRING_END])
+ if string_token:
+ return string_token.string
+ else:
+ return None
+
+
+def IsDot(token):
+ """Whether the token represents a "dot" operator (foo.bar)."""
+ return token.type is JavaScriptTokenType.OPERATOR and token.string == '.'
+
+
+def IsIdentifierOrDot(token):
+ """Whether the token is either an identifier or a '.'."""
+ return (token.type in [JavaScriptTokenType.IDENTIFIER,
+ JavaScriptTokenType.SIMPLE_LVALUE] or
+ IsDot(token))
diff --git a/tools/closure_linter/closure_linter/tokenutil_test.py b/tools/closure_linter/closure_linter/tokenutil_test.py
new file mode 100644
index 0000000000..c7d3854776
--- /dev/null
+++ b/tools/closure_linter/closure_linter/tokenutil_test.py
@@ -0,0 +1,297 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for the tokenutil module."""
+
+# Allow non-Google copyright
+# pylint: disable=g-bad-file-header
+
+__author__ = ('nnaze@google.com (Nathan Naze)')
+
+import unittest as googletest
+
+from closure_linter import ecmametadatapass
+from closure_linter import javascripttokens
+from closure_linter import testutil
+from closure_linter import tokenutil
+
+
+class FakeToken(object):
+ pass
+
+
+class TokenUtilTest(googletest.TestCase):
+
+ def testGetTokenRange(self):
+
+ a = FakeToken()
+ b = FakeToken()
+ c = FakeToken()
+ d = FakeToken()
+ e = FakeToken()
+
+ a.next = b
+ b.next = c
+ c.next = d
+
+ self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d))
+
+ # This is an error as e does not come after a in the token chain.
+ self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e))
+
+ def testTokensToString(self):
+
+ a = FakeToken()
+ b = FakeToken()
+ c = FakeToken()
+ d = FakeToken()
+ e = FakeToken()
+
+ a.string = 'aaa'
+ b.string = 'bbb'
+ c.string = 'ccc'
+ d.string = 'ddd'
+ e.string = 'eee'
+
+ a.line_number = 5
+ b.line_number = 6
+ c.line_number = 6
+ d.line_number = 10
+ e.line_number = 11
+
+ self.assertEquals(
+ 'aaa\nbbbccc\n\n\n\nddd\neee',
+ tokenutil.TokensToString([a, b, c, d, e]))
+
+ self.assertEquals(
+ 'ddd\neee\naaa\nbbbccc',
+ tokenutil.TokensToString([d, e, a, b, c]),
+ 'Neighboring tokens not in line_number order should have a newline '
+ 'between them.')
+
+ def testGetPreviousCodeToken(self):
+
+ tokens = testutil.TokenizeSource("""
+start1. // comment
+ /* another comment */
+ end1
+""")
+
+ def _GetTokenStartingWith(token_starts_with):
+ for t in tokens:
+ if t.string.startswith(token_starts_with):
+ return t
+
+ self.assertEquals(
+ None,
+ tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1')))
+
+ self.assertEquals(
+ '.',
+ tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string)
+
+ self.assertEquals(
+ 'start1',
+ tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('.')).string)
+
+ def testGetNextCodeToken(self):
+
+ tokens = testutil.TokenizeSource("""
+start1. // comment
+ /* another comment */
+ end1
+""")
+
+ def _GetTokenStartingWith(token_starts_with):
+ for t in tokens:
+ if t.string.startswith(token_starts_with):
+ return t
+
+ self.assertEquals(
+ '.',
+ tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string)
+
+ self.assertEquals(
+ 'end1',
+ tokenutil.GetNextCodeToken(_GetTokenStartingWith('.')).string)
+
+ self.assertEquals(
+ None,
+ tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1')))
+
+ def testGetIdentifierStart(self):
+
+ tokens = testutil.TokenizeSource("""
+start1 . // comment
+ prototype. /* another comment */
+ end1
+
+['edge'][case].prototype.
+ end2 = function() {}
+""")
+
+ def _GetTokenStartingWith(token_starts_with):
+ for t in tokens:
+ if t.string.startswith(token_starts_with):
+ return t
+
+ self.assertEquals(
+ 'start1',
+ tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string)
+
+ self.assertEquals(
+ 'start1',
+ tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string)
+
+ self.assertEquals(
+ None,
+ tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2')))
+
+ def testInsertTokenBefore(self):
+
+ self.AssertInsertTokenAfterBefore(False)
+
+ def testInsertTokenAfter(self):
+
+ self.AssertInsertTokenAfterBefore(True)
+
+ def AssertInsertTokenAfterBefore(self, after):
+
+ new_token = javascripttokens.JavaScriptToken(
+ 'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1)
+
+ existing_token1 = javascripttokens.JavaScriptToken(
+ 'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1)
+ existing_token1.start_index = 0
+ existing_token1.metadata = ecmametadatapass.EcmaMetaData()
+
+ existing_token2 = javascripttokens.JavaScriptToken(
+ ' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1)
+ existing_token2.start_index = 3
+ existing_token2.metadata = ecmametadatapass.EcmaMetaData()
+ existing_token2.metadata.last_code = existing_token1
+
+ existing_token1.next = existing_token2
+ existing_token2.previous = existing_token1
+
+ if after:
+ tokenutil.InsertTokenAfter(new_token, existing_token1)
+ else:
+ tokenutil.InsertTokenBefore(new_token, existing_token2)
+
+ self.assertEquals(existing_token1, new_token.previous)
+ self.assertEquals(existing_token2, new_token.next)
+
+ self.assertEquals(new_token, existing_token1.next)
+ self.assertEquals(new_token, existing_token2.previous)
+
+ self.assertEquals(existing_token1, new_token.metadata.last_code)
+ self.assertEquals(new_token, existing_token2.metadata.last_code)
+
+ self.assertEquals(0, existing_token1.start_index)
+ self.assertEquals(3, new_token.start_index)
+ self.assertEquals(4, existing_token2.start_index)
+
+ def testGetIdentifierForToken(self):
+
+ tokens = testutil.TokenizeSource("""
+start1.abc.def.prototype.
+ onContinuedLine
+
+(start2.abc.def
+ .hij.klm
+ .nop)
+
+start3.abc.def
+ .hij = function() {};
+
+// An absurd multi-liner.
+start4.abc.def.
+ hij.
+ klm = function() {};
+
+start5 . aaa . bbb . ccc
+ shouldntBePartOfThePreviousSymbol
+
+start6.abc.def ghi.shouldntBePartOfThePreviousSymbol
+
+var start7 = 42;
+
+function start8() {
+
+}
+
+start9.abc. // why is there a comment here?
+ def /* another comment */
+ shouldntBePart
+
+start10.abc // why is there a comment here?
+ .def /* another comment */
+ shouldntBePart
+
+start11.abc. middle1.shouldNotBeIdentifier
+""")
+
+ def _GetTokenStartingWith(token_starts_with):
+ for t in tokens:
+ if t.string.startswith(token_starts_with):
+ return t
+
+ self.assertEquals(
+ 'start1.abc.def.prototype.onContinuedLine',
+ tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1')))
+
+ self.assertEquals(
+ 'start2.abc.def.hij.klm.nop',
+ tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2')))
+
+ self.assertEquals(
+ 'start3.abc.def.hij',
+ tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3')))
+
+ self.assertEquals(
+ 'start4.abc.def.hij.klm',
+ tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4')))
+
+ self.assertEquals(
+ 'start5.aaa.bbb.ccc',
+ tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5')))
+
+ self.assertEquals(
+ 'start6.abc.def',
+ tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6')))
+
+ self.assertEquals(
+ 'start7',
+ tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7')))
+
+ self.assertEquals(
+ 'start8',
+ tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8')))
+
+ self.assertEquals(
+ 'start9.abc.def',
+ tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9')))
+
+ self.assertEquals(
+ 'start10.abc.def',
+ tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10')))
+
+ self.assertIsNone(
+ tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1')))
+
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/closure_linter/typeannotation.py b/tools/closure_linter/closure_linter/typeannotation.py
new file mode 100644
index 0000000000..00604c13a7
--- /dev/null
+++ b/tools/closure_linter/closure_linter/typeannotation.py
@@ -0,0 +1,401 @@
+#!/usr/bin/env python
+#*-* coding: utf-8
+"""Closure typeannotation parsing and utilities."""
+
+
+
+from closure_linter import errors
+from closure_linter import javascripttokens
+from closure_linter.common import error
+
+# Shorthand
+TYPE = javascripttokens.JavaScriptTokenType
+
+
+class TypeAnnotation(object):
+ """Represents a structured view of a closure type annotation.
+
+ Attribute:
+ identifier: The name of the type.
+ key_type: The name part before a colon.
+ sub_types: The list of sub_types used e.g. for Array.<…>
+ or_null: The '?' annotation
+ not_null: The '!' annotation
+ type_group: If this a a grouping (a|b), but does not include function(a).
+ return_type: The return type of a function definition.
+ alias: The actual type set by closurizednamespaceinfo if the identifier uses
+ an alias to shorten the name.
+ tokens: An ordered list of tokens used for this type. May contain
+ TypeAnnotation instances for sub_types, key_type or return_type.
+ """
+
+ IMPLICIT_TYPE_GROUP = 2
+
+ NULLABILITY_UNKNOWN = 2
+
+ # Frequently used known non-nullable types.
+ NON_NULLABLE = frozenset([
+ 'boolean', 'function', 'number', 'string', 'undefined'])
+ # Frequently used known nullable types.
+ NULLABLE_TYPE_WHITELIST = frozenset([
+ 'Array', 'Document', 'Element', 'Function', 'Node', 'NodeList',
+ 'Object'])
+
+ def __init__(self):
+ self.identifier = ''
+ self.sub_types = []
+ self.or_null = False
+ self.not_null = False
+ self.type_group = False
+ self.alias = None
+ self.key_type = None
+ self.record_type = False
+ self.opt_arg = False
+ self.return_type = None
+ self.tokens = []
+
+ def IsFunction(self):
+ """Determines whether this is a function definition."""
+ return self.identifier == 'function'
+
+ def IsConstructor(self):
+ """Determines whether this is a function definition for a constructor."""
+ key_type = self.sub_types and self.sub_types[0].key_type
+ return self.IsFunction() and key_type.identifier == 'new'
+
+ def IsRecordType(self):
+ """Returns True if this type is a record type."""
+ return (self.record_type or
+ bool([t for t in self.sub_types if t.IsRecordType()]))
+
+ def IsVarArgsType(self):
+ """Determines if the type is a var_args type, i.e. starts with '...'."""
+ return self.identifier.startswith('...') or (
+ self.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP and
+ self.sub_types[0].identifier.startswith('...'))
+
+ def IsEmpty(self):
+ """Returns True if the type is empty."""
+ return not self.tokens
+
+ def IsUnknownType(self):
+ """Returns True if this is the unknown type {?}."""
+ return (self.or_null
+ and not self.identifier
+ and not self.sub_types
+ and not self.return_type)
+
+ def Append(self, item):
+ """Adds a sub_type to this type and finalizes it.
+
+ Args:
+ item: The TypeAnnotation item to append.
+ """
+ # item is a TypeAnnotation instance, so pylint: disable=protected-access
+ self.sub_types.append(item._Finalize(self))
+
+ def __repr__(self):
+ """Reconstructs the type definition."""
+ append = ''
+ if self.sub_types:
+ separator = (',' if not self.type_group else '|')
+ if self.identifier == 'function':
+ surround = '(%s)'
+ else:
+ surround = {False: '{%s}' if self.record_type else '<%s>',
+ True: '(%s)',
+ self.IMPLICIT_TYPE_GROUP: '%s'}[self.type_group]
+ append = surround % separator.join([repr(t) for t in self.sub_types])
+ if self.return_type:
+ append += ':%s' % repr(self.return_type)
+ append += '=' if self.opt_arg else ''
+ prefix = '' + ('?' if self.or_null else '') + ('!' if self.not_null else '')
+ keyword = '%s:' % repr(self.key_type) if self.key_type else ''
+ return keyword + prefix + '%s' % (self.alias or self.identifier) + append
+
+ def ToString(self):
+ """Concats the type's tokens to form a string again."""
+ ret = []
+ for token in self.tokens:
+ if not isinstance(token, TypeAnnotation):
+ ret.append(token.string)
+ else:
+ ret.append(token.ToString())
+ return ''.join(ret)
+
+ def Dump(self, indent=''):
+ """Dumps this type's structure for debugging purposes."""
+ result = []
+ for t in self.tokens:
+ if isinstance(t, TypeAnnotation):
+ result.append(indent + str(t) + ' =>\n' + t.Dump(indent + ' '))
+ else:
+ result.append(indent + str(t))
+ return '\n'.join(result)
+
+ def IterIdentifiers(self):
+ """Iterates over all identifiers in this type and its subtypes."""
+ if self.identifier:
+ yield self.identifier
+ for subtype in self.IterTypes():
+ for identifier in subtype.IterIdentifiers():
+ yield identifier
+
+ def IterTypeGroup(self):
+ """Iterates over all types in the type group including self.
+
+ Yields:
+ If this is a implicit or manual type-group: all sub_types.
+ Otherwise: self
+ E.g. for @type {Foo.<Bar>} this will yield only Foo.<Bar>,
+ for @type {Foo|(Bar|Sample)} this will yield Foo, Bar and Sample.
+
+ """
+ if self.type_group:
+ for sub_type in self.sub_types:
+ for sub_type in sub_type.IterTypeGroup():
+ yield sub_type
+ else:
+ yield self
+
+ def IterTypes(self):
+ """Iterates over each subtype as well as return and key types."""
+ if self.return_type:
+ yield self.return_type
+
+ if self.key_type:
+ yield self.key_type
+
+ for sub_type in self.sub_types:
+ yield sub_type
+
+ def GetNullability(self, modifiers=True):
+ """Computes whether the type may be null.
+
+ Args:
+ modifiers: Whether the modifiers ? and ! should be considered in the
+ evaluation.
+ Returns:
+ True if the type allows null, False if the type is strictly non nullable
+ and NULLABILITY_UNKNOWN if the nullability cannot be determined.
+ """
+
+ # Explicitly marked nullable types or 'null' are nullable.
+ if (modifiers and self.or_null) or self.identifier == 'null':
+ return True
+
+ # Explicitly marked non-nullable types or non-nullable base types:
+ if ((modifiers and self.not_null) or self.record_type
+ or self.identifier in self.NON_NULLABLE):
+ return False
+
+ # A type group is nullable if any of its elements are nullable.
+ if self.type_group:
+ maybe_nullable = False
+ for sub_type in self.sub_types:
+ nullability = sub_type.GetNullability()
+ if nullability == self.NULLABILITY_UNKNOWN:
+ maybe_nullable = nullability
+ elif nullability:
+ return True
+ return maybe_nullable
+
+ # Whitelisted types are nullable.
+ if self.identifier.rstrip('.') in self.NULLABLE_TYPE_WHITELIST:
+ return True
+
+ # All other types are unknown (most should be nullable, but
+ # enums are not and typedefs might not be).
+ return self.NULLABILITY_UNKNOWN
+
+ def WillAlwaysBeNullable(self):
+ """Computes whether the ! flag is illegal for this type.
+
+ This is the case if this type or any of the subtypes is marked as
+ explicitly nullable.
+
+ Returns:
+ True if the ! flag would be illegal.
+ """
+ if self.or_null or self.identifier == 'null':
+ return True
+
+ if self.type_group:
+ return bool([t for t in self.sub_types if t.WillAlwaysBeNullable()])
+
+ return False
+
+ def _Finalize(self, parent):
+ """Fixes some parsing issues once the TypeAnnotation is complete."""
+
+ # Normalize functions whose definition ended up in the key type because
+ # they defined a return type after a colon.
+ if self.key_type and self.key_type.identifier == 'function':
+ current = self.key_type
+ current.return_type = self
+ self.key_type = None
+ # opt_arg never refers to the return type but to the function itself.
+ current.opt_arg = self.opt_arg
+ self.opt_arg = False
+ return current
+
+ # If a typedef just specified the key, it will not end up in the key type.
+ if parent.record_type and not self.key_type:
+ current = TypeAnnotation()
+ current.key_type = self
+ current.tokens.append(self)
+ return current
+ return self
+
+ def FirstToken(self):
+ """Returns the first token used in this type or any of its subtypes."""
+ first = self.tokens[0]
+ return first.FirstToken() if isinstance(first, TypeAnnotation) else first
+
+
+def Parse(token, token_end, error_handler):
+ """Parses a type annotation and returns a TypeAnnotation object."""
+ return TypeAnnotationParser(error_handler).Parse(token.next, token_end)
+
+
+class TypeAnnotationParser(object):
+ """A parser for type annotations constructing the TypeAnnotation object."""
+
+ def __init__(self, error_handler):
+ self._stack = []
+ self._error_handler = error_handler
+ self._closing_error = False
+
+ def Parse(self, token, token_end):
+ """Parses a type annotation and returns a TypeAnnotation object."""
+ root = TypeAnnotation()
+ self._stack.append(root)
+ current = TypeAnnotation()
+ root.tokens.append(current)
+
+ while token and token != token_end:
+ if token.type in (TYPE.DOC_TYPE_START_BLOCK, TYPE.DOC_START_BRACE):
+ if token.string == '(':
+ if (current.identifier and
+ current.identifier not in ['function', '...']):
+ self.Error(token,
+ 'Invalid identifier for (): "%s"' % current.identifier)
+ current.type_group = current.identifier != 'function'
+ elif token.string == '{':
+ current.record_type = True
+ current.tokens.append(token)
+ self._stack.append(current)
+ current = TypeAnnotation()
+ self._stack[-1].tokens.append(current)
+
+ elif token.type in (TYPE.DOC_TYPE_END_BLOCK, TYPE.DOC_END_BRACE):
+ prev = self._stack.pop()
+ prev.Append(current)
+ current = prev
+
+ # If an implicit type group was created, close it as well.
+ if prev.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP:
+ prev = self._stack.pop()
+ prev.Append(current)
+ current = prev
+ current.tokens.append(token)
+
+ elif token.type == TYPE.DOC_TYPE_MODIFIER:
+ if token.string == '!':
+ current.tokens.append(token)
+ current.not_null = True
+ elif token.string == '?':
+ current.tokens.append(token)
+ current.or_null = True
+ elif token.string == ':':
+ current.tokens.append(token)
+ prev = current
+ current = TypeAnnotation()
+ prev.tokens.append(current)
+ current.key_type = prev
+ elif token.string == '=':
+ # For implicit type groups the '=' refers to the parent.
+ try:
+ if self._stack[-1].type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP:
+ self._stack[-1].tokens.append(token)
+ self._stack[-1].opt_arg = True
+ else:
+ current.tokens.append(token)
+ current.opt_arg = True
+ except IndexError:
+ self.ClosingError(token)
+ elif token.string == '|':
+ # If a type group has explicitly been opened do a normal append.
+ # Otherwise we have to open the type group and move the current
+ # type into it, before appending
+ if not self._stack[-1].type_group:
+ type_group = TypeAnnotation()
+ if current.key_type and current.key_type.identifier != 'function':
+ type_group.key_type = current.key_type
+ current.key_type = None
+ type_group.type_group = TypeAnnotation.IMPLICIT_TYPE_GROUP
+ # Fix the token order
+ prev = self._stack[-1].tokens.pop()
+ self._stack[-1].tokens.append(type_group)
+ type_group.tokens.append(prev)
+ self._stack.append(type_group)
+ self._stack[-1].tokens.append(token)
+ self.Append(current, error_token=token)
+ current = TypeAnnotation()
+ self._stack[-1].tokens.append(current)
+ elif token.string == ',':
+ self.Append(current, error_token=token)
+ current = TypeAnnotation()
+ self._stack[-1].tokens.append(token)
+ self._stack[-1].tokens.append(current)
+ else:
+ current.tokens.append(token)
+ self.Error(token, 'Invalid token')
+
+ elif token.type == TYPE.COMMENT:
+ current.tokens.append(token)
+ current.identifier += token.string.strip()
+
+ elif token.type in [TYPE.DOC_PREFIX, TYPE.WHITESPACE]:
+ current.tokens.append(token)
+
+ else:
+ current.tokens.append(token)
+ self.Error(token, 'Unexpected token')
+
+ token = token.next
+
+ self.Append(current, error_token=token)
+ try:
+ ret = self._stack.pop()
+ except IndexError:
+ self.ClosingError(token)
+ # The type is screwed up, but let's return something.
+ return current
+
+ if self._stack and (len(self._stack) != 1 or
+ ret.type_group != TypeAnnotation.IMPLICIT_TYPE_GROUP):
+ self.Error(token, 'Too many opening items.')
+
+ return ret if len(ret.sub_types) > 1 else ret.sub_types[0]
+
+ def Append(self, type_obj, error_token):
+ """Appends a new TypeAnnotation object to the current parent."""
+ if self._stack:
+ self._stack[-1].Append(type_obj)
+ else:
+ self.ClosingError(error_token)
+
+ def ClosingError(self, token):
+ """Reports an error about too many closing items, but only once."""
+ if not self._closing_error:
+ self._closing_error = True
+ self.Error(token, 'Too many closing items.')
+
+ def Error(self, token, message):
+ """Calls the error_handler to post an error message."""
+ if self._error_handler:
+ self._error_handler.HandleError(error.Error(
+ errors.JSDOC_DOES_NOT_PARSE,
+ 'Error parsing jsdoc type at token "%s" (column: %d): %s' %
+ (token.string, token.start_index, message), token))
diff --git a/tools/closure_linter/closure_linter/typeannotation_test.py b/tools/closure_linter/closure_linter/typeannotation_test.py
new file mode 100755
index 0000000000..da9dfa369f
--- /dev/null
+++ b/tools/closure_linter/closure_linter/typeannotation_test.py
@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+"""Unit tests for the typeannotation module."""
+
+
+
+
+import unittest as googletest
+
+from closure_linter import testutil
+from closure_linter.common import erroraccumulator
+
+CRAZY_TYPE = ('Array.<!function(new:X,{a:null},...(c|d)):'
+ 'function(...(Object.<string>))>')
+
+
+class TypeErrorException(Exception):
+ """Exception for TypeErrors."""
+
+ def __init__(self, errors):
+ super(TypeErrorException, self).__init__()
+ self.errors = errors
+
+
+class TypeParserTest(googletest.TestCase):
+ """Tests for typeannotation parsing."""
+
+ def _ParseComment(self, script):
+ """Parse a script that contains one comment and return it."""
+ accumulator = erroraccumulator.ErrorAccumulator()
+ _, comments = testutil.ParseFunctionsAndComments(script, accumulator)
+ if accumulator.GetErrors():
+ raise TypeErrorException(accumulator.GetErrors())
+ self.assertEquals(1, len(comments))
+ return comments[0]
+
+ def _ParseType(self, type_str):
+ """Creates a comment to parse and returns the parsed type."""
+ comment = self._ParseComment('/** @type {%s} **/' % type_str)
+ return comment.GetDocFlags()[0].jstype
+
+ def assertProperReconstruction(self, type_str, matching_str=None):
+ """Parses the type and asserts the its repr matches the type.
+
+ If matching_str is specified, it will assert that the repr matches this
+ string instead.
+
+ Args:
+ type_str: The type string to parse.
+ matching_str: A string the __repr__ of the parsed type should match.
+ Returns:
+ The parsed js_type.
+ """
+ parsed_type = self._ParseType(type_str)
+ # Use listEqual assertion to more easily identify the difference
+ self.assertListEqual(list(matching_str or type_str),
+ list(repr(parsed_type)))
+ self.assertEquals(matching_str or type_str, repr(parsed_type))
+
+ # Newlines will be inserted by the file writer.
+ self.assertEquals(type_str.replace('\n', ''), parsed_type.ToString())
+ return parsed_type
+
+ def assertNullable(self, type_str, nullable=True):
+ parsed_type = self.assertProperReconstruction(type_str)
+ self.assertEquals(nullable, parsed_type.GetNullability(),
+ '"%s" should %sbe nullable' %
+ (type_str, 'not ' if nullable else ''))
+
+ def assertNotNullable(self, type_str):
+ return self.assertNullable(type_str, nullable=False)
+
+ def testReconstruction(self):
+ self.assertProperReconstruction('*')
+ self.assertProperReconstruction('number')
+ self.assertProperReconstruction('(((number)))')
+ self.assertProperReconstruction('!number')
+ self.assertProperReconstruction('?!number')
+ self.assertProperReconstruction('number=')
+ self.assertProperReconstruction('number=!?', '?!number=')
+ self.assertProperReconstruction('number|?string')
+ self.assertProperReconstruction('(number|string)')
+ self.assertProperReconstruction('?(number|string)')
+ self.assertProperReconstruction('Object.<number,string>')
+ self.assertProperReconstruction('function(new:Object)')
+ self.assertProperReconstruction('function(new:Object):number')
+ self.assertProperReconstruction('function(new:Object,Element):number')
+ self.assertProperReconstruction('function(this:T,...)')
+ self.assertProperReconstruction('{a:?number}')
+ self.assertProperReconstruction('{a:?number,b:(number|string)}')
+ self.assertProperReconstruction('{c:{nested_element:*}|undefined}')
+ self.assertProperReconstruction('{handleEvent:function(?):?}')
+ self.assertProperReconstruction('function():?|null')
+ self.assertProperReconstruction('null|function():?|bar')
+
+ def testOptargs(self):
+ self.assertProperReconstruction('number=')
+ self.assertProperReconstruction('number|string=')
+ self.assertProperReconstruction('(number|string)=')
+ self.assertProperReconstruction('(number|string=)')
+ self.assertProperReconstruction('(number=|string)')
+ self.assertProperReconstruction('function(...):number=')
+
+ def testIndepth(self):
+ # Do an deeper check of the crazy identifier
+ crazy = self.assertProperReconstruction(CRAZY_TYPE)
+ self.assertEquals('Array.', crazy.identifier)
+ self.assertEquals(1, len(crazy.sub_types))
+ func1 = crazy.sub_types[0]
+ func2 = func1.return_type
+ self.assertEquals('function', func1.identifier)
+ self.assertEquals('function', func2.identifier)
+ self.assertEquals(3, len(func1.sub_types))
+ self.assertEquals(1, len(func2.sub_types))
+ self.assertEquals('Object.', func2.sub_types[0].sub_types[0].identifier)
+
+ def testIterIdentifiers(self):
+ nested_identifiers = self._ParseType('(a|{b:(c|function(new:d):e)})')
+ for identifier in ('a', 'b', 'c', 'd', 'e'):
+ self.assertIn(identifier, nested_identifiers.IterIdentifiers())
+
+ def testIsEmpty(self):
+ self.assertTrue(self._ParseType('').IsEmpty())
+ self.assertFalse(self._ParseType('?').IsEmpty())
+ self.assertFalse(self._ParseType('!').IsEmpty())
+ self.assertFalse(self._ParseType('<?>').IsEmpty())
+
+ def testIsConstructor(self):
+ self.assertFalse(self._ParseType('').IsConstructor())
+ self.assertFalse(self._ParseType('Array.<number>').IsConstructor())
+ self.assertTrue(self._ParseType('function(new:T)').IsConstructor())
+
+ def testIsVarArgsType(self):
+ self.assertTrue(self._ParseType('...number').IsVarArgsType())
+ self.assertTrue(self._ParseType('...Object|Array').IsVarArgsType())
+ self.assertTrue(self._ParseType('...(Object|Array)').IsVarArgsType())
+ self.assertFalse(self._ParseType('Object|...Array').IsVarArgsType())
+ self.assertFalse(self._ParseType('(...Object|Array)').IsVarArgsType())
+
+ def testIsUnknownType(self):
+ self.assertTrue(self._ParseType('?').IsUnknownType())
+ self.assertTrue(self._ParseType('Foo.<?>').sub_types[0].IsUnknownType())
+ self.assertFalse(self._ParseType('?|!').IsUnknownType())
+ self.assertTrue(self._ParseType('?|!').sub_types[0].IsUnknownType())
+ self.assertFalse(self._ParseType('!').IsUnknownType())
+
+ long_type = 'function():?|{handleEvent:function(?=):?,sample:?}|?='
+ record = self._ParseType(long_type)
+ # First check that there's not just one type with 3 return types, but three
+ # top-level types.
+ self.assertEquals(3, len(record.sub_types))
+
+ # Now extract all unknown type instances and verify that they really are.
+ handle_event, sample = record.sub_types[1].sub_types
+ for i, sub_type in enumerate([
+ record.sub_types[0].return_type,
+ handle_event.return_type,
+ handle_event.sub_types[0],
+ sample,
+ record.sub_types[2]]):
+ self.assertTrue(sub_type.IsUnknownType(),
+ 'Type %d should be the unknown type: %s\n%s' % (
+ i, sub_type.tokens, record.Dump()))
+
+ def testTypedefNames(self):
+ easy = self._ParseType('{a}')
+ self.assertTrue(easy.record_type)
+
+ easy = self.assertProperReconstruction('{a}', '{a:}').sub_types[0]
+ self.assertEquals('a', easy.key_type.identifier)
+ self.assertEquals('', easy.identifier)
+
+ easy = self.assertProperReconstruction('{a:b}').sub_types[0]
+ self.assertEquals('a', easy.key_type.identifier)
+ self.assertEquals('b', easy.identifier)
+
+ def assertTypeError(self, type_str):
+ """Asserts that parsing the given type raises a linter error."""
+ self.assertRaises(TypeErrorException, self._ParseType, type_str)
+
+ def testParseBadTypes(self):
+ """Tests that several errors in types don't break the parser."""
+ self.assertTypeError('<')
+ self.assertTypeError('>')
+ self.assertTypeError('Foo.<Bar')
+ self.assertTypeError('Foo.Bar>=')
+ self.assertTypeError('Foo.<Bar>>=')
+ self.assertTypeError('(')
+ self.assertTypeError(')')
+ self.assertTypeError('Foo.<Bar)>')
+ self._ParseType(':')
+ self._ParseType(':foo')
+ self.assertTypeError(':)foo')
+ self.assertTypeError('(a|{b:(c|function(new:d):e')
+
+ def testNullable(self):
+ self.assertNullable('null')
+ self.assertNullable('Object')
+ self.assertNullable('?string')
+ self.assertNullable('?number')
+
+ self.assertNotNullable('string')
+ self.assertNotNullable('number')
+ self.assertNotNullable('boolean')
+ self.assertNotNullable('function(Object)')
+ self.assertNotNullable('function(Object):Object')
+ self.assertNotNullable('function(?Object):?Object')
+ self.assertNotNullable('!Object')
+
+ self.assertNotNullable('boolean|string')
+ self.assertNotNullable('(boolean|string)')
+
+ self.assertNullable('(boolean|string|null)')
+ self.assertNullable('(?boolean)')
+ self.assertNullable('?(boolean)')
+
+ self.assertNullable('(boolean|Object)')
+ self.assertNotNullable('(boolean|(string|{a:}))')
+
+ def testSpaces(self):
+ """Tests that spaces don't change the outcome."""
+ type_str = (' A < b | ( c | ? ! d e f ) > | '
+ 'function ( x : . . . ) : { y : z = } ')
+ two_spaces = type_str.replace(' ', ' ')
+ no_spaces = type_str.replace(' ', '')
+ newlines = type_str.replace(' ', '\n * ')
+ self.assertProperReconstruction(no_spaces)
+ self.assertProperReconstruction(type_str, no_spaces)
+ self.assertProperReconstruction(two_spaces, no_spaces)
+ self.assertProperReconstruction(newlines, no_spaces)
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tools/closure_linter/dist/closure_linter-2.3.17-py2.7.egg b/tools/closure_linter/dist/closure_linter-2.3.17-py2.7.egg
new file mode 100644
index 0000000000..e9a847ca28
--- /dev/null
+++ b/tools/closure_linter/dist/closure_linter-2.3.17-py2.7.egg
Binary files differ
diff --git a/tools/closure_linter/gflags.py b/tools/closure_linter/gflags.py
deleted file mode 100644
index 21aa88e761..0000000000
--- a/tools/closure_linter/gflags.py
+++ /dev/null
@@ -1,2489 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2007, Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# ---
-# Author: Chad Lester
-# Design and style contributions by:
-# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann,
-# Eric Veach, Laurence Gonsalves, Matthew Springer
-# Code reorganized a bit by Craig Silverstein
-
-"""This module is used to define and parse command line flags.
-
-This module defines a *distributed* flag-definition policy: rather than
-an application having to define all flags in or near main(), each python
-module defines flags that are useful to it. When one python module
-imports another, it gains access to the other's flags. (This is
-implemented by having all modules share a common, global registry object
-containing all the flag information.)
-
-Flags are defined through the use of one of the DEFINE_xxx functions.
-The specific function used determines how the flag is parsed, checked,
-and optionally type-converted, when it's seen on the command line.
-
-
-IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a
-'FlagValues' object (typically the global FlagValues FLAGS, defined
-here). The 'FlagValues' object can scan the command line arguments and
-pass flag arguments to the corresponding 'Flag' objects for
-value-checking and type conversion. The converted flag values are
-available as attributes of the 'FlagValues' object.
-
-Code can access the flag through a FlagValues object, for instance
-gflags.FLAGS.myflag. Typically, the __main__ module passes the
-command line arguments to gflags.FLAGS for parsing.
-
-At bottom, this module calls getopt(), so getopt functionality is
-supported, including short- and long-style flags, and the use of -- to
-terminate flags.
-
-Methods defined by the flag module will throw 'FlagsError' exceptions.
-The exception argument will be a human-readable string.
-
-
-FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags
-take a name, default value, help-string, and optional 'short' name
-(one-letter name). Some flags have other arguments, which are described
-with the flag.
-
-DEFINE_string: takes any input, and interprets it as a string.
-
-DEFINE_bool or
-DEFINE_boolean: typically does not take an argument: say --myflag to
- set FLAGS.myflag to true, or --nomyflag to set
- FLAGS.myflag to false. Alternately, you can say
- --myflag=true or --myflag=t or --myflag=1 or
- --myflag=false or --myflag=f or --myflag=0
-
-DEFINE_float: takes an input and interprets it as a floating point
- number. Takes optional args lower_bound and upper_bound;
- if the number specified on the command line is out of
- range, it will raise a FlagError.
-
-DEFINE_integer: takes an input and interprets it as an integer. Takes
- optional args lower_bound and upper_bound as for floats.
-
-DEFINE_enum: takes a list of strings which represents legal values. If
- the command-line value is not in this list, raise a flag
- error. Otherwise, assign to FLAGS.flag as a string.
-
-DEFINE_list: Takes a comma-separated list of strings on the commandline.
- Stores them in a python list object.
-
-DEFINE_spaceseplist: Takes a space-separated list of strings on the
- commandline. Stores them in a python list object.
- Example: --myspacesepflag "foo bar baz"
-
-DEFINE_multistring: The same as DEFINE_string, except the flag can be
- specified more than once on the commandline. The
- result is a python list object (list of strings),
- even if the flag is only on the command line once.
-
-DEFINE_multi_int: The same as DEFINE_integer, except the flag can be
- specified more than once on the commandline. The
- result is a python list object (list of ints), even if
- the flag is only on the command line once.
-
-
-SPECIAL FLAGS: There are a few flags that have special meaning:
- --help prints a list of all the flags in a human-readable fashion
- --helpshort prints a list of all key flags (see below).
- --helpxml prints a list of all flags, in XML format. DO NOT parse
- the output of --help and --helpshort. Instead, parse
- the output of --helpxml. For more info, see
- "OUTPUT FOR --helpxml" below.
- --flagfile=foo read flags from file foo.
- --undefok=f1,f2 ignore unrecognized option errors for f1,f2.
- For boolean flags, you should use --undefok=boolflag, and
- --boolflag and --noboolflag will be accepted. Do not use
- --undefok=noboolflag.
- -- as in getopt(), terminates flag-processing
-
-
-NOTE ON --flagfile:
-
-Flags may be loaded from text files in addition to being specified on
-the commandline.
-
-Any flags you don't feel like typing, throw them in a file, one flag per
-line, for instance:
- --myflag=myvalue
- --nomyboolean_flag
-You then specify your file with the special flag '--flagfile=somefile'.
-You CAN recursively nest flagfile= tokens OR use multiple files on the
-command line. Lines beginning with a single hash '#' or a double slash
-'//' are comments in your flagfile.
-
-Any flagfile=<file> will be interpreted as having a relative path from
-the current working directory rather than from the place the file was
-included from:
- myPythonScript.py --flagfile=config/somefile.cfg
-
-If somefile.cfg includes further --flagfile= directives, these will be
-referenced relative to the original CWD, not from the directory the
-including flagfile was found in!
-
-The caveat applies to people who are including a series of nested files
-in a different dir than they are executing out of. Relative path names
-are always from CWD, not from the directory of the parent include
-flagfile. We do now support '~' expanded directory names.
-
-Absolute path names ALWAYS work!
-
-
-EXAMPLE USAGE:
-
- import gflags
- FLAGS = gflags.FLAGS
-
- # Flag names are globally defined! So in general, we need to be
- # careful to pick names that are unlikely to be used by other libraries.
- # If there is a conflict, we'll get an error at import time.
- gflags.DEFINE_string('name', 'Mr. President', 'your name')
- gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0)
- gflags.DEFINE_boolean('debug', False, 'produces debugging output')
- gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender')
-
- def main(argv):
- try:
- argv = FLAGS(argv) # parse flags
- except gflags.FlagsError, e:
- print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
- sys.exit(1)
- if FLAGS.debug: print 'non-flag arguments:', argv
- print 'Happy Birthday', FLAGS.name
- if FLAGS.age is not None:
- print 'You are a %s, who is %d years old' % (FLAGS.gender, FLAGS.age)
-
- if __name__ == '__main__':
- main(sys.argv)
-
-
-KEY FLAGS:
-
-As we already explained, each module gains access to all flags defined
-by all the other modules it transitively imports. In the case of
-non-trivial scripts, this means a lot of flags ... For documentation
-purposes, it is good to identify the flags that are key (i.e., really
-important) to a module. Clearly, the concept of "key flag" is a
-subjective one. When trying to determine whether a flag is key to a
-module or not, assume that you are trying to explain your module to a
-potential user: which flags would you really like to mention first?
-
-We'll describe shortly how to declare which flags are key to a module.
-For the moment, assume we know the set of key flags for each module.
-Then, if you use the app.py module, you can use the --helpshort flag to
-print only the help for the flags that are key to the main module, in a
-human-readable format.
-
-NOTE: If you need to parse the flag help, do NOT use the output of
---help / --helpshort. That output is meant for human consumption, and
-may be changed in the future. Instead, use --helpxml; flags that are
-key for the main module are marked there with a <key>yes</key> element.
-
-The set of key flags for a module M is composed of:
-
-1. Flags defined by module M by calling a DEFINE_* function.
-
-2. Flags that module M explictly declares as key by using the function
-
- DECLARE_key_flag(<flag_name>)
-
-3. Key flags of other modules that M specifies by using the function
-
- ADOPT_module_key_flags(<other_module>)
-
- This is a "bulk" declaration of key flags: each flag that is key for
- <other_module> becomes key for the current module too.
-
-Notice that if you do not use the functions described at points 2 and 3
-above, then --helpshort prints information only about the flags defined
-by the main module of our script. In many cases, this behavior is good
-enough. But if you move part of the main module code (together with the
-related flags) into a different module, then it is nice to use
-DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort
-lists all relevant flags (otherwise, your code refactoring may confuse
-your users).
-
-Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own
-pluses and minuses: DECLARE_key_flag is more targeted and may lead a
-more focused --helpshort documentation. ADOPT_module_key_flags is good
-for cases when an entire module is considered key to the current script.
-Also, it does not require updates to client scripts when a new flag is
-added to the module.
-
-
-EXAMPLE USAGE 2 (WITH KEY FLAGS):
-
-Consider an application that contains the following three files (two
-auxiliary modules and a main module):
-
-File libfoo.py:
-
- import gflags
-
- gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start')
- gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.')
-
- ... some code ...
-
-File libbar.py:
-
- import gflags
-
- gflags.DEFINE_string('bar_gfs_path', '/gfs/path',
- 'Path to the GFS files for libbar.')
- gflags.DEFINE_string('email_for_bar_errors', 'bar-team@google.com',
- 'Email address for bug reports about module libbar.')
- gflags.DEFINE_boolean('bar_risky_hack', False,
- 'Turn on an experimental and buggy optimization.')
-
- ... some code ...
-
-File myscript.py:
-
- import gflags
- import libfoo
- import libbar
-
- gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.')
-
- # Declare that all flags that are key for libfoo are
- # key for this module too.
- gflags.ADOPT_module_key_flags(libfoo)
-
- # Declare that the flag --bar_gfs_path (defined in libbar) is key
- # for this module.
- gflags.DECLARE_key_flag('bar_gfs_path')
-
- ... some code ...
-
-When myscript is invoked with the flag --helpshort, the resulted help
-message lists information about all the key flags for myscript:
---num_iterations, --num_replicas, --rpc2, and --bar_gfs_path (in
-addition to the special flags --help and --helpshort).
-
-Of course, myscript uses all the flags declared by it (in this case,
-just --num_iterations) or by any of the modules it transitively imports
-(e.g., the modules libfoo, libbar). E.g., it can access the value of
-FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key
-flag for myscript.
-
-
-OUTPUT FOR --helpxml:
-
-The --helpxml flag generates output with the following structure:
-
-<?xml version="1.0"?>
-<AllFlags>
- <program>PROGRAM_BASENAME</program>
- <usage>MAIN_MODULE_DOCSTRING</usage>
- (<flag>
- [<key>yes</key>]
- <file>DECLARING_MODULE</file>
- <name>FLAG_NAME</name>
- <meaning>FLAG_HELP_MESSAGE</meaning>
- <default>DEFAULT_FLAG_VALUE</default>
- <current>CURRENT_FLAG_VALUE</current>
- <type>FLAG_TYPE</type>
- [OPTIONAL_ELEMENTS]
- </flag>)*
-</AllFlags>
-
-Notes:
-
-1. The output is intentionally similar to the output generated by the
-C++ command-line flag library. The few differences are due to the
-Python flags that do not have a C++ equivalent (at least not yet),
-e.g., DEFINE_list.
-
-2. New XML elements may be added in the future.
-
-3. DEFAULT_FLAG_VALUE is in serialized form, i.e., the string you can
-pass for this flag on the command-line. E.g., for a flag defined
-using DEFINE_list, this field may be foo,bar, not ['foo', 'bar'].
-
-4. CURRENT_FLAG_VALUE is produced using str(). This means that the
-string 'false' will be represented in the same way as the boolean
-False. Using repr() would have removed this ambiguity and simplified
-parsing, but would have broken the compatibility with the C++
-command-line flags.
-
-5. OPTIONAL_ELEMENTS describe elements relevant for certain kinds of
-flags: lower_bound, upper_bound (for flags that specify bounds),
-enum_value (for enum flags), list_separator (for flags that consist of
-a list of values, separated by a special token).
-
-6. We do not provide any example here: please use --helpxml instead.
-"""
-
-import cgi
-import getopt
-import os
-import re
-import string
-import sys
-
-# Are we running at least python 2.2?
-try:
- if tuple(sys.version_info[:3]) < (2,2,0):
- raise NotImplementedError("requires python 2.2.0 or later")
-except AttributeError: # a very old python, that lacks sys.version_info
- raise NotImplementedError("requires python 2.2.0 or later")
-
-# If we're not running at least python 2.2.1, define True, False, and bool.
-# Thanks, Guido, for the code.
-try:
- True, False, bool
-except NameError:
- False = 0
- True = 1
- def bool(x):
- if x:
- return True
- else:
- return False
-
-# Are we running under pychecker?
-_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules
-
-
-def _GetCallingModule():
- """Returns the name of the module that's calling into this module.
-
- We generally use this function to get the name of the module calling a
- DEFINE_foo... function.
- """
- # Walk down the stack to find the first globals dict that's not ours.
- for depth in range(1, sys.getrecursionlimit()):
- if not sys._getframe(depth).f_globals is globals():
- globals_for_frame = sys._getframe(depth).f_globals
- module_name = _GetModuleObjectAndName(globals_for_frame)[1]
- if module_name is not None:
- return module_name
- raise AssertionError("No module was found")
-
-
-def _GetThisModuleObjectAndName():
- """Returns: (module object, module name) for this module."""
- return _GetModuleObjectAndName(globals())
-
-
-# module exceptions:
-class FlagsError(Exception):
- """The base class for all flags errors."""
- pass
-
-
-class DuplicateFlag(FlagsError):
- """Raised if there is a flag naming conflict."""
- pass
-
-
-class DuplicateFlagCannotPropagateNoneToSwig(DuplicateFlag):
- """Special case of DuplicateFlag -- SWIG flag value can't be set to None.
-
- This can be raised when a duplicate flag is created. Even if allow_override is
- True, we still abort if the new value is None, because it's currently
- impossible to pass None default value back to SWIG. See FlagValues.SetDefault
- for details.
- """
- pass
-
-
-# A DuplicateFlagError conveys more information than a
-# DuplicateFlag. Since there are external modules that create
-# DuplicateFlags, the interface to DuplicateFlag shouldn't change.
-class DuplicateFlagError(DuplicateFlag):
-
- def __init__(self, flagname, flag_values):
- self.flagname = flagname
- message = "The flag '%s' is defined twice." % self.flagname
- flags_by_module = flag_values.FlagsByModuleDict()
- for module in flags_by_module:
- for flag in flags_by_module[module]:
- if flag.name == flagname or flag.short_name == flagname:
- message = message + " First from " + module + ","
- break
- message = message + " Second from " + _GetCallingModule()
- DuplicateFlag.__init__(self, message)
-
-
-class IllegalFlagValue(FlagsError):
- """The flag command line argument is illegal."""
- pass
-
-
-class UnrecognizedFlag(FlagsError):
- """Raised if a flag is unrecognized."""
- pass
-
-
-# An UnrecognizedFlagError conveys more information than an
-# UnrecognizedFlag. Since there are external modules that create
-# UnrecognizedFlags, the interface to UnrecognizedFlag shouldn't change.
-class UnrecognizedFlagError(UnrecognizedFlag):
- def __init__(self, flagname):
- self.flagname = flagname
- UnrecognizedFlag.__init__(
- self, "Unknown command line flag '%s'" % flagname)
-
-
-# Global variable used by expvar
-_exported_flags = {}
-_help_width = 80 # width of help output
-
-
-def GetHelpWidth():
- """Returns: an integer, the width of help lines that is used in TextWrap."""
- return _help_width
-
-
-def CutCommonSpacePrefix(text):
- """Removes a common space prefix from the lines of a multiline text.
-
- If the first line does not start with a space, it is left as it is and
- only in the remaining lines a common space prefix is being searched
- for. That means the first line will stay untouched. This is especially
- useful to turn doc strings into help texts. This is because some
- people prefer to have the doc comment start already after the
- apostrophy and then align the following lines while others have the
- apostrophies on a seperately line.
-
- The function also drops trailing empty lines and ignores empty lines
- following the initial content line while calculating the initial
- common whitespace.
-
- Args:
- text: text to work on
-
- Returns:
- the resulting text
- """
- text_lines = text.splitlines()
- # Drop trailing empty lines
- while text_lines and not text_lines[-1]:
- text_lines = text_lines[:-1]
- if text_lines:
- # We got some content, is the first line starting with a space?
- if text_lines[0] and text_lines[0][0].isspace():
- text_first_line = []
- else:
- text_first_line = [text_lines.pop(0)]
- # Calculate length of common leading whitesppace (only over content lines)
- common_prefix = os.path.commonprefix([line for line in text_lines if line])
- space_prefix_len = len(common_prefix) - len(common_prefix.lstrip())
- # If we have a common space prefix, drop it from all lines
- if space_prefix_len:
- for index in xrange(len(text_lines)):
- if text_lines[index]:
- text_lines[index] = text_lines[index][space_prefix_len:]
- return '\n'.join(text_first_line + text_lines)
- return ''
-
-
-def TextWrap(text, length=None, indent='', firstline_indent=None, tabs=' '):
- """Wraps a given text to a maximum line length and returns it.
-
- We turn lines that only contain whitespaces into empty lines. We keep
- new lines and tabs (e.g., we do not treat tabs as spaces).
-
- Args:
- text: text to wrap
- length: maximum length of a line, includes indentation
- if this is None then use GetHelpWidth()
- indent: indent for all but first line
- firstline_indent: indent for first line; if None, fall back to indent
- tabs: replacement for tabs
-
- Returns:
- wrapped text
-
- Raises:
- FlagsError: if indent not shorter than length
- FlagsError: if firstline_indent not shorter than length
- """
- # Get defaults where callee used None
- if length is None:
- length = GetHelpWidth()
- if indent is None:
- indent = ''
- if len(indent) >= length:
- raise FlagsError('Indent must be shorter than length')
- # In line we will be holding the current line which is to be started
- # with indent (or firstline_indent if available) and then appended
- # with words.
- if firstline_indent is None:
- firstline_indent = ''
- line = indent
- else:
- line = firstline_indent
- if len(firstline_indent) >= length:
- raise FlagsError('First iline indent must be shorter than length')
-
- # If the callee does not care about tabs we simply convert them to
- # spaces If callee wanted tabs to be single space then we do that
- # already here.
- if not tabs or tabs == ' ':
- text = text.replace('\t', ' ')
- else:
- tabs_are_whitespace = not tabs.strip()
-
- line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE)
-
- # Split the text into lines and the lines with the regex above. The
- # resulting lines are collected in result[]. For each split we get the
- # spaces, the tabs and the next non white space (e.g. next word).
- result = []
- for text_line in text.splitlines():
- # Store result length so we can find out whether processing the next
- # line gave any new content
- old_result_len = len(result)
- # Process next line with line_regex. For optimization we do an rstrip().
- # - process tabs (changes either line or word, see below)
- # - process word (first try to squeeze on line, then wrap or force wrap)
- # Spaces found on the line are ignored, they get added while wrapping as
- # needed.
- for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()):
- # If tabs weren't converted to spaces, handle them now
- if current_tabs:
- # If the last thing we added was a space anyway then drop
- # it. But let's not get rid of the indentation.
- if (((result and line != indent) or
- (not result and line != firstline_indent)) and line[-1] == ' '):
- line = line[:-1]
- # Add the tabs, if that means adding whitespace, just add it at
- # the line, the rstrip() code while shorten the line down if
- # necessary
- if tabs_are_whitespace:
- line += tabs * len(current_tabs)
- else:
- # if not all tab replacement is whitespace we prepend it to the word
- word = tabs * len(current_tabs) + word
- # Handle the case where word cannot be squeezed onto current last line
- if len(line) + len(word) > length and len(indent) + len(word) <= length:
- result.append(line.rstrip())
- line = indent + word
- word = ''
- # No space left on line or can we append a space?
- if len(line) + 1 >= length:
- result.append(line.rstrip())
- line = indent
- else:
- line += ' '
- # Add word and shorten it up to allowed line length. Restart next
- # line with indent and repeat, or add a space if we're done (word
- # finished) This deals with words that caanot fit on one line
- # (e.g. indent + word longer than allowed line length).
- while len(line) + len(word) >= length:
- line += word
- result.append(line[:length])
- word = line[length:]
- line = indent
- # Default case, simply append the word and a space
- if word:
- line += word + ' '
- # End of input line. If we have content we finish the line. If the
- # current line is just the indent but we had content in during this
- # original line then we need to add an emoty line.
- if (result and line != indent) or (not result and line != firstline_indent):
- result.append(line.rstrip())
- elif len(result) == old_result_len:
- result.append('')
- line = indent
-
- return '\n'.join(result)
-
-
-def DocToHelp(doc):
- """Takes a __doc__ string and reformats it as help."""
-
- # Get rid of starting and ending white space. Using lstrip() or even
- # strip() could drop more than maximum of first line and right space
- # of last line.
- doc = doc.strip()
-
- # Get rid of all empty lines
- whitespace_only_line = re.compile('^[ \t]+$', re.M)
- doc = whitespace_only_line.sub('', doc)
-
- # Cut out common space at line beginnings
- doc = CutCommonSpacePrefix(doc)
-
- # Just like this module's comment, comments tend to be aligned somehow.
- # In other words they all start with the same amount of white space
- # 1) keep double new lines
- # 2) keep ws after new lines if not empty line
- # 3) all other new lines shall be changed to a space
- # Solution: Match new lines between non white space and replace with space.
- doc = re.sub('(?<=\S)\n(?=\S)', ' ', doc, re.M)
-
- return doc
-
-
-def _GetModuleObjectAndName(globals_dict):
- """Returns the module that defines a global environment, and its name.
-
- Args:
- globals_dict: A dictionary that should correspond to an environment
- providing the values of the globals.
-
- Returns:
- A pair consisting of (1) module object and (2) module name (a
- string). Returns (None, None) if the module could not be
- identified.
- """
- # The use of .items() (instead of .iteritems()) is NOT a mistake: if
- # a parallel thread imports a module while we iterate over
- # .iteritems() (not nice, but possible), we get a RuntimeError ...
- # Hence, we use the slightly slower but safer .items().
- for name, module in sys.modules.items():
- if getattr(module, '__dict__', None) is globals_dict:
- if name == '__main__':
- # Pick a more informative name for the main module.
- name = sys.argv[0]
- return (module, name)
- return (None, None)
-
-
-def _GetMainModule():
- """Returns the name of the module from which execution started."""
- for depth in range(1, sys.getrecursionlimit()):
- try:
- globals_of_main = sys._getframe(depth).f_globals
- except ValueError:
- return _GetModuleObjectAndName(globals_of_main)[1]
- raise AssertionError("No module was found")
-
-
-class FlagValues:
- """Registry of 'Flag' objects.
-
- A 'FlagValues' can then scan command line arguments, passing flag
- arguments through to the 'Flag' objects that it owns. It also
- provides easy access to the flag values. Typically only one
- 'FlagValues' object is needed by an application: gflags.FLAGS
-
- This class is heavily overloaded:
-
- 'Flag' objects are registered via __setitem__:
- FLAGS['longname'] = x # register a new flag
-
- The .value attribute of the registered 'Flag' objects can be accessed
- as attributes of this 'FlagValues' object, through __getattr__. Both
- the long and short name of the original 'Flag' objects can be used to
- access its value:
- FLAGS.longname # parsed flag value
- FLAGS.x # parsed flag value (short name)
-
- Command line arguments are scanned and passed to the registered 'Flag'
- objects through the __call__ method. Unparsed arguments, including
- argv[0] (e.g. the program name) are returned.
- argv = FLAGS(sys.argv) # scan command line arguments
-
- The original registered Flag objects can be retrieved through the use
- of the dictionary-like operator, __getitem__:
- x = FLAGS['longname'] # access the registered Flag object
-
- The str() operator of a 'FlagValues' object provides help for all of
- the registered 'Flag' objects.
- """
-
- def __init__(self):
- # Since everything in this class is so heavily overloaded, the only
- # way of defining and using fields is to access __dict__ directly.
-
- # Dictionary: flag name (string) -> Flag object.
- self.__dict__['__flags'] = {}
- # Dictionary: module name (string) -> list of Flag objects that are defined
- # by that module.
- self.__dict__['__flags_by_module'] = {}
- # Dictionary: module name (string) -> list of Flag objects that are
- # key for that module.
- self.__dict__['__key_flags_by_module'] = {}
-
- # Set if we should use new style gnu_getopt rather than getopt when parsing
- # the args. Only possible with Python 2.3+
- self.UseGnuGetOpt(False)
-
- def UseGnuGetOpt(self, use_gnu_getopt=True):
- self.__dict__['__use_gnu_getopt'] = use_gnu_getopt
-
- def IsGnuGetOpt(self):
- return self.__dict__['__use_gnu_getopt']
-
- def FlagDict(self):
- return self.__dict__['__flags']
-
- def FlagsByModuleDict(self):
- """Returns the dictionary of module_name -> list of defined flags.
-
- Returns:
- A dictionary. Its keys are module names (strings). Its values
- are lists of Flag objects.
- """
- return self.__dict__['__flags_by_module']
-
- def KeyFlagsByModuleDict(self):
- """Returns the dictionary of module_name -> list of key flags.
-
- Returns:
- A dictionary. Its keys are module names (strings). Its values
- are lists of Flag objects.
- """
- return self.__dict__['__key_flags_by_module']
-
- def _RegisterFlagByModule(self, module_name, flag):
- """Records the module that defines a specific flag.
-
- We keep track of which flag is defined by which module so that we
- can later sort the flags by module.
-
- Args:
- module_name: A string, the name of a Python module.
- flag: A Flag object, a flag that is key to the module.
- """
- flags_by_module = self.FlagsByModuleDict()
- flags_by_module.setdefault(module_name, []).append(flag)
-
- def _RegisterKeyFlagForModule(self, module_name, flag):
- """Specifies that a flag is a key flag for a module.
-
- Args:
- module_name: A string, the name of a Python module.
- flag: A Flag object, a flag that is key to the module.
- """
- key_flags_by_module = self.KeyFlagsByModuleDict()
- # The list of key flags for the module named module_name.
- key_flags = key_flags_by_module.setdefault(module_name, [])
- # Add flag, but avoid duplicates.
- if flag not in key_flags:
- key_flags.append(flag)
-
- def _GetFlagsDefinedByModule(self, module):
- """Returns the list of flags defined by a module.
-
- Args:
- module: A module object or a module name (a string).
-
- Returns:
- A new list of Flag objects. Caller may update this list as he
- wishes: none of those changes will affect the internals of this
- FlagValue object.
- """
- if not isinstance(module, str):
- module = module.__name__
-
- return list(self.FlagsByModuleDict().get(module, []))
-
- def _GetKeyFlagsForModule(self, module):
- """Returns the list of key flags for a module.
-
- Args:
- module: A module object or a module name (a string)
-
- Returns:
- A new list of Flag objects. Caller may update this list as he
- wishes: none of those changes will affect the internals of this
- FlagValue object.
- """
- if not isinstance(module, str):
- module = module.__name__
-
- # Any flag is a key flag for the module that defined it. NOTE:
- # key_flags is a fresh list: we can update it without affecting the
- # internals of this FlagValues object.
- key_flags = self._GetFlagsDefinedByModule(module)
-
- # Take into account flags explicitly declared as key for a module.
- for flag in self.KeyFlagsByModuleDict().get(module, []):
- if flag not in key_flags:
- key_flags.append(flag)
- return key_flags
-
- def AppendFlagValues(self, flag_values):
- """Appends flags registered in another FlagValues instance.
-
- Args:
- flag_values: registry to copy from
- """
- for flag_name, flag in flag_values.FlagDict().iteritems():
- # Each flags with shortname appears here twice (once under its
- # normal name, and again with its short name). To prevent
- # problems (DuplicateFlagError) with double flag registration, we
- # perform a check to make sure that the entry we're looking at is
- # for its normal name.
- if flag_name == flag.name:
- self[flag_name] = flag
-
- def RemoveFlagValues(self, flag_values):
- """Remove flags that were previously appended from another FlagValues.
-
- Args:
- flag_values: registry containing flags to remove.
- """
- for flag_name in flag_values.FlagDict():
- self.__delattr__(flag_name)
-
- def __setitem__(self, name, flag):
- """Registers a new flag variable."""
- fl = self.FlagDict()
- if not isinstance(flag, Flag):
- raise IllegalFlagValue(flag)
- if not isinstance(name, type("")):
- raise FlagsError("Flag name must be a string")
- if len(name) == 0:
- raise FlagsError("Flag name cannot be empty")
- # If running under pychecker, duplicate keys are likely to be
- # defined. Disable check for duplicate keys when pycheck'ing.
- if (fl.has_key(name) and not flag.allow_override and
- not fl[name].allow_override and not _RUNNING_PYCHECKER):
- raise DuplicateFlagError(name, self)
- short_name = flag.short_name
- if short_name is not None:
- if (fl.has_key(short_name) and not flag.allow_override and
- not fl[short_name].allow_override and not _RUNNING_PYCHECKER):
- raise DuplicateFlagError(short_name, self)
- fl[short_name] = flag
- fl[name] = flag
- global _exported_flags
- _exported_flags[name] = flag
-
- def __getitem__(self, name):
- """Retrieves the Flag object for the flag --name."""
- return self.FlagDict()[name]
-
- def __getattr__(self, name):
- """Retrieves the 'value' attribute of the flag --name."""
- fl = self.FlagDict()
- if not fl.has_key(name):
- raise AttributeError(name)
- return fl[name].value
-
- def __setattr__(self, name, value):
- """Sets the 'value' attribute of the flag --name."""
- fl = self.FlagDict()
- fl[name].value = value
- return value
-
- def _FlagIsRegistered(self, flag_obj):
- """Checks whether a Flag object is registered under some name.
-
- Note: this is non trivial: in addition to its normal name, a flag
- may have a short name too. In self.FlagDict(), both the normal and
- the short name are mapped to the same flag object. E.g., calling
- only "del FLAGS.short_name" is not unregistering the corresponding
- Flag object (it is still registered under the longer name).
-
- Args:
- flag_obj: A Flag object.
-
- Returns:
- A boolean: True iff flag_obj is registered under some name.
- """
- flag_dict = self.FlagDict()
- # Check whether flag_obj is registered under its long name.
- name = flag_obj.name
- if flag_dict.get(name, None) == flag_obj:
- return True
- # Check whether flag_obj is registered under its short name.
- short_name = flag_obj.short_name
- if (short_name is not None and
- flag_dict.get(short_name, None) == flag_obj):
- return True
- # The flag cannot be registered under any other name, so we do not
- # need to do a full search through the values of self.FlagDict().
- return False
-
- def __delattr__(self, flag_name):
- """Deletes a previously-defined flag from a flag object.
-
- This method makes sure we can delete a flag by using
-
- del flag_values_object.<flag_name>
-
- E.g.,
-
- flags.DEFINE_integer('foo', 1, 'Integer flag.')
- del flags.FLAGS.foo
-
- Args:
- flag_name: A string, the name of the flag to be deleted.
-
- Raises:
- AttributeError: When there is no registered flag named flag_name.
- """
- fl = self.FlagDict()
- if flag_name not in fl:
- raise AttributeError(flag_name)
-
- flag_obj = fl[flag_name]
- del fl[flag_name]
-
- if not self._FlagIsRegistered(flag_obj):
- # If the Flag object indicated by flag_name is no longer
- # registered (please see the docstring of _FlagIsRegistered), then
- # we delete the occurences of the flag object in all our internal
- # dictionaries.
- self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj)
- self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj)
-
- def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj):
- """Removes a flag object from a module -> list of flags dictionary.
-
- Args:
- flags_by_module_dict: A dictionary that maps module names to lists of
- flags.
- flag_obj: A flag object.
- """
- for unused_module, flags_in_module in flags_by_module_dict.iteritems():
- # while (as opposed to if) takes care of multiple occurences of a
- # flag in the list for the same module.
- while flag_obj in flags_in_module:
- flags_in_module.remove(flag_obj)
-
- def SetDefault(self, name, value):
- """Changes the default value of the named flag object."""
- fl = self.FlagDict()
- if not fl.has_key(name):
- raise AttributeError(name)
- fl[name].SetDefault(value)
-
- def __contains__(self, name):
- """Returns True if name is a value (flag) in the dict."""
- return name in self.FlagDict()
-
- has_key = __contains__ # a synonym for __contains__()
-
- def __iter__(self):
- return self.FlagDict().iterkeys()
-
- def __call__(self, argv):
- """Parses flags from argv; stores parsed flags into this FlagValues object.
-
- All unparsed arguments are returned. Flags are parsed using the GNU
- Program Argument Syntax Conventions, using getopt:
-
- http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt
-
- Args:
- argv: argument list. Can be of any type that may be converted to a list.
-
- Returns:
- The list of arguments not parsed as options, including argv[0]
-
- Raises:
- FlagsError: on any parsing error
- """
- # Support any sequence type that can be converted to a list
- argv = list(argv)
-
- shortopts = ""
- longopts = []
-
- fl = self.FlagDict()
-
- # This pre parses the argv list for --flagfile=<> options.
- argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False)
-
- # Correct the argv to support the google style of passing boolean
- # parameters. Boolean parameters may be passed by using --mybool,
- # --nomybool, --mybool=(true|false|1|0). getopt does not support
- # having options that may or may not have a parameter. We replace
- # instances of the short form --mybool and --nomybool with their
- # full forms: --mybool=(true|false).
- original_argv = list(argv) # list() makes a copy
- shortest_matches = None
- for name, flag in fl.items():
- if not flag.boolean:
- continue
- if shortest_matches is None:
- # Determine the smallest allowable prefix for all flag names
- shortest_matches = self.ShortestUniquePrefixes(fl)
- no_name = 'no' + name
- prefix = shortest_matches[name]
- no_prefix = shortest_matches[no_name]
-
- # Replace all occurences of this boolean with extended forms
- for arg_idx in range(1, len(argv)):
- arg = argv[arg_idx]
- if arg.find('=') >= 0: continue
- if arg.startswith('--'+prefix) and ('--'+name).startswith(arg):
- argv[arg_idx] = ('--%s=true' % name)
- elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg):
- argv[arg_idx] = ('--%s=false' % name)
-
- # Loop over all of the flags, building up the lists of short options
- # and long options that will be passed to getopt. Short options are
- # specified as a string of letters, each letter followed by a colon
- # if it takes an argument. Long options are stored in an array of
- # strings. Each string ends with an '=' if it takes an argument.
- for name, flag in fl.items():
- longopts.append(name + "=")
- if len(name) == 1: # one-letter option: allow short flag type also
- shortopts += name
- if not flag.boolean:
- shortopts += ":"
-
- longopts.append('undefok=')
- undefok_flags = []
-
- # In case --undefok is specified, loop to pick up unrecognized
- # options one by one.
- unrecognized_opts = []
- args = argv[1:]
- while True:
- try:
- if self.__dict__['__use_gnu_getopt']:
- optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts)
- else:
- optlist, unparsed_args = getopt.getopt(args, shortopts, longopts)
- break
- except getopt.GetoptError, e:
- if not e.opt or e.opt in fl:
- # Not an unrecognized option, reraise the exception as a FlagsError
- raise FlagsError(e)
- # Handle an unrecognized option.
- unrecognized_opts.append(e.opt)
- # Remove offender from args and try again
- for arg_index in range(len(args)):
- if ((args[arg_index] == '--' + e.opt) or
- (args[arg_index] == '-' + e.opt) or
- args[arg_index].startswith('--' + e.opt + '=')):
- args = args[0:arg_index] + args[arg_index+1:]
- break
- else:
- # We should have found the option, so we don't expect to get
- # here. We could assert, but raising the original exception
- # might work better.
- raise FlagsError(e)
-
- for name, arg in optlist:
- if name == '--undefok':
- flag_names = arg.split(',')
- undefok_flags.extend(flag_names)
- # For boolean flags, if --undefok=boolflag is specified, then we should
- # also accept --noboolflag, in addition to --boolflag.
- # Since we don't know the type of the undefok'd flag, this will affect
- # non-boolean flags as well.
- # NOTE: You shouldn't use --undefok=noboolflag, because then we will
- # accept --nonoboolflag here. We are choosing not to do the conversion
- # from noboolflag -> boolflag because of the ambiguity that flag names
- # can start with 'no'.
- undefok_flags.extend('no' + name for name in flag_names)
- continue
- if name.startswith('--'):
- # long option
- name = name[2:]
- short_option = 0
- else:
- # short option
- name = name[1:]
- short_option = 1
- if fl.has_key(name):
- flag = fl[name]
- if flag.boolean and short_option: arg = 1
- flag.Parse(arg)
-
- # If there were unrecognized options, raise an exception unless
- # the options were named via --undefok.
- for opt in unrecognized_opts:
- if opt not in undefok_flags:
- raise UnrecognizedFlagError(opt)
-
- if unparsed_args:
- if self.__dict__['__use_gnu_getopt']:
- # if using gnu_getopt just return the program name + remainder of argv.
- return argv[:1] + unparsed_args
- else:
- # unparsed_args becomes the first non-flag detected by getopt to
- # the end of argv. Because argv may have been modified above,
- # return original_argv for this region.
- return argv[:1] + original_argv[-len(unparsed_args):]
- else:
- return argv[:1]
-
- def Reset(self):
- """Resets the values to the point before FLAGS(argv) was called."""
- for f in self.FlagDict().values():
- f.Unparse()
-
- def RegisteredFlags(self):
- """Returns: a list of the names and short names of all registered flags."""
- return self.FlagDict().keys()
-
- def FlagValuesDict(self):
- """Returns: a dictionary that maps flag names to flag values."""
- flag_values = {}
-
- for flag_name in self.RegisteredFlags():
- flag = self.FlagDict()[flag_name]
- flag_values[flag_name] = flag.value
-
- return flag_values
-
- def __str__(self):
- """Generates a help string for all known flags."""
- return self.GetHelp()
-
- def GetHelp(self, prefix=''):
- """Generates a help string for all known flags."""
- helplist = []
-
- flags_by_module = self.FlagsByModuleDict()
- if flags_by_module:
-
- modules = flags_by_module.keys()
- modules.sort()
-
- # Print the help for the main module first, if possible.
- main_module = _GetMainModule()
- if main_module in modules:
- modules.remove(main_module)
- modules = [main_module] + modules
-
- for module in modules:
- self.__RenderOurModuleFlags(module, helplist)
-
- self.__RenderModuleFlags('gflags',
- _SPECIAL_FLAGS.FlagDict().values(),
- helplist)
-
- else:
- # Just print one long list of flags.
- self.__RenderFlagList(
- self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(),
- helplist, prefix)
-
- return '\n'.join(helplist)
-
- def __RenderModuleFlags(self, module, flags, output_lines, prefix=""):
- """Generates a help string for a given module."""
- if not isinstance(module, str):
- module = module.__name__
- output_lines.append('\n%s%s:' % (prefix, module))
- self.__RenderFlagList(flags, output_lines, prefix + " ")
-
- def __RenderOurModuleFlags(self, module, output_lines, prefix=""):
- """Generates a help string for a given module."""
- flags = self._GetFlagsDefinedByModule(module)
- if flags:
- self.__RenderModuleFlags(module, flags, output_lines, prefix)
-
- def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""):
- """Generates a help string for the key flags of a given module.
-
- Args:
- module: A module object or a module name (a string).
- output_lines: A list of strings. The generated help message
- lines will be appended to this list.
- prefix: A string that is prepended to each generated help line.
- """
- key_flags = self._GetKeyFlagsForModule(module)
- if key_flags:
- self.__RenderModuleFlags(module, key_flags, output_lines, prefix)
-
- def ModuleHelp(self, module):
- """Describe the key flags of a module.
-
- Args:
- module: A module object or a module name (a string).
-
- Returns:
- string describing the key flags of a module.
- """
- helplist = []
- self.__RenderOurModuleKeyFlags(module, helplist)
- return '\n'.join(helplist)
-
  def MainModuleHelp(self):
    """Describe the key flags of the main module.

    Returns:
      string describing the key flags of a module.
    """
    # The "main" module is the one that started the process.
    return self.ModuleHelp(_GetMainModule())
-
- def __RenderFlagList(self, flaglist, output_lines, prefix=" "):
- fl = self.FlagDict()
- special_fl = _SPECIAL_FLAGS.FlagDict()
- flaglist = [(flag.name, flag) for flag in flaglist]
- flaglist.sort()
- flagset = {}
- for (name, flag) in flaglist:
- # It's possible this flag got deleted or overridden since being
- # registered in the per-module flaglist. Check now against the
- # canonical source of current flag information, the FlagDict.
- if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
- # a different flag is using this name now
- continue
- # only print help once
- if flagset.has_key(flag): continue
- flagset[flag] = 1
- flaghelp = ""
- if flag.short_name: flaghelp += "-%s," % flag.short_name
- if flag.boolean:
- flaghelp += "--[no]%s" % flag.name + ":"
- else:
- flaghelp += "--%s" % flag.name + ":"
- flaghelp += " "
- if flag.help:
- flaghelp += flag.help
- flaghelp = TextWrap(flaghelp, indent=prefix+" ",
- firstline_indent=prefix)
- if flag.default_as_str:
- flaghelp += "\n"
- flaghelp += TextWrap("(default: %s)" % flag.default_as_str,
- indent=prefix+" ")
- if flag.parser.syntactic_help:
- flaghelp += "\n"
- flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help,
- indent=prefix+" ")
- output_lines.append(flaghelp)
-
- def get(self, name, default):
- """Returns the value of a flag (if not None) or a default value.
-
- Args:
- name: A string, the name of a flag.
- default: Default value to use if the flag value is None.
- """
-
- value = self.__getattr__(name)
- if value is not None: # Can't do if not value, b/c value might be '0' or ""
- return value
- else:
- return default
-
- def ShortestUniquePrefixes(self, fl):
- """Returns: dictionary; maps flag names to their shortest unique prefix."""
- # Sort the list of flag names
- sorted_flags = []
- for name, flag in fl.items():
- sorted_flags.append(name)
- if flag.boolean:
- sorted_flags.append('no%s' % name)
- sorted_flags.sort()
-
- # For each name in the sorted list, determine the shortest unique
- # prefix by comparing itself to the next name and to the previous
- # name (the latter check uses cached info from the previous loop).
- shortest_matches = {}
- prev_idx = 0
- for flag_idx in range(len(sorted_flags)):
- curr = sorted_flags[flag_idx]
- if flag_idx == (len(sorted_flags) - 1):
- next = None
- else:
- next = sorted_flags[flag_idx+1]
- next_len = len(next)
- for curr_idx in range(len(curr)):
- if (next is None
- or curr_idx >= next_len
- or curr[curr_idx] != next[curr_idx]):
- # curr longer than next or no more chars in common
- shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1]
- prev_idx = curr_idx
- break
- else:
- # curr shorter than (or equal to) next
- shortest_matches[curr] = curr
- prev_idx = curr_idx + 1 # next will need at least one more char
- return shortest_matches
-
- def __IsFlagFileDirective(self, flag_string):
- """Checks whether flag_string contain a --flagfile=<foo> directive."""
- if isinstance(flag_string, type("")):
- if flag_string.startswith('--flagfile='):
- return 1
- elif flag_string == '--flagfile':
- return 1
- elif flag_string.startswith('-flagfile='):
- return 1
- elif flag_string == '-flagfile':
- return 1
- else:
- return 0
- return 0
-
- def ExtractFilename(self, flagfile_str):
- """Returns filename from a flagfile_str of form -[-]flagfile=filename.
-
- The cases of --flagfile foo and -flagfile foo shouldn't be hitting
- this function, as they are dealt with in the level above this
- function.
- """
- if flagfile_str.startswith('--flagfile='):
- return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
- elif flagfile_str.startswith('-flagfile='):
- return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
- else:
- raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str)
-
  def __GetFlagFileLines(self, filename, parsed_file_list):
    """Returns the useful (!=comments, etc) lines from a file with flags.

    Args:
      filename: A string, the name of the flag file.
      parsed_file_list: A list of the names of the files we have
        already read.  MUTATED BY THIS FUNCTION.

    Returns:
      List of strings.  See the note below.

    NOTE(springer): This function checks for a nested --flagfile=<foo>
    tag and handles the lower file recursively. It returns a list of
    all the lines that _could_ contain command flags. This is
    EVERYTHING except whitespace lines and comments (lines starting
    with '#' or '//').
    """
    line_list = []  # All lines from the flagfile.
    flag_line_list = []  # Subset of lines w/o comments, blanks, flagfile= tags.
    try:
      file_obj = open(filename, 'r')
    except IOError, e_msg:
      # Unreadable flagfiles are reported but not fatal: return no lines.
      print e_msg
      print 'ERROR:: Unable to open flagfile: %s' % (filename)
      return flag_line_list

    line_list = file_obj.readlines()
    file_obj.close()
    # Record this file so nested includes can detect circular references.
    parsed_file_list.append(filename)

    # This is where we check each line in the file we just read.
    for line in line_list:
      if line.isspace():
        pass
      # Checks for comment (a line that starts with '#').
      elif line.startswith('#') or line.startswith('//'):
        pass
      # Checks for a nested "--flagfile=<bar>" flag in the current file.
      # If we find one, recursively parse down into that file.
      elif self.__IsFlagFileDirective(line):
        sub_filename = self.ExtractFilename(line)
        # We do a little safety check for reparsing a file we've already done.
        if not sub_filename in parsed_file_list:
          included_flags = self.__GetFlagFileLines(sub_filename,
                                                  parsed_file_list)
          flag_line_list.extend(included_flags)
        else: # Case of hitting a circularly included file.
          print >>sys.stderr, ('Warning: Hit circular flagfile dependency: %s'
                               % sub_filename)
      else:
        # Any line that's not a comment or a nested flagfile should get
        # copied into 2nd position.  This leaves earlier arguments
        # further back in the list, thus giving them higher priority.
        flag_line_list.append(line.strip())
    return flag_line_list
-
  def ReadFlagsFromFiles(self, argv, force_gnu=True):
    """Processes command line args, but also allow args to be read from file.
    Args:
      argv: A list of strings, usually sys.argv[1:], which may contain one or
        more flagfile directives of the form --flagfile="./filename".
        Note that the name of the program (sys.argv[0]) should be omitted.
      force_gnu: If False, --flagfile parsing obeys normal flag semantics.
        If True, --flagfile parsing instead follows gnu_getopt semantics.
        *** WARNING *** force_gnu=False may become the future default!

    Returns:

      A new list which has the original list combined with what we read
      from any flagfile(s).

    References: Global gflags.FLAG class instance.

    This function should be called before the normal FLAGS(argv) call.
    This function scans the input list for a flag that looks like:
    --flagfile=<somefile>. Then it opens <somefile>, reads all valid key
    and value pairs and inserts them into the input list between the
    first item of the list and any subsequent items in the list.

    Note that your application's flags are still defined the usual way
    using gflags DEFINE_flag() type functions.

    Notes (assuming we're getting a commandline of some sort as our input):
    --> Flags from the command line argv _should_ always take precedence!
    --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
        It will be processed after the parent flag file is done.
    --> For duplicate flags, first one we hit should "win".
    --> In a flagfile, a line beginning with # or // is a comment.
    --> Entirely blank lines _should_ be ignored.
    """
    parsed_file_list = []
    rest_of_args = argv
    new_argv = []
    while rest_of_args:
      current_arg = rest_of_args[0]
      rest_of_args = rest_of_args[1:]
      if self.__IsFlagFileDirective(current_arg):
        # This handles the case of -(-)flagfile foo.  In this case the
        # next arg really is part of this one.
        if current_arg == '--flagfile' or current_arg == '-flagfile':
          if not rest_of_args:
            raise IllegalFlagValue('--flagfile with no argument')
          flag_filename = os.path.expanduser(rest_of_args[0])
          rest_of_args = rest_of_args[1:]
        else:
          # This handles the case of (-)-flagfile=foo.
          flag_filename = self.ExtractFilename(current_arg)
        # Splice the flagfile's lines in at the front of the accumulated args.
        new_argv[0:0] = self.__GetFlagFileLines(flag_filename, parsed_file_list)
      else:
        new_argv.append(current_arg)
        # Stop parsing after '--', like getopt and gnu_getopt.
        if current_arg == '--':
          break
        # Stop parsing after a non-flag, like getopt.
        if not current_arg.startswith('-'):
          if not force_gnu and not self.__dict__['__use_gnu_getopt']:
            break

    if rest_of_args:
      new_argv.extend(rest_of_args)

    return new_argv
-
- def FlagsIntoString(self):
- """Returns a string with the flags assignments from this FlagValues object.
-
- This function ignores flags whose value is None. Each flag
- assignment is separated by a newline.
-
- NOTE: MUST mirror the behavior of the C++ function
- CommandlineFlagsIntoString from google3/base/commandlineflags.cc.
- """
- s = ''
- for flag in self.FlagDict().values():
- if flag.value is not None:
- s += flag.Serialize() + '\n'
- return s
-
- def AppendFlagsIntoFile(self, filename):
- """Appends all flags assignments from this FlagInfo object to a file.
-
- Output will be in the format of a flagfile.
-
- NOTE: MUST mirror the behavior of the C++ version of
- AppendFlagsIntoFile from google3/base/commandlineflags.cc.
- """
- out_file = open(filename, 'a')
- out_file.write(self.FlagsIntoString())
- out_file.close()
-
  def WriteHelpInXMLFormat(self, outfile=None):
    """Outputs flag documentation in XML format.

    NOTE: We use element names that are consistent with those used by
    the C++ command-line flag library, from
    google3/base/commandlineflags_reporting.cc.  We also use a few new
    elements (e.g., <key>), but we do not interfere / overlap with
    existing XML elements used by the C++ library.  Please maintain this
    consistency.

    Args:
      outfile: File object we write to.  Default None means sys.stdout.
    """
    outfile = outfile or sys.stdout

    outfile.write('<?xml version=\"1.0\"?>\n')
    outfile.write('<AllFlags>\n')
    indent = '  '
    _WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]),
                           indent)

    # The <usage> element carries the main module's docstring, with '%s'
    # substituted by the program name.
    usage_doc = sys.modules['__main__'].__doc__
    if not usage_doc:
      usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
    else:
      usage_doc = usage_doc.replace('%s', sys.argv[0])
    _WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent)

    # Get list of key flags for the main module.
    key_flags = self._GetKeyFlagsForModule(_GetMainModule())

    # Sort flags by declaring module name and next by flag name.
    flags_by_module = self.FlagsByModuleDict()
    all_module_names = list(flags_by_module.keys())
    all_module_names.sort()
    for module_name in all_module_names:
      flag_list = [(f.name, f) for f in flags_by_module[module_name]]
      flag_list.sort()
      for unused_flag_name, flag in flag_list:
        is_key = flag in key_flags
        flag.WriteInfoInXMLFormat(outfile, module_name,
                                  is_key=is_key, indent=indent)

    outfile.write('</AllFlags>\n')
    outfile.flush()
-# end of FlagValues definition
-
-
# The global FlagValues instance; the DEFINE_* functions register flags
# here by default, and FLAGS(argv) parses the command line into it.
FLAGS = FlagValues()
-
-
-def _MakeXMLSafe(s):
- """Escapes <, >, and & from s, and removes XML 1.0-illegal chars."""
- s = cgi.escape(s) # Escape <, >, and &
- # Remove characters that cannot appear in an XML 1.0 document
- # (http://www.w3.org/TR/REC-xml/#charsets).
- #
- # NOTE: if there are problems with current solution, one may move to
- # XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;).
- s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s)
- return s
-
-
def _WriteSimpleXMLElement(outfile, name, value, indent):
  """Writes one simple <name>value</name> XML element to outfile.

  Args:
    outfile: File object we write the XML element to.
    name: A string, the name of XML element.
    value: A Python object, whose string representation will be used
      as the value of the XML element.
    indent: A string, prepended to each line of generated output.
  """
  value_str = str(value)
  if isinstance(value, bool):
    # Match the C++ flag library's rendering: 'true'/'false', no caps.
    value_str = value_str.lower()
  element = '%s<%s>%s</%s>\n' % (indent, name, _MakeXMLSafe(value_str), name)
  outfile.write(element)
-
-
class Flag:
  """Information about a command-line flag.

  'Flag' objects define the following fields:
    .name  - the name for this flag
    .default - the default value for this flag
    .default_as_str - default value as repr'd string, e.g., "'true'" (or None)
    .value  - the most recent parsed value of this flag; set by Parse()
    .help  - a help string or None if no help is available
    .short_name  - the single letter alias for this flag (or None)
    .boolean  - if 'true', this flag does not accept arguments
    .present  - true if this flag was parsed from command line flags.
    .parser  - an ArgumentParser object
    .serializer - an ArgumentSerializer object
    .allow_override - the flag may be redefined without raising an error

  The only public method of a 'Flag' object is Parse(), but it is
  typically only called by a 'FlagValues' object.  The Parse() method is
  a thin wrapper around the 'ArgumentParser' Parse() method.  The parsed
  value is saved in .value, and the .present attribute is updated.  If
  this flag was already present, a FlagsError is raised.

  Parse() is also called during __init__ to parse the default value and
  initialize the .value attribute.  This enables other python modules to
  safely use flags even if the __main__ module neglects to parse the
  command line arguments.  The .present attribute is cleared after
  __init__ parsing.  If the default value is set to None, then the
  __init__ parsing step is skipped and the .value attribute is
  initialized to None.

  Note: The default value is also presented to the user in the help
  string, so it is important that it be a legal value for this flag.
  """

  def __init__(self, parser, serializer, name, default, help_string,
               short_name=None, boolean=0, allow_override=0):
    self.name = name

    if not help_string:
      help_string = '(no help available)'

    self.help = help_string
    self.short_name = short_name
    self.boolean = boolean
    self.present = 0
    self.parser = parser
    self.serializer = serializer
    self.allow_override = allow_override
    self.value = None

    # Parses the default, initializing .value, .default and .default_as_str.
    self.SetDefault(default)

  def __GetParsedValueAsString(self, value):
    # Renders a parsed value the way it would be typed on a command line,
    # for use in help text (the "(default: ...)" suffix).
    if value is None:
      return None
    if self.serializer:
      return repr(self.serializer.Serialize(value))
    if self.boolean:
      if value:
        return repr('true')
      else:
        return repr('false')
    return repr(str(value))

  def Parse(self, argument):
    """Parses argument into .value and counts the occurrence in .present.

    Raises:
      IllegalFlagValue: If the underlying parser rejects the argument.
    """
    try:
      self.value = self.parser.Parse(argument)
    except ValueError as e:  # recast ValueError as IllegalFlagValue
      # ('except E, e' is legacy Python 2 syntax; the 'as' form is valid
      # from Python 2.6 on and required by Python 3.)
      raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e))
    self.present += 1

  def Unparse(self):
    """Resets the flag to its (re-parsed) default value."""
    if self.default is None:
      self.value = None
    else:
      self.Parse(self.default)
    self.present = 0

  def Serialize(self):
    """Returns this flag as a command-line argument string, or ''."""
    if self.value is None:
      return ''
    if self.boolean:
      if self.value:
        return "--%s" % self.name
      else:
        return "--no%s" % self.name
    else:
      if not self.serializer:
        raise FlagsError("Serializer not present for flag %s" % self.name)
      return "--%s=%s" % (self.name, self.serializer.Serialize(self.value))

  def SetDefault(self, value):
    """Changes the default value (and current value too) for this Flag."""
    # We can't allow a None override because it may end up not being
    # passed to C++ code when we're overriding C++ flags.  So we
    # cowardly bail out until someone fixes the semantics of trying to
    # pass None to a C++ flag.  See swig_flags.Init() for details on
    # this behavior.
    if value is None and self.allow_override:
      raise DuplicateFlagCannotPropagateNoneToSwig(self.name)

    self.default = value
    self.Unparse()
    self.default_as_str = self.__GetParsedValueAsString(self.value)

  def Type(self):
    """Returns: a string that describes the type of this Flag."""
    # NOTE: we use strings, and not the types.*Type constants because
    # our flags can have more exotic types, e.g., 'comma separated list
    # of strings', 'whitespace separated list of strings', etc.
    return self.parser.Type()

  def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''):
    """Writes common info about this flag, in XML format.

    This is information that is relevant to all flags (e.g., name,
    meaning, etc.).  If you defined a flag that has some other pieces of
    info, then please override _WriteCustomInfoInXMLFormat.

    Please do NOT override this method.

    Args:
      outfile: File object we write to.
      module_name: A string, the name of the module that defines this flag.
      is_key: A boolean, True iff this flag is key for main module.
      indent: A string that is prepended to each generated line.
    """
    outfile.write(indent + '<flag>\n')
    inner_indent = indent + '  '
    if is_key:
      _WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent)
    _WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent)
    # Print flag features that are relevant for all flags.
    _WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent)
    if self.short_name:
      _WriteSimpleXMLElement(outfile, 'short_name', self.short_name,
                             inner_indent)
    if self.help:
      _WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent)
    # The default flag value can either be represented as a string like on the
    # command line, or as a Python object.  We serialize this value in the
    # latter case in order to remain consistent.
    if self.serializer and not isinstance(self.default, str):
      default_serialized = self.serializer.Serialize(self.default)
    else:
      default_serialized = self.default
    _WriteSimpleXMLElement(outfile, 'default', default_serialized, inner_indent)
    _WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent)
    _WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent)
    # Print extra flag features this flag may have.
    self._WriteCustomInfoInXMLFormat(outfile, inner_indent)
    outfile.write(indent + '</flag>\n')

  def _WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Writes extra info about this flag, in XML format.

    "Extra" means "not already printed by WriteInfoInXMLFormat above."

    Args:
      outfile: File object we write to.
      indent: A string that is prepended to each generated line.
    """
    # Usually, the parser knows the extra details about the flag, so
    # we just forward the call to it.
    self.parser.WriteCustomInfoInXMLFormat(outfile, indent)
-# End of Flag definition
-
-
class ArgumentParser:
  """Base class used to parse and convert arguments.

  The Parse() method checks to make sure that the string argument is a
  legal value and convert it to a native type.  If the value cannot be
  converted, it should throw a 'ValueError' exception with a human
  readable explanation of why the value is illegal.

  Subclasses should also define a syntactic_help string which may be
  presented to the user to describe the form of the legal values.
  """
  syntactic_help = ""

  def Parse(self, argument):
    """Default implementation: always returns its argument unmodified."""
    return argument

  def Type(self):
    """Returns a string naming the type this parser produces."""
    return 'string'

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Hook for subclasses with extra per-flag XML info; a no-op here."""
    pass
-
-
class ArgumentSerializer:
  """Base class for generating string representations of a flag value."""

  def Serialize(self, value):
    """Default implementation: the plain str() form of the value."""
    return str(value)
-
-
class ListSerializer(ArgumentSerializer):
  """Serializes a list of values by joining their string forms."""

  def __init__(self, list_sep):
    # Separator placed between serialized list items.
    self.list_sep = list_sep

  def Serialize(self, value):
    """Returns the items of value, stringified and joined by list_sep."""
    parts = [str(item) for item in value]
    return self.list_sep.join(parts)
-
-
-# The DEFINE functions are explained in more detail in the module doc string.
-
-
def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None,
           **args):
  """Registers a generic Flag object.

  NOTE: in the docstrings of all DEFINE* functions, "registers" is short
  for "creates a new flag and registers it".

  Auxiliary function: clients should use the specialized DEFINE_<type>
  function instead.

  Args:
    parser: ArgumentParser that is used to parse the flag arguments.
    name: A string, the flag name.
    default: The default value of the flag.
    help: A help string.
    flag_values: FlagValues object the flag will be registered with.
    serializer: ArgumentSerializer that serializes the flag value.
    args: Dictionary with extra keyword args that are passed to the
      Flag __init__.
  """
  DEFINE_flag(Flag(parser, serializer, name, default, help, **args),
              flag_values)
-
-
def DEFINE_flag(flag, flag_values=FLAGS):
  """Registers a 'Flag' object with a 'FlagValues' object.

  By default, the global FLAGS 'FlagValue' object is used.

  Typical users will use one of the more specialized DEFINE_xxx
  functions, such as DEFINE_string or DEFINE_integer.  But developers
  who need to create Flag objects themselves should use this function
  to register their flags.

  Args:
    flag: A Flag object, the flag to register.
    flag_values: FlagValues object the flag is registered with.
  """
  # copying the reference to flag_values prevents pychecker warnings
  fv = flag_values
  fv[flag.name] = flag
  # Tell flag_values who's defining the flag.
  if isinstance(flag_values, FlagValues):
    # Regarding the above isinstance test: some users pass funny
    # values of flag_values (e.g., {}) in order to avoid the flag
    # registration (in the past, there used to be a flag_values ==
    # FLAGS test here) and redefine flags with the same name (e.g.,
    # debug).  To avoid breaking their code, we perform the
    # registration only if flag_values is a real FlagValues object.
    flag_values._RegisterFlagByModule(_GetCallingModule(), flag)
-
-
def _InternalDeclareKeyFlags(flag_names,
                             flag_values=FLAGS, key_flag_values=None):
  """Declares a flag as key for the calling module.

  Internal function.  User code should call DECLARE_key_flag or
  ADOPT_module_key_flags instead.

  Args:
    flag_names: A list of strings that are names of already-registered
      Flag objects.
    flag_values: A FlagValues object that the flags listed in
      flag_names have registered with (the value of the flag_values
      argument from the DEFINE_* calls that defined those flags).
      This should almost never need to be overridden.
    key_flag_values: A FlagValues object that (among possibly many
      other things) keeps track of the key flags for each module.
      Default None means "same as flag_values".  This should almost
      never need to be overridden.

  Raises:
    UnrecognizedFlagError: when we refer to a flag that was not
      defined yet.
  """
  key_flag_values = key_flag_values or flag_values

  calling_module = _GetCallingModule()

  for flag_name in flag_names:
    if flag_name not in flag_values:
      raise UnrecognizedFlagError(flag_name)
    key_flag_values._RegisterKeyFlagForModule(
        calling_module, flag_values.FlagDict()[flag_name])
-
-
def DECLARE_key_flag(flag_name, flag_values=FLAGS):
  """Declares one flag as key to the current module.

  Key flags are flags that are deemed really important for a module.
  They are important when listing help messages; e.g., if the
  --helpshort command-line flag is used, then only the key flags of the
  main module are listed (instead of all flags, as in the case of
  --help).

  Sample usage:

    flags.DECLARE_key_flag('flag_1')

  Args:
    flag_name: A string, the name of an already declared flag.
      (Redeclaring flags as key, including flags implicitly key
      because they were declared in this module, is a no-op.)
    flag_values: A FlagValues object.  This should almost never
      need to be overridden.
  """
  if flag_name in _SPECIAL_FLAGS:
    # Take care of the special flags, e.g., --flagfile, --undefok.
    # These flags are defined in _SPECIAL_FLAGS, and are treated
    # specially during flag parsing, taking precedence over the
    # user-defined flags.
    _InternalDeclareKeyFlags([flag_name],
                             flag_values=_SPECIAL_FLAGS,
                             key_flag_values=flag_values)
    return
  _InternalDeclareKeyFlags([flag_name], flag_values=flag_values)
-
-
def ADOPT_module_key_flags(module, flag_values=FLAGS):
  """Declares that all flags key to a module are key to the current module.

  Args:
    module: A module object.
    flag_values: A FlagValues object.  This should almost never need
      to be overridden.

  Raises:
    FlagsError: When given an argument that is a module name (a
      string), instead of a module object.
  """
  # NOTE(salcianu): an even better test would be if not
  # isinstance(module, types.ModuleType) but I didn't want to import
  # types for such a tiny use.
  if isinstance(module, str):
    raise FlagsError('Received module name %s; expected a module object.'
                     % module)
  _InternalDeclareKeyFlags(
      [f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)],
      flag_values=flag_values)
  # If module is this flag module, take _SPECIAL_FLAGS into account.
  if module == _GetThisModuleObjectAndName()[0]:
    _InternalDeclareKeyFlags(
        # As we associate flags with _GetCallingModule(), the special
        # flags defined in this module are incorrectly registered with
        # a different module.  So, we can't use _GetKeyFlagsForModule.
        # Instead, we take all flags from _SPECIAL_FLAGS (a private
        # FlagValues, where no other module should register flags).
        [f.name for f in _SPECIAL_FLAGS.FlagDict().values()],
        flag_values=_SPECIAL_FLAGS,
        key_flag_values=flag_values)
-
-
-#
-# STRING FLAGS
-#
-
-
def DEFINE_string(name, default, help, flag_values=FLAGS, **args):
  """Registers a flag whose value can be any string.

  Args:
    name: A string, the flag name.
    default: The default value of the flag.
    help: A help string.
    flag_values: FlagValues object the flag will be registered with.
    args: Extra keyword args passed through to the Flag constructor.
  """
  parser = ArgumentParser()
  serializer = ArgumentSerializer()
  DEFINE(parser, name, default, help, flag_values, serializer, **args)
-
-
-#
-# BOOLEAN FLAGS
-#
-# and the special HELP flags.
-
class BooleanParser(ArgumentParser):
  """Parser of boolean values."""

  def Convert(self, argument):
    """Converts the argument to a boolean; raises ValueError on errors."""
    # isinstance (rather than an exact type() comparison) is the correct
    # idiom and also accepts str subclasses.
    if isinstance(argument, str):
      if argument.lower() in ['true', 't', '1']:
        return True
      elif argument.lower() in ['false', 'f', '0']:
        return False

    bool_argument = bool(argument)
    if argument == bool_argument:
      # The argument is a valid boolean (True, False, 0, or 1), and not just
      # something that always converts to bool (list, string, int, etc.).
      return bool_argument

    raise ValueError('Non-boolean argument to boolean flag', argument)

  def Parse(self, argument):
    """Parses argument into True or False; see Convert()."""
    val = self.Convert(argument)
    return val

  def Type(self):
    """Returns the type name of parsed values."""
    return 'bool'
-
-
class BooleanFlag(Flag):
  """Basic boolean flag.

  Boolean flags do not take any arguments, and their value is either
  True (1) or False (0).  The false value is specified on the command
  line by prepending the word 'no' to either the long or the short flag
  name.

  For example, if a Boolean flag was created whose long name was
  'update' and whose short name was 'x', then this flag could be
  explicitly unset through either --noupdate or --nox.
  """

  def __init__(self, name, default, help, short_name=None, **args):
    parser = BooleanParser()
    # The positional 1 sets Flag.boolean: this flag takes no argument.
    Flag.__init__(self, parser, None, name, default, help, short_name, 1,
                  **args)
    if not self.help:
      self.help = "a boolean value"
-
-
def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args):
  """Registers a boolean flag.

  Such a boolean flag does not take an argument.  If a user wants to
  specify a false value explicitly, the long option beginning with 'no'
  must be used: i.e. --noflag

  This flag will have a value of None, True or False.  None is possible
  if default=None and the user does not specify the flag on the command
  line.

  Args:
    name: A string, the flag name.
    default: The default value of the flag (None, True or False).
    help: A help string.
    flag_values: FlagValues object the flag will be registered with.
    args: Extra keyword args passed through to the BooleanFlag constructor.
  """
  DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values)

# Match C++ API to unconfuse C++ people.
DEFINE_bool = DEFINE_boolean
-
class HelpFlag(BooleanFlag):
  """
  HelpFlag is a special boolean flag that prints usage information and
  raises a SystemExit exception if it is ever found in the command
  line arguments.  Note this is called with allow_override=1, so other
  apps can define their own --help flag, replacing this one, if they want.
  """
  def __init__(self):
    BooleanFlag.__init__(self, "help", 0, "show this help",
                         short_name="?", allow_override=1)
  def Parse(self, arg):
    # Printing help and exiting happens at parse time, as soon as the
    # flag is seen on the command line.
    if arg:
      doc = sys.modules["__main__"].__doc__
      flags = str(FLAGS)
      print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
      if flags:
        print "flags:"
        print flags
      sys.exit(1)
-
-
class HelpXMLFlag(BooleanFlag):
  """Similar to HelpFlag, but generates output in XML format."""

  def __init__(self):
    BooleanFlag.__init__(self, 'helpxml', False,
                         'like --help, but generates XML output',
                         allow_override=1)

  def Parse(self, arg):
    # Emit the XML help and exit as soon as --helpxml is seen.
    if arg:
      FLAGS.WriteHelpInXMLFormat(sys.stdout)
      sys.exit(1)
-
-
class HelpshortFlag(BooleanFlag):
  """
  HelpshortFlag is a special boolean flag that prints usage
  information for the "main" module, and raises a SystemExit exception
  if it is ever found in the command line arguments.  Note this is
  called with allow_override=1, so other apps can define their own
  --helpshort flag, replacing this one, if they want.
  """
  def __init__(self):
    BooleanFlag.__init__(self, "helpshort", 0,
                         "show usage only for this module", allow_override=1)
  def Parse(self, arg):
    # Print only the main module's key-flag help and exit as soon as
    # --helpshort is seen on the command line.
    if arg:
      doc = sys.modules["__main__"].__doc__
      flags = FLAGS.MainModuleHelp()
      print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0])
      if flags:
        print "flags:"
        print flags
      sys.exit(1)
-
-#
-# Numeric parser - base class for Integer and Float parsers
-#
-
-
class NumericParser(ArgumentParser):
  """Parser of numeric values.

  Parsed value may be bounded to a given upper and lower bound.
  """

  def Parse(self, argument):
    """Converts argument and enforces the configured bounds.

    Raises:
      ValueError: If the converted value lies outside the bounds.
    """
    val = self.Convert(argument)
    too_low = self.lower_bound is not None and val < self.lower_bound
    too_high = self.upper_bound is not None and val > self.upper_bound
    if too_low or too_high:
      raise ValueError("%s is not %s" % (val, self.syntactic_help))
    return val

  def WriteCustomInfoInXMLFormat(self, outfile, indent):
    """Emits <lower_bound>/<upper_bound> elements for any bounds set."""
    if self.lower_bound is not None:
      _WriteSimpleXMLElement(outfile, 'lower_bound', self.lower_bound, indent)
    if self.upper_bound is not None:
      _WriteSimpleXMLElement(outfile, 'upper_bound', self.upper_bound, indent)

  def Convert(self, argument):
    """Default implementation: always returns its argument unmodified."""
    return argument
-
-# End of Numeric Parser
-
-#
-# FLOAT FLAGS
-#
-
-class FloatParser(NumericParser):
- """Parser of floating point values.
-
- Parsed value may be bounded to a given upper and lower bound.
- """
- number_article = "a"
- number_name = "number"
- syntactic_help = " ".join((number_article, number_name))
-
- def __init__(self, lower_bound=None, upper_bound=None):
- self.lower_bound = lower_bound
- self.upper_bound = upper_bound
- sh = self.syntactic_help
- if lower_bound is not None and upper_bound is not None:
- sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
- elif lower_bound == 0:
- sh = "a non-negative %s" % self.number_name
- elif upper_bound == 0:
- sh = "a non-positive %s" % self.number_name
- elif upper_bound is not None:
- sh = "%s <= %s" % (self.number_name, upper_bound)
- elif lower_bound is not None:
- sh = "%s >= %s" % (self.number_name, lower_bound)
- self.syntactic_help = sh
-
- def Convert(self, argument):
- """Converts argument to a float; raises ValueError on errors."""
- return float(argument)
-
- def Type(self):
- return 'float'
-# End of FloatParser
-
-
-def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None,
- flag_values=FLAGS, **args):
- """Registers a flag whose value must be a float.
-
- If lower_bound or upper_bound are set, then this flag must be
- within the given range.
- """
- parser = FloatParser(lower_bound, upper_bound)
- serializer = ArgumentSerializer()
- DEFINE(parser, name, default, help, flag_values, serializer, **args)
-
-
-#
-# INTEGER FLAGS
-#
-
-
-class IntegerParser(NumericParser):
- """Parser of an integer value.
-
- Parsed value may be bounded to a given upper and lower bound.
- """
- number_article = "an"
- number_name = "integer"
- syntactic_help = " ".join((number_article, number_name))
-
- def __init__(self, lower_bound=None, upper_bound=None):
- self.lower_bound = lower_bound
- self.upper_bound = upper_bound
- sh = self.syntactic_help
- if lower_bound is not None and upper_bound is not None:
- sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound))
- elif lower_bound == 1:
- sh = "a positive %s" % self.number_name
- elif upper_bound == -1:
- sh = "a negative %s" % self.number_name
- elif lower_bound == 0:
- sh = "a non-negative %s" % self.number_name
- elif upper_bound == 0:
- sh = "a non-positive %s" % self.number_name
- elif upper_bound is not None:
- sh = "%s <= %s" % (self.number_name, upper_bound)
- elif lower_bound is not None:
- sh = "%s >= %s" % (self.number_name, lower_bound)
- self.syntactic_help = sh
-
- def Convert(self, argument):
- __pychecker__ = 'no-returnvalues'
- if type(argument) == str:
- base = 10
- if len(argument) > 2 and argument[0] == "0" and argument[1] == "x":
- base = 16
- try:
- return int(argument, base)
- # ValueError is thrown when argument is a string, and overflows an int.
- except ValueError:
- return long(argument, base)
- else:
- try:
- return int(argument)
- # OverflowError is thrown when argument is numeric, and overflows an int.
- except OverflowError:
- return long(argument)
-
- def Type(self):
- return 'int'
-
-
-def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None,
- flag_values=FLAGS, **args):
- """Registers a flag whose value must be an integer.
-
- If lower_bound, or upper_bound are set, then this flag must be
- within the given range.
- """
- parser = IntegerParser(lower_bound, upper_bound)
- serializer = ArgumentSerializer()
- DEFINE(parser, name, default, help, flag_values, serializer, **args)
-
-
-#
-# ENUM FLAGS
-#
-
-
-class EnumParser(ArgumentParser):
- """Parser of a string enum value (a string value from a given set).
-
- If enum_values (see below) is not specified, any string is allowed.
- """
-
- def __init__(self, enum_values=None):
- self.enum_values = enum_values
-
- def Parse(self, argument):
- if self.enum_values and argument not in self.enum_values:
- raise ValueError("value should be one of <%s>" %
- "|".join(self.enum_values))
- return argument
-
- def Type(self):
- return 'string enum'
-
-
-class EnumFlag(Flag):
- """Basic enum flag; its value can be any string from list of enum_values."""
-
- def __init__(self, name, default, help, enum_values=None,
- short_name=None, **args):
- enum_values = enum_values or []
- p = EnumParser(enum_values)
- g = ArgumentSerializer()
- Flag.__init__(self, p, g, name, default, help, short_name, **args)
- if not self.help: self.help = "an enum string"
- self.help = "<%s>: %s" % ("|".join(enum_values), self.help)
-
- def _WriteCustomInfoInXMLFormat(self, outfile, indent):
- for enum_value in self.parser.enum_values:
- _WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent)
-
-
-def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS,
- **args):
- """Registers a flag whose value can be any string from enum_values."""
- DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args),
- flag_values)
-
-
-#
-# LIST FLAGS
-#
-
-
-class BaseListParser(ArgumentParser):
- """Base class for a parser of lists of strings.
-
- To extend, inherit from this class; from the subclass __init__, call
-
- BaseListParser.__init__(self, token, name)
-
- where token is a character used to tokenize, and name is a description
- of the separator.
- """
-
- def __init__(self, token=None, name=None):
- assert name
- self._token = token
- self._name = name
- self.syntactic_help = "a %s separated list" % self._name
-
- def Parse(self, argument):
- if isinstance(argument, list):
- return argument
- elif argument == '':
- return []
- else:
- return [s.strip() for s in argument.split(self._token)]
-
- def Type(self):
- return '%s separated list of strings' % self._name
-
-
-class ListParser(BaseListParser):
- """Parser for a comma-separated list of strings."""
-
- def __init__(self):
- BaseListParser.__init__(self, ',', 'comma')
-
- def WriteCustomInfoInXMLFormat(self, outfile, indent):
- BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
- _WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent)
-
-
-class WhitespaceSeparatedListParser(BaseListParser):
- """Parser for a whitespace-separated list of strings."""
-
- def __init__(self):
- BaseListParser.__init__(self, None, 'whitespace')
-
- def WriteCustomInfoInXMLFormat(self, outfile, indent):
- BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent)
- separators = list(string.whitespace)
- separators.sort()
- for ws_char in string.whitespace:
- _WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent)
-
-
-def DEFINE_list(name, default, help, flag_values=FLAGS, **args):
- """Registers a flag whose value is a comma-separated list of strings."""
- parser = ListParser()
- serializer = ListSerializer(',')
- DEFINE(parser, name, default, help, flag_values, serializer, **args)
-
-
-def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args):
- """Registers a flag whose value is a whitespace-separated list of strings.
-
- Any whitespace can be used as a separator.
- """
- parser = WhitespaceSeparatedListParser()
- serializer = ListSerializer(' ')
- DEFINE(parser, name, default, help, flag_values, serializer, **args)
-
-
-#
-# MULTI FLAGS
-#
-
-
-class MultiFlag(Flag):
- """A flag that can appear multiple time on the command-line.
-
- The value of such a flag is a list that contains the individual values
- from all the appearances of that flag on the command-line.
-
- See the __doc__ for Flag for most behavior of this class. Only
- differences in behavior are described here:
-
- * The default value may be either a single value or a list of values.
- A single value is interpreted as the [value] singleton list.
-
- * The value of the flag is always a list, even if the option was
- only supplied once, and even if the default value is a single
- value
- """
-
- def __init__(self, *args, **kwargs):
- Flag.__init__(self, *args, **kwargs)
- self.help += ';\n repeat this option to specify a list of values'
-
- def Parse(self, arguments):
- """Parses one or more arguments with the installed parser.
-
- Args:
- arguments: a single argument or a list of arguments (typically a
- list of default values); a single argument is converted
- internally into a list containing one item.
- """
- if not isinstance(arguments, list):
- # Default value may be a list of values. Most other arguments
- # will not be, so convert them into a single-item list to make
- # processing simpler below.
- arguments = [arguments]
-
- if self.present:
- # keep a backup reference to list of previously supplied option values
- values = self.value
- else:
- # "erase" the defaults with an empty list
- values = []
-
- for item in arguments:
- # have Flag superclass parse argument, overwriting self.value reference
- Flag.Parse(self, item) # also increments self.present
- values.append(self.value)
-
- # put list of option values back in the 'value' attribute
- self.value = values
-
- def Serialize(self):
- if not self.serializer:
- raise FlagsError("Serializer not present for flag %s" % self.name)
- if self.value is None:
- return ''
-
- s = ''
-
- multi_value = self.value
-
- for self.value in multi_value:
- if s: s += ' '
- s += Flag.Serialize(self)
-
- self.value = multi_value
-
- return s
-
- def Type(self):
- return 'multi ' + self.parser.Type()
-
-
-def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS,
- **args):
- """Registers a generic MultiFlag that parses its args with a given parser.
-
- Auxiliary function. Normal users should NOT use it directly.
-
- Developers who need to create their own 'Parser' classes for options
- which can appear multiple times can call this module function to
- register their flags.
- """
- DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args),
- flag_values)
-
-
-def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args):
- """Registers a flag whose value can be a list of any strings.
-
- Use the flag on the command line multiple times to place multiple
- string values into the list. The 'default' may be a single string
- (which will be converted into a single-element list) or a list of
- strings.
- """
- parser = ArgumentParser()
- serializer = ArgumentSerializer()
- DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
-
-
-def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None,
- flag_values=FLAGS, **args):
- """Registers a flag whose value can be a list of arbitrary integers.
-
- Use the flag on the command line multiple times to place multiple
- integer values into the list. The 'default' may be a single integer
- (which will be converted into a single-element list) or a list of
- integers.
- """
- parser = IntegerParser(lower_bound, upper_bound)
- serializer = ArgumentSerializer()
- DEFINE_multi(parser, serializer, name, default, help, flag_values, **args)
-
-
-# Now register the flags that we want to exist in all applications.
-# These are all defined with allow_override=1, so user-apps can use
-# these flagnames for their own purposes, if they want.
-DEFINE_flag(HelpFlag())
-DEFINE_flag(HelpshortFlag())
-DEFINE_flag(HelpXMLFlag())
-
-# Define special flags here so that help may be generated for them.
-# NOTE: Please do NOT use _SPECIAL_FLAGS from outside this module.
-_SPECIAL_FLAGS = FlagValues()
-
-
-DEFINE_string(
- 'flagfile', "",
- "Insert flag definitions from the given file into the command line.",
- _SPECIAL_FLAGS)
-
-DEFINE_string(
- 'undefok', "",
- "comma-separated list of flag names that it is okay to specify "
- "on the command line even if the program does not define a flag "
- "with that name. IMPORTANT: flags in this list that have "
- "arguments MUST use the --flag=value format.", _SPECIAL_FLAGS)
diff --git a/tools/closure_linter/setup.cfg b/tools/closure_linter/setup.cfg
deleted file mode 100644
index 861a9f5542..0000000000
--- a/tools/closure_linter/setup.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-[egg_info]
-tag_build =
-tag_date = 0
-tag_svn_revision = 0
-
diff --git a/tools/closure_linter/setup.py b/tools/closure_linter/setup.py
index 1d1764f2c9..d320b65d17 100755
--- a/tools/closure_linter/setup.py
+++ b/tools/closure_linter/setup.py
@@ -20,7 +20,7 @@ except ImportError:
from distutils.core import setup
setup(name='closure_linter',
- version='2.2.6',
+ version='2.3.17',
description='Closure Linter',
license='Apache',
author='The Closure Linter Authors',