Initial commit

Kevin Rode 2019-09-27 23:56:01 -04:00
commit a98a28805b
796 changed files with 196233 additions and 0 deletions

43
amt.py Normal file

@@ -0,0 +1,43 @@
import os
import re

import adbutils
import requests
from lxml import html


def adb_start():
    # Ensure adb.exe is reachable: use an existing platform-tools entry in
    # PATH, or fall back to the copy bundled next to this script (Windows
    # path separator, matching the bundled .exe tools).
    if "platform-tools" in os.environ['PATH']:
        print("ADB found in PATH")
    else:
        os.environ['PATH'] += ';' + os.getcwd() + '\\platform-tools'


def adb_connect():
    # Connect to the local adb server on its default port and return the
    # first attached device.
    adb = adbutils.AdbClient(host="127.0.0.1", port=5037)
    print(adb.device_list())
    return adb.device()


def twrp_download(d):
    # Read the SoC name from /proc/cpuinfo and strip it down to the bare
    # hardware value.
    cpu = d.shell('cat /proc/cpuinfo | grep Hardware')
    cpu = cpu.replace(" ", "")
    cpu = re.sub(r'(.+:)', '', cpu)
    # Scrape the device's TWRP download page for image links and take the
    # first (newest) one.
    r = requests.get('https://dl.twrp.me/' + cpu)
    tree = html.fromstring(r.text)
    urls = tree.xpath('//a/@href')
    downloads = [url for url in urls if "img" in url]
    url_to_download = "https://dl.twrp.me" + downloads[0]
    url_to_download = url_to_download.replace('.html', '')
    print("Use this link to download TWRP for your connected device: " + url_to_download)
    print("Ensure that the downloaded file is moved to the same folder as the script before continuing")


def main():
    adb_start()
    device = adb_connect()
    twrp_download(device)


if __name__ == '__main__':
    main()
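As a worked sketch of the string handling in twrp_download, using a hypothetical /proc/cpuinfo line (real device output varies):

import re

cpu = "Hardware : msm8974"          # assumed `grep Hardware` output
cpu = cpu.replace(" ", "")          # -> "Hardware:msm8974"
cpu = re.sub(r'(.+:)', '', cpu)     # -> "msm8974"
print("https://dl.twrp.me/" + cpu)  # the page twrp_download then scrapes for .img links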


5809
platform-tools/NOTICE.txt Normal file

File diff suppressed because it is too large

BIN
platform-tools/adb.exe Normal file

Binary file not shown.


File diff suppressed because it is too large


BIN
platform-tools/etc1tool.exe Normal file

Binary file not shown.

BIN
platform-tools/fastboot.exe Normal file

Binary file not shown.



@@ -0,0 +1,53 @@
[defaults]
base_features = sparse_super,large_file,filetype,resize_inode,dir_index,ext_attr
default_mntopts = acl,user_xattr
enable_periodic_fsck = 0
blocksize = 4096
inode_size = 256
inode_ratio = 16384
reserved_ratio = 1.0
[fs_types]
ext3 = {
features = has_journal
}
ext4 = {
features = has_journal,extent,huge_file,dir_nlink,extra_isize,uninit_bg
inode_size = 256
}
ext4dev = {
features = has_journal,extent,huge_file,flex_bg,inline_data,64bit,dir_nlink,extra_isize
inode_size = 256
options = test_fs=1
}
small = {
blocksize = 1024
inode_size = 128
inode_ratio = 4096
}
floppy = {
blocksize = 1024
inode_size = 128
inode_ratio = 8192
}
big = {
inode_ratio = 32768
}
huge = {
inode_ratio = 65536
}
news = {
inode_ratio = 4096
}
largefile = {
inode_ratio = 1048576
blocksize = -1
}
largefile4 = {
inode_ratio = 4194304
blocksize = -1
}
hurd = {
blocksize = 4096
inode_size = 128
}
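For orientation, inode_ratio in the profiles above is a bytes-per-inode target: mke2fs allocates roughly one inode per inode_ratio bytes of filesystem. A quick arithmetic sketch (the 1 GiB size is illustrative):

# With the default ratio above (one inode per 16384 bytes), a 1 GiB
# filesystem gets roughly this many inodes:
print((1 * 1024**3) // 16384)  # -> 65536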

BIN
platform-tools/mke2fs.exe Normal file

Binary file not shown.

141
platform-tools/package.xml Normal file

@@ -0,0 +1,141 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?><ns2:repository xmlns:ns2="http://schemas.android.com/repository/android/common/01" xmlns:ns3="http://schemas.android.com/repository/android/generic/01" xmlns:ns4="http://schemas.android.com/sdk/android/repo/addon2/01" xmlns:ns5="http://schemas.android.com/sdk/android/repo/repository2/01" xmlns:ns6="http://schemas.android.com/sdk/android/repo/sys-img2/01"><license id="android-sdk-license" type="text">Terms and Conditions
This is the Android Software Development Kit License Agreement
1. Introduction
1.1 The Android Software Development Kit (referred to in the License Agreement as the "SDK" and specifically including the Android system files, packaged APIs, and Google APIs add-ons) is licensed to you subject to the terms of the License Agreement. The License Agreement forms a legally binding contract between you and Google in relation to your use of the SDK.
1.2 "Android" means the Android software stack for devices, as made available under the Android Open Source Project, which is located at the following URL: http://source.android.com/, as updated from time to time.
1.3 A "compatible implementation" means any Android device that (i) complies with the Android Compatibility Definition document, which can be found at the Android compatibility website (http://source.android.com/compatibility) and which may be updated from time to time; and (ii) successfully passes the Android Compatibility Test Suite (CTS).
1.4 "Google" means Google Inc., a Delaware corporation with principal place of business at 1600 Amphitheatre Parkway, Mountain View, CA 94043, United States.
2. Accepting the License Agreement
2.1 In order to use the SDK, you must first agree to the License Agreement. You may not use the SDK if you do not accept the License Agreement.
2.2 By clicking to accept, you hereby agree to the terms of the License Agreement.
2.3 You may not use the SDK and may not accept the License Agreement if you are a person barred from receiving the SDK under the laws of the United States or other countries, including the country in which you are resident or from which you use the SDK.
2.4 If you are agreeing to be bound by the License Agreement on behalf of your employer or other entity, you represent and warrant that you have full legal authority to bind your employer or such entity to the License Agreement. If you do not have the requisite authority, you may not accept the License Agreement or use the SDK on behalf of your employer or other entity.
3. SDK License from Google
3.1 Subject to the terms of the License Agreement, Google grants you a limited, worldwide, royalty-free, non-assignable, non-exclusive, and non-sublicensable license to use the SDK solely to develop applications for compatible implementations of Android.
3.2 You may not use this SDK to develop applications for other platforms (including non-compatible implementations of Android) or to develop another SDK. You are of course free to develop applications for other platforms, including non-compatible implementations of Android, provided that this SDK is not used for that purpose.
3.3 You agree that Google or third parties own all legal right, title and interest in and to the SDK, including any Intellectual Property Rights that subsist in the SDK. "Intellectual Property Rights" means any and all rights under patent law, copyright law, trade secret law, trademark law, and any and all other proprietary rights. Google reserves all rights not expressly granted to you.
3.4 You may not use the SDK for any purpose not expressly permitted by the License Agreement. Except to the extent required by applicable third party licenses, you may not copy (except for backup purposes), modify, adapt, redistribute, decompile, reverse engineer, disassemble, or create derivative works of the SDK or any part of the SDK.
3.5 Use, reproduction and distribution of components of the SDK licensed under an open source software license are governed solely by the terms of that open source software license and not the License Agreement.
3.6 You agree that the form and nature of the SDK that Google provides may change without prior notice to you and that future versions of the SDK may be incompatible with applications developed on previous versions of the SDK. You agree that Google may stop (permanently or temporarily) providing the SDK (or any features within the SDK) to you or to users generally at Google's sole discretion, without prior notice to you.
3.7 Nothing in the License Agreement gives you a right to use any of Google's trade names, trademarks, service marks, logos, domain names, or other distinctive brand features.
3.8 You agree that you will not remove, obscure, or alter any proprietary rights notices (including copyright and trademark notices) that may be affixed to or contained within the SDK.
4. Use of the SDK by You
4.1 Google agrees that it obtains no right, title or interest from you (or your licensors) under the License Agreement in or to any software applications that you develop using the SDK, including any intellectual property rights that subsist in those applications.
4.2 You agree to use the SDK and write applications only for purposes that are permitted by (a) the License Agreement and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions (including any laws regarding the export of data or software to and from the United States or other relevant countries).
4.3 You agree that if you use the SDK to develop applications for general public users, you will protect the privacy and legal rights of those users. If the users provide you with user names, passwords, or other login information or personal information, you must make the users aware that the information will be available to your application, and you must provide legally adequate privacy notice and protection for those users. If your application stores personal or sensitive information provided by users, it must do so securely. If the user provides your application with Google Account information, your application may only use that information to access the user's Google Account when, and for the limited purposes for which, the user has given you permission to do so.
4.4 You agree that you will not engage in any activity with the SDK, including the development or distribution of an application, that interferes with, disrupts, damages, or accesses in an unauthorized manner the servers, networks, or other properties or services of any third party including, but not limited to, Google or any mobile communications carrier.
4.5 You agree that you are solely responsible for (and that Google has no responsibility to you or to any third party for) any data, content, or resources that you create, transmit or display through Android and/or applications for Android, and for the consequences of your actions (including any loss or damage which Google may suffer) by doing so.
4.6 You agree that you are solely responsible for (and that Google has no responsibility to you or to any third party for) any breach of your obligations under the License Agreement, any applicable third party contract or Terms of Service, or any applicable law or regulation, and for the consequences (including any loss or damage which Google or any third party may suffer) of any such breach.
5. Your Developer Credentials
5.1 You agree that you are responsible for maintaining the confidentiality of any developer credentials that may be issued to you by Google or which you may choose yourself and that you will be solely responsible for all applications that are developed under your developer credentials.
6. Privacy and Information
6.1 In order to continually innovate and improve the SDK, Google may collect certain usage statistics from the software including but not limited to a unique identifier, associated IP address, version number of the software, and information on which tools and/or services in the SDK are being used and how they are being used. Before any of this information is collected, the SDK will notify you and seek your consent. If you withhold consent, the information will not be collected.
6.2 The data collected is examined in the aggregate to improve the SDK and is maintained in accordance with Google's Privacy Policy.
7. Third Party Applications
7.1 If you use the SDK to run applications developed by a third party or that access data, content or resources provided by a third party, you agree that Google is not responsible for those applications, data, content, or resources. You understand that all data, content or resources which you may access through such third party applications are the sole responsibility of the person from which they originated and that Google is not liable for any loss or damage that you may experience as a result of the use or access of any of those third party applications, data, content, or resources.
7.2 You should be aware the data, content, and resources presented to you through such a third party application may be protected by intellectual property rights which are owned by the providers (or by other persons or companies on their behalf). You may not modify, rent, lease, loan, sell, distribute or create derivative works based on these data, content, or resources (either in whole or in part) unless you have been specifically given permission to do so by the relevant owners.
7.3 You acknowledge that your use of such third party applications, data, content, or resources may be subject to separate terms between you and the relevant third party. In that case, the License Agreement does not affect your legal relationship with these third parties.
8. Using Android APIs
8.1 Google Data APIs
8.1.1 If you use any API to retrieve data from Google, you acknowledge that the data may be protected by intellectual property rights which are owned by Google or those parties that provide the data (or by other persons or companies on their behalf). Your use of any such API may be subject to additional Terms of Service. You may not modify, rent, lease, loan, sell, distribute or create derivative works based on this data (either in whole or in part) unless allowed by the relevant Terms of Service.
8.1.2 If you use any API to retrieve a user's data from Google, you acknowledge and agree that you shall retrieve data only with the user's explicit consent and only when, and for the limited purposes for which, the user has given you permission to do so. If you use the Android Recognition Service API, documented at the following URL: https://developer.android.com/reference/android/speech/RecognitionService, as updated from time to time, you acknowledge that the use of the API is subject to the Data Processing Addendum for Products where Google is a Data Processor, which is located at the following URL: https://privacy.google.com/businesses/gdprprocessorterms/, as updated from time to time. By clicking to accept, you hereby agree to the terms of the Data Processing Addendum for Products where Google is a Data Processor.
9. Terminating the License Agreement
9.1 The License Agreement will continue to apply until terminated by either you or Google as set out below.
9.2 If you want to terminate the License Agreement, you may do so by ceasing your use of the SDK and any relevant developer credentials.
9.3 Google may at any time, terminate the License Agreement with you if: (A) you have breached any provision of the License Agreement; or (B) Google is required to do so by law; or (C) the partner with whom Google offered certain parts of SDK (such as APIs) to you has terminated its relationship with Google or ceased to offer certain parts of the SDK to you; or (D) Google decides to no longer provide the SDK or certain parts of the SDK to users in the country in which you are resident or from which you use the service, or the provision of the SDK or certain SDK services to you by Google is, in Google's sole discretion, no longer commercially viable.
9.4 When the License Agreement comes to an end, all of the legal rights, obligations and liabilities that you and Google have benefited from, been subject to (or which have accrued over time whilst the License Agreement has been in force) or which are expressed to continue indefinitely, shall be unaffected by this cessation, and the provisions of paragraph 14.7 shall continue to apply to such rights, obligations and liabilities indefinitely.
10. DISCLAIMER OF WARRANTIES
10.1 YOU EXPRESSLY UNDERSTAND AND AGREE THAT YOUR USE OF THE SDK IS AT YOUR SOLE RISK AND THAT THE SDK IS PROVIDED "AS IS" AND "AS AVAILABLE" WITHOUT WARRANTY OF ANY KIND FROM GOOGLE.
10.2 YOUR USE OF THE SDK AND ANY MATERIAL DOWNLOADED OR OTHERWISE OBTAINED THROUGH THE USE OF THE SDK IS AT YOUR OWN DISCRETION AND RISK AND YOU ARE SOLELY RESPONSIBLE FOR ANY DAMAGE TO YOUR COMPUTER SYSTEM OR OTHER DEVICE OR LOSS OF DATA THAT RESULTS FROM SUCH USE.
10.3 GOOGLE FURTHER EXPRESSLY DISCLAIMS ALL WARRANTIES AND CONDITIONS OF ANY KIND, WHETHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES AND CONDITIONS OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
11. LIMITATION OF LIABILITY
11.1 YOU EXPRESSLY UNDERSTAND AND AGREE THAT GOOGLE, ITS SUBSIDIARIES AND AFFILIATES, AND ITS LICENSORS SHALL NOT BE LIABLE TO YOU UNDER ANY THEORY OF LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, CONSEQUENTIAL OR EXEMPLARY DAMAGES THAT MAY BE INCURRED BY YOU, INCLUDING ANY LOSS OF DATA, WHETHER OR NOT GOOGLE OR ITS REPRESENTATIVES HAVE BEEN ADVISED OF OR SHOULD HAVE BEEN AWARE OF THE POSSIBILITY OF ANY SUCH LOSSES ARISING.
12. Indemnification
12.1 To the maximum extent permitted by law, you agree to defend, indemnify and hold harmless Google, its affiliates and their respective directors, officers, employees and agents from and against any and all claims, actions, suits or proceedings, as well as any and all losses, liabilities, damages, costs and expenses (including reasonable attorneys fees) arising out of or accruing from (a) your use of the SDK, (b) any application you develop on the SDK that infringes any copyright, trademark, trade secret, trade dress, patent or other intellectual property right of any person or defames any person or violates their rights of publicity or privacy, and (c) any non-compliance by you with the License Agreement.
13. Changes to the License Agreement
13.1 Google may make changes to the License Agreement as it distributes new versions of the SDK. When these changes are made, Google will make a new version of the License Agreement available on the website where the SDK is made available.
14. General Legal Terms
14.1 The License Agreement constitutes the whole legal agreement between you and Google and governs your use of the SDK (excluding any services which Google may provide to you under a separate written agreement), and completely replaces any prior agreements between you and Google in relation to the SDK.
14.2 You agree that if Google does not exercise or enforce any legal right or remedy which is contained in the License Agreement (or which Google has the benefit of under any applicable law), this will not be taken to be a formal waiver of Google's rights and that those rights or remedies will still be available to Google.
14.3 If any court of law, having the jurisdiction to decide on this matter, rules that any provision of the License Agreement is invalid, then that provision will be removed from the License Agreement without affecting the rest of the License Agreement. The remaining provisions of the License Agreement will continue to be valid and enforceable.
14.4 You acknowledge and agree that each member of the group of companies of which Google is the parent shall be third party beneficiaries to the License Agreement and that such other companies shall be entitled to directly enforce, and rely upon, any provision of the License Agreement that confers a benefit on (or rights in favor of) them. Other than this, no other person or company shall be third party beneficiaries to the License Agreement.
14.5 EXPORT RESTRICTIONS. THE SDK IS SUBJECT TO UNITED STATES EXPORT LAWS AND REGULATIONS. YOU MUST COMPLY WITH ALL DOMESTIC AND INTERNATIONAL EXPORT LAWS AND REGULATIONS THAT APPLY TO THE SDK. THESE LAWS INCLUDE RESTRICTIONS ON DESTINATIONS, END USERS AND END USE.
14.6 The rights granted in the License Agreement may not be assigned or transferred by either you or Google without the prior written approval of the other party. Neither you nor Google shall be permitted to delegate their responsibilities or obligations under the License Agreement without the prior written approval of the other party.
14.7 The License Agreement, and your relationship with Google under the License Agreement, shall be governed by the laws of the State of California without regard to its conflict of laws provisions. You and Google agree to submit to the exclusive jurisdiction of the courts located within the county of Santa Clara, California to resolve any legal matter arising from the License Agreement. Notwithstanding this, you agree that Google shall still be allowed to apply for injunctive remedies (or an equivalent type of urgent legal relief) in any jurisdiction.
January 16, 2019</license><localPackage path="platform-tools" obsolete="false"><type-details xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="ns3:genericDetailsType"/><revision><major>29</major><minor>0</minor><micro>4</micro></revision><display-name>Android SDK Platform-Tools</display-name><uses-license ref="android-sdk-license"/></localPackage></ns2:repository>


@@ -0,0 +1,2 @@
Pkg.UserSrc=false
Pkg.Revision=29.0.4

BIN
platform-tools/sqlite3.exe Normal file

Binary file not shown.


@@ -0,0 +1,205 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS


@@ -0,0 +1 @@
cad35e22dcad126c6a20663ded101565e6326d82


@@ -0,0 +1,28 @@
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
_CATAPULT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
_TESTS = [
{'path': os.path.join(
_CATAPULT_PATH, 'common', 'eslint', 'bin', 'run_tests')},
{'path': os.path.join(
_CATAPULT_PATH, 'common', 'py_trace_event', 'bin', 'run_tests')},
{'path': os.path.join(
_CATAPULT_PATH, 'common', 'py_utils', 'bin', 'run_tests')},
{'path': os.path.join(
_CATAPULT_PATH, 'common', 'py_vulcanize', 'bin', 'run_py_tests')},
]
if __name__ == '__main__':
sys.path.append(_CATAPULT_PATH)
from catapult_build import test_runner
sys.exit(test_runner.Main('project', _TESTS, sys.argv))


@@ -0,0 +1,229 @@
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates the Chrome reference builds.
Usage:
$ /path/to/update_reference_build.py
$ git commit -a
$ git cl upload
"""
import collections
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import urllib2
import zipfile
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'py_utils'))
from py_utils import cloud_storage
from dependency_manager import base_config
def BuildNotFoundError(error_string):
raise ValueError(error_string)
_CHROME_BINARIES_CONFIG = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..', 'common',
'py_utils', 'py_utils', 'chrome_binaries.json')
CHROME_GS_BUCKET = 'chrome-unsigned'
# Remove a platform name from this list to disable updating it.
# Add one to enable updating it. (Must also update _PLATFORM_MAP.)
_PLATFORMS_TO_UPDATE = ['mac_x86_64', 'win_x86', 'win_AMD64', 'linux_x86_64',
'android_k_armeabi-v7a', 'android_l_arm64-v8a',
'android_l_armeabi-v7a', 'android_n_armeabi-v7a',
'android_n_arm64-v8a']
# Remove a channel name from this list to disable updating it.
# Add one to enable updating it.
_CHANNELS_TO_UPDATE = ['stable', 'canary', 'dev']
# Omaha is Chrome's autoupdate server. It reports the current versions used
# by each platform on each channel.
_OMAHA_PLATFORMS = { 'stable': ['mac', 'linux', 'win', 'android'],
'dev': ['linux'], 'canary': ['mac', 'win']}
# All of the information we need to update each platform.
# omaha: name omaha uses for the platforms.
# zip_name: name of the zip file to be retrieved from cloud storage.
# gs_build: name of the Chrome build platform used in cloud storage.
# destination: Name of the folder to download the reference build to.
UpdateInfo = collections.namedtuple('UpdateInfo',
'omaha, gs_folder, gs_build, zip_name')
_PLATFORM_MAP = {'mac_x86_64': UpdateInfo(omaha='mac',
gs_folder='desktop-*',
gs_build='mac64',
zip_name='chrome-mac.zip'),
'win_x86': UpdateInfo(omaha='win',
gs_folder='desktop-*',
gs_build='win-clang',
zip_name='chrome-win-clang.zip'),
'win_AMD64': UpdateInfo(omaha='win',
gs_folder='desktop-*',
gs_build='win64-clang',
zip_name='chrome-win64-clang.zip'),
'linux_x86_64': UpdateInfo(omaha='linux',
gs_folder='desktop-*',
gs_build='linux64',
zip_name='chrome-linux64.zip'),
'android_k_armeabi-v7a': UpdateInfo(omaha='android',
gs_folder='android-*',
gs_build='arm',
zip_name='Chrome.apk'),
'android_l_arm64-v8a': UpdateInfo(omaha='android',
gs_folder='android-*',
gs_build='arm_64',
zip_name='ChromeModern.apk'),
'android_l_armeabi-v7a': UpdateInfo(omaha='android',
gs_folder='android-*',
gs_build='arm',
zip_name='Chrome.apk'),
'android_n_armeabi-v7a': UpdateInfo(omaha='android',
gs_folder='android-*',
gs_build='arm',
zip_name='Monochrome.apk'),
'android_n_arm64-v8a': UpdateInfo(omaha='android',
gs_folder='android-*',
gs_build='arm_64',
zip_name='Monochrome.apk'),
}
def _ChannelVersionsMap(channel):
rows = _OmahaReportVersionInfo(channel)
omaha_versions_map = _OmahaVersionsMap(rows, channel)
channel_versions_map = {}
for platform in _PLATFORMS_TO_UPDATE:
omaha_platform = _PLATFORM_MAP[platform].omaha
if omaha_platform in omaha_versions_map:
channel_versions_map[platform] = omaha_versions_map[omaha_platform]
return channel_versions_map
def _OmahaReportVersionInfo(channel):
url ='https://omahaproxy.appspot.com/all?channel=%s' % channel
lines = urllib2.urlopen(url).readlines()
return [l.split(',') for l in lines]
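# A sketch of the rows returned above (column names from the header check
# in _OmahaVersionsMap below; the version number is made up):
#   ['os', 'channel', 'current_version', ...]
#   ['linux', 'stable', '77.0.3865.90', ...]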
def _OmahaVersionsMap(rows, channel):
platforms = _OMAHA_PLATFORMS.get(channel, [])
if (len(rows) < 1 or
not rows[0][0:3] == ['os', 'channel', 'current_version']):
raise ValueError(
'Omaha report is not in the expected form: %s.' % rows)
versions_map = {}
for row in rows[1:]:
if row[1] != channel:
raise ValueError(
'Omaha report contains a line with an unexpected channel: %s' % row[1])
if row[0] in platforms:
versions_map[row[0]] = row[2]
logging.warn('versions map: %s' % versions_map)
if not all(platform in versions_map for platform in platforms):
raise ValueError(
'Omaha report did not contain all desired platforms for channel %s' % channel)
return versions_map
def _QueuePlatformUpdate(platform, version, config, channel):
""" platform: the name of the platform for the browser to
be downloaded & updated from cloud storage. """
platform_info = _PLATFORM_MAP[platform]
filename = platform_info.zip_name
# remote_path example: desktop-*/30.0.1595.0/precise32/chrome-precise32.zip
remote_path = '%s/%s/%s/%s' % (
platform_info.gs_folder, version, platform_info.gs_build, filename)
if not cloud_storage.Exists(CHROME_GS_BUCKET, remote_path):
cloud_storage_path = 'gs://%s/%s' % (CHROME_GS_BUCKET, remote_path)
raise BuildNotFoundError(
'Failed to find %s build for version %s at path %s.' % (
platform, version, cloud_storage_path))
reference_builds_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'chrome_telemetry_build',
'reference_builds', channel)
if not os.path.exists(reference_builds_folder):
os.makedirs(reference_builds_folder)
local_dest_path = os.path.join(reference_builds_folder, filename)
cloud_storage.Get(CHROME_GS_BUCKET, remote_path, local_dest_path)
_ModifyBuildIfNeeded(local_dest_path, platform)
config.AddCloudStorageDependencyUpdateJob(
'chrome_%s' % channel, platform, local_dest_path, version=version,
execute_job=False)
def _ModifyBuildIfNeeded(location, platform):
"""Hook to modify the build before saving it for Telemetry to use.
This can be used to remove various utilities that cause noise in a
test environment. Right now, it is just used to remove Keystone,
which is a tool used to autoupdate Chrome.
"""
if platform == 'mac_x86_64':
_RemoveKeystoneFromBuild(location)
return
if 'mac' in platform:
raise NotImplementedError(
'Platform <%s> sounds like it is an OSX version. If so, we may need to '
'remove Keystone from it per crbug.com/932615. Please edit this script'
' and teach it what needs to be done :).' % platform)
def _RemoveKeystoneFromBuild(location):
"""Removes the Keystone autoupdate binary from the chrome mac zipfile."""
logging.info('Removing keystone from mac build at %s' % location)
temp_folder = tempfile.mkdtemp(prefix='RemoveKeystoneFromBuild')
try:
subprocess.check_call(['unzip', '-q', location, '-d', temp_folder])
keystone_folder = os.path.join(
temp_folder, 'chrome-mac', 'Google Chrome.app', 'Contents',
'Frameworks', 'Google Chrome Framework.framework', 'Frameworks',
'KeystoneRegistration.framework')
shutil.rmtree(keystone_folder)
os.remove(location)
subprocess.check_call(['zip', '--quiet', '--recurse-paths', '--symlinks',
location, 'chrome-mac'],
cwd=temp_folder)
finally:
shutil.rmtree(temp_folder)
def UpdateBuilds():
config = base_config.BaseConfig(_CHROME_BINARIES_CONFIG, writable=True)
for channel in _CHANNELS_TO_UPDATE:
channel_versions_map = _ChannelVersionsMap(channel)
for platform in channel_versions_map:
print 'Downloading Chrome (%s channel) on %s' % (channel, platform)
current_version = config.GetVersion('chrome_%s' % channel, platform)
channel_version = channel_versions_map.get(platform)
print 'current: %s, channel: %s' % (current_version, channel_version)
if current_version and current_version == channel_version:
continue
_QueuePlatformUpdate(platform, channel_version, config, channel)
print 'Updating chrome builds with downloaded binaries'
config.ExecuteUpdateJobs(force=True)
def main():
logging.getLogger().setLevel(logging.DEBUG)
UpdateBuilds()
if __name__ == '__main__':
main()


@@ -0,0 +1,20 @@
ESLint
Copyright jQuery Foundation and other contributors, https://jquery.org/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -0,0 +1,5 @@
This directory contains the Catapult eslint config, custom Catapult eslint rules,
and tests for those rules.
Some of our custom rules are modified versions of those included with eslint, as
suggested in https://goo.gl/uAxFHq.
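As a usage sketch, the wrapper script that follows accepts either an explicit path list or --all (the run_eslint name is an assumption; this commit view suppresses some file names):

run_eslint --paths dashboard/dashboard
run_eslint --all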


@@ -0,0 +1,54 @@
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
_CATAPULT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__),
os.path.pardir, os.path.pardir, os.path.pardir))
_ESLINT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir))
DIRECTORIES_TO_LINT = [
os.path.join(_CATAPULT_PATH, 'dashboard', 'dashboard'),
os.path.join(_CATAPULT_PATH, 'tracing', 'tracing')
]
def _AddToPathIfNeeded(path):
if path not in sys.path:
sys.path.insert(0, path)
if __name__ == '__main__':
_AddToPathIfNeeded(_ESLINT_PATH)
import eslint
parser = argparse.ArgumentParser(
description='Wrapper script to run eslint on Catapult code')
parser.add_argument('--paths', '-p', default=None, nargs='+', metavar='PATH',
help='List of paths to lint')
parser.add_argument('--all', default=None, action='store_true',
help='Runs eslint on all applicable Catapult code')
parser.add_argument('--extra-args', default=None, type=str,
help='A string of extra arguments to pass to eslint')
args = parser.parse_args(sys.argv[1:])
if ((args.paths is not None and args.all is not None) or
(args.paths is None and args.all is None)):
print 'Either --paths or --all must be used, but not both.\n'
parser.print_help()
sys.exit(1)
paths = DIRECTORIES_TO_LINT if args.all else args.paths
success, output = eslint.RunEslint(paths, extra_args=args.extra_args)
print output
sys.exit(not success)


@@ -0,0 +1,35 @@
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
_CATAPULT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__),
os.path.pardir, os.path.pardir, os.path.pardir))
_ESLINT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir))
def _RunTestsOrDie(top_level_dir):
exit_code = run_with_typ.Run(top_level_dir, path=[_ESLINT_PATH])
if exit_code:
sys.exit(exit_code)
def _AddToPathIfNeeded(path):
if path not in sys.path:
sys.path.insert(0, path)
if __name__ == '__main__':
_AddToPathIfNeeded(_CATAPULT_PATH)
from catapult_build import run_with_typ
_RunTestsOrDie(os.path.join(_ESLINT_PATH, 'eslint'))


@@ -0,0 +1,68 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
_CATAPULT_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.path.pardir, os.path.pardir, os.path.pardir)
def _AddToPathIfNeeded(path):
if path not in sys.path:
sys.path.insert(0, path)
def _UpdateSysPathIfNeeded():
_AddToPathIfNeeded(os.path.join(_CATAPULT_PATH, 'common', 'node_runner'))
_AddToPathIfNeeded(os.path.join(_CATAPULT_PATH, 'common', 'py_utils'))
_UpdateSysPathIfNeeded()
import py_utils
from node_runner import node_util
BASE_ESLINT_CMD = [
node_util.GetNodePath(),
os.path.join(node_util.GetNodeModulesPath(), 'eslint', 'bin', 'eslint.js'),
'--color'
]
DEFAULT_ESLINT_RULES_DIR = os.path.join(
py_utils.GetCatapultDir(), 'common', 'eslint', 'rules')
def _CreateEslintCommand(rulesdir, extra_args):
eslint_cmd = BASE_ESLINT_CMD + [
'--rulesdir', rulesdir, '--ext', '.js,.html'
]
if extra_args:
eslint_cmd.extend(extra_args.strip().split(' '))
return eslint_cmd
def RunEslint(paths, rules_dir=DEFAULT_ESLINT_RULES_DIR, extra_args=None):
"""Runs eslint on a list of paths.
Args:
paths: A list of paths to run eslint on.
rules_dir: A directory of custom eslint rules.
extra_args: A string to append to the end of the eslint command.
"""
if type(paths) is not list or len(paths) == 0:
raise ValueError('Must specify a non-empty list of paths to lint.')
try:
eslint_cmd = _CreateEslintCommand(rules_dir, extra_args)
return True, subprocess.check_output(eslint_cmd + paths,
stderr=subprocess.STDOUT).rstrip()
except subprocess.CalledProcessError as e:
return False, e.output.rstrip()
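A minimal usage sketch for RunEslint (the lint target path is illustrative):

success, output = eslint.RunEslint(['tracing/tracing'])
print output  # eslint's report; success is False when lint errors were found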


@@ -0,0 +1,36 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import eslint
import os
import tempfile
import unittest
_TEMP_FILE_CONTENTS = '''<!DOCTYPE html>
<!--
Copyright 2016 The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
-->
<script>
// This should cause a linter error because we require camelCase.
var non_camel_case = 0;
</script>
'''
class SmokeTest(unittest.TestCase):
def testEslintFindsError(self):
try:
tmp_file = tempfile.NamedTemporaryFile(
delete=False, dir=os.path.dirname(__file__), suffix=".html")
tmp_file.write(_TEMP_FILE_CONTENTS)
tmp_file.close()
success, output = eslint.RunEslint([tmp_file.name])
self.assertFalse(success)
self.assertTrue('is not in camel case' in output)
finally:
os.remove(tmp_file.name)


@@ -0,0 +1,154 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/* eslint-disable */
/**
* @fileoverview Rule to flag non-camelcased identifiers
* @author Nicholas C. Zakas
*/
'use strict';
//------------------------------------------------------------------------------
// Rule Definition
//------------------------------------------------------------------------------
module.exports = {
meta: {
docs: {
description: "enforce Catapult camelcase naming convention",
category: "Stylistic Issues",
recommended: false
},
schema: [
{
type: "object",
properties: {
properties: {
enum: ["always", "never"]
}
},
additionalProperties: false
}
]
},
create(context) {
//--------------------------------------------------------------------------
// Helpers
//--------------------------------------------------------------------------
// contains reported nodes to avoid reporting twice on destructuring with shorthand notation
var reported = [];
/**
* Checks if a string contains an underscore and isn't all upper-case
* @param {string} name The string to check.
* @returns {boolean} if the string is underscored
* @private
*/
function isUnderscored(name) {
// if there's an underscore, it might be A_CONSTANT, which is okay
return name.indexOf("_") > -1 && name !== name.toUpperCase();
}
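// For example (illustrative inputs): isUnderscored("first_name") is true,
// while isUnderscored("FIRST_NAME") and isUnderscored("firstName") are false.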
/**
* Reports an AST node as a rule violation.
* @param {ASTNode} node The node to report.
* @returns {void}
* @private
*/
function report(node) {
if (reported.indexOf(node) < 0) {
reported.push(node);
context.report(node, "Identifier '{{name}}' is not in camel case.", { name: node.name });
}
}
var options = context.options[0] || {};
let properties = options.properties || "";
if (properties !== "always" && properties !== "never") {
properties = "always";
}
return {
Identifier(node) {
/*
* Leading and trailing underscores are commonly used to flag
* private/protected identifiers, strip them.
*
* NOTE: This has four Catapult-specific style exceptions:
*
* - The prefix opt_
* - The prefix g_
* - The suffix _smallerIsBetter
* - The suffix _biggerIsBetter
*/
var name = node.name.replace(/(?:^opt_)|^(?:^g_)|^_+|_+$|(?:_smallerIsBetter)$|(?:_biggerIsBetter)$/g, ""),
effectiveParent = (node.parent.type === "MemberExpression") ? node.parent.parent : node.parent;
// MemberExpressions get special rules
if (node.parent.type === "MemberExpression") {
// "never" check properties
if (properties === "never") {
return;
}
// Always report underscored object names
if (node.parent.object.type === "Identifier" &&
node.parent.object.name === node.name &&
isUnderscored(name)) {
report(node);
// Report AssignmentExpressions only if they are the left side of the assignment
} else if (effectiveParent.type === "AssignmentExpression" &&
isUnderscored(name) &&
(effectiveParent.right.type !== "MemberExpression" ||
effectiveParent.left.type === "MemberExpression" &&
effectiveParent.left.property.name === node.name)) {
report(node);
}
// Properties have their own rules
} else if (node.parent.type === "Property") {
// "never" check properties
if (properties === "never") {
return;
}
if (node.parent.parent && node.parent.parent.type === "ObjectPattern" &&
node.parent.key === node && node.parent.value !== node) {
return;
}
if (isUnderscored(name) && effectiveParent.type !== "CallExpression") {
report(node);
}
// Check if it's an import specifier
} else if (["ImportSpecifier", "ImportNamespaceSpecifier", "ImportDefaultSpecifier"].indexOf(node.parent.type) >= 0) {
// Report only if the local imported identifier is underscored
if (node.parent.local && node.parent.local.name === node.name && isUnderscored(name)) {
report(node);
}
// Report anything that is underscored that isn't a CallExpression
} else if (isUnderscored(name) && effectiveParent.type !== "CallExpression") {
report(node);
}
}
};
}
};


@@ -0,0 +1,324 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/* eslint-disable */
/**
* @fileoverview Tests for camelcase rule.
* @author Nicholas C. Zakas
*/
'use strict';
//------------------------------------------------------------------------------
// Requirements
//------------------------------------------------------------------------------
var rule = require("../rules/catapult-camelcase"),
RuleTester = require("../../node_runner/node_runner/node_modules/eslint/lib/testers/rule-tester");
//------------------------------------------------------------------------------
// Tests
//------------------------------------------------------------------------------
var ruleTester = new RuleTester();
ruleTester.run("camelcase", rule, {
valid: [
"firstName = \"Nicholas\"",
"FIRST_NAME = \"Nicholas\"",
"__myPrivateVariable = \"Patrick\"",
"myPrivateVariable_ = \"Patrick\"",
"function doSomething(){}",
"do_something()",
"foo.do_something()",
"var foo = bar.baz_boom;",
"var foo = bar.baz_boom.something;",
"foo.boom_pow.qux = bar.baz_boom.something;",
"if (bar.baz_boom) {}",
"var obj = { key: foo.bar_baz };",
"var arr = [foo.bar_baz];",
"[foo.bar_baz]",
"var arr = [foo.bar_baz.qux];",
"[foo.bar_baz.nesting]",
"if (foo.bar_baz === boom.bam_pow) { [foo.baz_boom] }",
// These tests are for Catapult-specific exceptions.
"opt_firstName = \"Nicholas\"",
"g_firstName = \"Nicholas\"",
"sizeInBytes_smallerIsBetter = \"Nicholas\"",
"sizeInBytes_biggerIsBetter = \"Nicholas\"",
{
code: "var o = {key: 1}",
options: [{properties: "always"}]
},
{
code: "var o = {bar_baz: 1}",
options: [{properties: "never"}]
},
{
code: "obj.a_b = 2;",
options: [{properties: "never"}]
},
{
code: "var obj = {\n a_a: 1 \n};\n obj.a_b = 2;",
options: [{properties: "never"}]
},
{
code: "obj.foo_bar = function(){};",
options: [{properties: "never"}]
},
{
code: "var { category_id: category } = query;",
parserOptions: { ecmaVersion: 6 }
},
{
code: "var { category_id: category } = query;",
parserOptions: { ecmaVersion: 6 },
options: [{properties: "never"}]
},
{
code: "import { camelCased } from \"external module\";",
parserOptions: { ecmaVersion: 6, sourceType: "module" }
},
{
code: "import { no_camelcased as camelCased } from \"external-module\";",
parserOptions: { ecmaVersion: 6, sourceType: "module" }
},
{
code: "import { no_camelcased as camelCased, anoterCamelCased } from \"external-module\";",
parserOptions: { ecmaVersion: 6, sourceType: "module" }
}
],
invalid: [
{
code: "first_name = \"Nicholas\"",
errors: [
{
message: "Identifier 'first_name' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "__private_first_name = \"Patrick\"",
errors: [
{
message: "Identifier '__private_first_name' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "function foo_bar(){}",
errors: [
{
message: "Identifier 'foo_bar' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "obj.foo_bar = function(){};",
errors: [
{
message: "Identifier 'foo_bar' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "bar_baz.foo = function(){};",
errors: [
{
message: "Identifier 'bar_baz' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "[foo_bar.baz]",
errors: [
{
message: "Identifier 'foo_bar' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "if (foo.bar_baz === boom.bam_pow) { [foo_bar.baz] }",
errors: [
{
message: "Identifier 'foo_bar' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "foo.bar_baz = boom.bam_pow",
errors: [
{
message: "Identifier 'bar_baz' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "var foo = { bar_baz: boom.bam_pow }",
errors: [
{
message: "Identifier 'bar_baz' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "foo.qux.boom_pow = { bar: boom.bam_pow }",
errors: [
{
message: "Identifier 'boom_pow' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "var o = {bar_baz: 1}",
options: [{properties: "always"}],
errors: [
{
message: "Identifier 'bar_baz' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "obj.a_b = 2;",
options: [{properties: "always"}],
errors: [
{
message: "Identifier 'a_b' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "obj.a_b = 2;",
options: [{properties: "always"}],
errors: [
{
message: "Identifier 'a_b' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "var { category_id: category_id } = query;",
parserOptions: { ecmaVersion: 6 },
errors: [
{
message: "Identifier 'category_id' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "var { category_id } = query;",
parserOptions: { ecmaVersion: 6 },
errors: [
{
message: "Identifier 'category_id' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "import no_camelcased from \"external-module\";",
parserOptions: { ecmaVersion: 6, sourceType: "module" },
errors: [
{
message: "Identifier 'no_camelcased' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "import * as no_camelcased from \"external-module\";",
parserOptions: { ecmaVersion: 6, sourceType: "module" },
errors: [
{
message: "Identifier 'no_camelcased' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "import { no_camelcased } from \"external-module\";",
parserOptions: { ecmaVersion: 6, sourceType: "module" },
errors: [
{
message: "Identifier 'no_camelcased' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "import { no_camelcased as no_camel_cased } from \"external module\";",
parserOptions: { ecmaVersion: 6, sourceType: "module" },
errors: [
{
message: "Identifier 'no_camel_cased' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "import { camelCased as no_camel_cased } from \"external module\";",
parserOptions: { ecmaVersion: 6, sourceType: "module" },
errors: [
{
message: "Identifier 'no_camel_cased' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "import { camelCased, no_camelcased } from \"external-module\";",
parserOptions: { ecmaVersion: 6, sourceType: "module" },
errors: [
{
message: "Identifier 'no_camelcased' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "import { no_camelcased as camelCased, another_no_camelcased } from \"external-module\";",
parserOptions: { ecmaVersion: 6, sourceType: "module" },
errors: [
{
message: "Identifier 'another_no_camelcased' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "import camelCased, { no_camelcased } from \"external-module\";",
parserOptions: { ecmaVersion: 6, sourceType: "module" },
errors: [
{
message: "Identifier 'no_camelcased' is not in camel case.",
type: "Identifier"
}
]
},
{
code: "import no_camelcased, { another_no_camelcased as camelCased } from \"external-module\";",
parserOptions: { ecmaVersion: 6, sourceType: "module" },
errors: [
{
message: "Identifier 'no_camelcased' is not in camel case.",
type: "Identifier"
}
]
}
]
});

View File

@ -0,0 +1,105 @@
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Print statistics about the rate of commits to a repository."""
import datetime
import itertools
import json
import math
import urllib
import urllib2
_BASE_URL = 'https://chromium.googlesource.com'
# Can be up to 10,000.
_REVISION_COUNT = 10000
_REPOSITORIES = [
'chromium/src',
'angle/angle',
'skia',
'v8/v8',
]
def Pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2, s3), ..."""
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
def Percentile(data, percentile):
"""Find a percentile of a list of values.
Parameters:
data: A sorted list of values.
percentile: The percentile to look up, from 0.0 to 1.0.
Returns:
The percentile.
Raises:
ValueError: If data is empty.
"""
if not data:
raise ValueError('data is empty')
k = (len(data) - 1) * percentile
f = math.floor(k)
c = math.ceil(k)
if f == c:
return data[int(k)]
return data[int(f)] * (c - k) + data[int(c)] * (k - f)
def CommitTimes(repository, revision_count):
parameters = urllib.urlencode((('n', revision_count), ('format', 'JSON')))
url = '%s/%s/+log?%s' % (_BASE_URL, urllib.quote(repository), parameters)
data = json.loads(''.join(urllib2.urlopen(url).read().splitlines()[1:]))
commit_times = []
for revision in data['log']:
commit_time_string = revision['committer']['time']
commit_time = datetime.datetime.strptime(
commit_time_string, '%a %b %d %H:%M:%S %Y')
commit_times.append(commit_time - datetime.timedelta(hours=7))
return commit_times
def IsWeekday(time):
return time.weekday() < 5
def main():
for repository in _REPOSITORIES:
commit_times = CommitTimes(repository, _REVISION_COUNT)
commit_durations = []
for time1, time2 in Pairwise(commit_times):
#if not (IsWeekday(time1) and IsWeekday(time2)):
# continue
commit_durations.append((time1 - time2).total_seconds() / 60.)
commit_durations.sort()
print 'REPOSITORY:', repository
print 'Start Date:', min(commit_times), 'PDT'
print ' End Date:', max(commit_times), 'PDT'
print ' Duration:', max(commit_times) - min(commit_times)
print ' n:', len(commit_times)
for p in (0.25, 0.50, 0.90):
percentile = Percentile(commit_durations, p)
print '%3d%% commit duration:' % (p * 100), '%6.1fm' % percentile
mean = math.fsum(commit_durations) / len(commit_durations)
print 'Mean commit duration:', '%6.1fm' % mean
print
if __name__ == '__main__':
main()
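A quick, hypothetical sanity check of Percentile's linear interpolation (not part of the script; assumes the function above is importable):

    # For four sorted values and percentile 0.5: k = (4 - 1) * 0.5 = 1.5,
    # so the result interpolates halfway between data[1] and data[2].
    data = [10.0, 20.0, 30.0, 40.0]
    assert Percentile(data, 0.5) == 25.0   # 20 * 0.5 + 30 * 0.5
    assert Percentile(data, 0.0) == 10.0   # f == c == 0, exact element
    assert Percentile(data, 1.0) == 40.0   # f == c == 3, exact element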

View File

@ -0,0 +1,93 @@
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Query build slave hardware info, and print it to stdout as csv."""
import csv
import json
import logging
import sys
import urllib2
_MASTERS = [
'chromium.perf',
'chromium.perf.fyi',
'client.catapult',
'tryserver.chromium.perf',
'tryserver.client.catapult',
]
_KEYS = [
'master', 'builder', 'hostname',
'os family', 'os version', 'bitness (userland)',
'product name', 'architecture', 'processor count', 'processor type',
'memory total',
'facter version', 'git version', 'puppet version', 'python version',
'ruby version',
'android device 1', 'android device 2', 'android device 3',
'android device 4', 'android device 5', 'android device 6',
'android device 7', 'android device 8',
]
_EXCLUDED_KEYS = frozenset([
'architecture (userland)',
'b directory',
'last puppet run',
'uptime',
'windows version',
])
def main():
writer = csv.DictWriter(sys.stdout, _KEYS)
writer.writeheader()
for master_name in _MASTERS:
master_data = json.load(urllib2.urlopen(
'http://build.chromium.org/p/%s/json/slaves' % master_name))
slaves = sorted(master_data.iteritems(),
key=lambda x: (x[1]['builders'].keys(), x[0]))
for slave_name, slave_data in slaves:
for builder_name in slave_data['builders']:
row = {
'master': master_name,
'builder': builder_name,
'hostname': slave_name,
}
host_data = slave_data['host']
if host_data:
host_data = host_data.splitlines()
if len(host_data) > 1:
for line in host_data:
if not line:
continue
key, value = line.split(': ')
if key in _EXCLUDED_KEYS:
continue
row[key] = value
# Munge keys.
row = {key.replace('_', ' '): value for key, value in row.iteritems()}
if 'osfamily' in row:
row['os family'] = row.pop('osfamily')
if 'product name' not in row and slave_name.startswith('slave'):
row['product name'] = 'Google Compute Engine'
try:
writer.writerow(row)
except ValueError:
logging.error(row)
raise
if __name__ == '__main__':
main()

View File

@ -0,0 +1,15 @@
#!/bin/sh
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Script to SSH into a list of bots and set up their keychains for Telemetry.
# https://www.chromium.org/developers/telemetry/telemetry-mac-keychain-setup
for hostname in "$@"
do
ssh -t "$hostname" 'security unlock-keychain login.keychain
security delete-generic-password -s "Chrome Safe Storage" login.keychain
security add-generic-password -a Chrome -w "+NTclOvR4wLMgRlLIL9bHQ==" \
-s "Chrome Safe Storage" -A login.keychain'
done

View File

@ -0,0 +1,11 @@
Update binaries:
1. Download archives of pre-compiled binaries.
2. Unzip archives.
3. Re-zip just the binary (see the sketch after this list):
`zip new.zip node-v10.14.1-linux-x64/bin/node`
4. Use the update script:
`./dependency_manager/bin/update --config
common/node_runner/node_runner/node_binaries.json --dependency node --path
new.zip --platform linux_x86_64`
5. Mail out the automated change to `node_binaries.json` for review and CQ.
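Step 3 can also be scripted. A minimal sketch using Python's zipfile module; the archive member path is the example from step 3 and should be adjusted for the actual download:

    # Sketch: re-zip just the node binary before feeding it to the update script.
    import zipfile

    member = 'node-v10.14.1-linux-x64/bin/node'  # path inside the unpacked archive
    with zipfile.ZipFile('new.zip', 'w', zipfile.ZIP_DEFLATED) as zf:
        zf.write(member)  # keeps the same relative path, like `zip new.zip <member>`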

View File

@ -0,0 +1,4 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

View File

@ -0,0 +1,53 @@
#!/usr/bin/env node
'use strict';
/*
Copyright 2018 The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
This script wraps common HTML transformations including stripping whitespace and
comments from HTML, CSS, and Javascript.
*/
const dom5 = require('dom5');
const escodegen = require('escodegen');
const espree = require('espree');
const fs = require('fs');
const nopt = require('nopt');
const args = nopt();
const filename = args.argv.remain[0];
let html = fs.readFileSync(filename).toString('utf8');
let parsedHtml = dom5.parse(html);
// First, collapse text nodes around comments (by removing comment nodes,
// re-serializing, and re-parsing) in order to prevent multiple extraneous
// newlines.
for (const node of dom5.nodeWalkAll(parsedHtml, () => true)) {
if (dom5.isCommentNode(node)) {
dom5.remove(node);
}
}
html = dom5.serialize(parsedHtml);
parsedHtml = dom5.parse(html);
// Some of these transformations are based on polyclean:
// https://github.com/googlearchive/polyclean
for (const node of dom5.nodeWalkAll(parsedHtml, () => true)) {
if (dom5.isTextNode(node)) {
dom5.setTextContent(node, dom5.getTextContent(node)
.replace(/ *\n+ */g, '\n')
.replace(/\n+/g, '\n'));
} else if (dom5.predicates.hasTagName('script')(node) &&
!dom5.predicates.hasAttr('src')(node)) {
let text = dom5.getTextContent(node);
const ast = espree.parse(text, {ecmaVersion: 2018});
text = escodegen.generate(ast, {format: {indent: {style: ''}}});
dom5.setTextContent(node, text);
} else if (dom5.predicates.hasTagName('style')(node)) {
dom5.setTextContent(node, dom5.getTextContent(node)
.replace(/[\r\n]/g, '')
.replace(/ {2,}/g, ' ')
.replace(/(^|[;,\:\{\}]) /g, '$1')
.replace(/ ($|[;,\{\}])/g, '$1'));
}
}
fs.writeFileSync(filename, dom5.serialize(parsedHtml));

View File

@ -0,0 +1,21 @@
#!/usr/bin/env node
'use strict';
/*
Copyright 2019 The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
This script strips whitespace and comments from Javascript.
*/
const escodegen = require('escodegen');
const espree = require('espree');
const fs = require('fs');
const nopt = require('nopt');
const args = nopt();
const filename = args.argv.remain[0];
let text = fs.readFileSync(filename).toString('utf8');
const ast = espree.parse(text, {ecmaVersion: 2018});
text = escodegen.generate(ast, {format: {indent: {style: ''}}});
fs.writeFileSync(filename, text);

View File

@ -0,0 +1,53 @@
{
"config_type": "BaseConfig",
"dependencies": {
"node": {
"cloud_storage_base_folder": "binary_dependencies",
"cloud_storage_bucket": "chromium-telemetry",
"file_info": {
"linux_x86_64": {
"cloud_storage_hash": "27ad092b0ce59d2da32090a00f717f0c31e65240",
"download_path": "bin/node/node-linux64.zip",
"path_within_archive": "node-v10.14.1-linux-x64/bin/node",
"version_in_cs": "6.7.0"
},
"mac_x86_64": {
"cloud_storage_hash": "1af7c221e530165af8a6ab8ff7ccb1f2dd54036d",
"download_path": "bin/node/node-mac64.zip",
"path_within_archive": "node-v6.7.0-darwin-x64/bin/node",
"version_in_cs": "6.7.0"
},
"win_AMD64": {
"cloud_storage_hash": "23f21bfb2edf874a8b6bdb6c1acb408bc7edeced",
"download_path": "bin/node/node-win64.zip",
"path_within_archive": "node-v6.7.0-win-x64/node.exe",
"version_in_cs": "6.7.0"
}
}
},
"npm": {
"cloud_storage_base_folder": "binary_dependencies",
"cloud_storage_bucket": "chromium-telemetry",
"file_info": {
"linux_x86_64": {
"cloud_storage_hash": "5750e968975e7f5ab8cb694f5e92a34a890e129d",
"download_path": "bin/node/node-linux64.zip",
"path_within_archive": "node-v6.7.0-linux-x64/lib/node_modules/npm/bin/npm-cli.js",
"version_in_cs": "6.7.0"
},
"mac_x86_64": {
"cloud_storage_hash": "1af7c221e530165af8a6ab8ff7ccb1f2dd54036d",
"download_path": "bin/node/node-mac64.zip",
"path_within_archive": "node-v6.7.0-darwin-x64/lib/node_modules/npm/bin/npm-cli.js",
"version_in_cs": "6.7.0"
},
"win_AMD64": {
"cloud_storage_hash": "23f21bfb2edf874a8b6bdb6c1acb408bc7edeced",
"download_path": "bin/node/node-win64.zip",
"path_within_archive": "node-v6.7.0-win-x64\\node_modules\\npm\\bin\\npm-cli.js",
"version_in_cs": "6.7.0"
}
}
}
}
}

View File

@ -0,0 +1,60 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import py_utils
from py_utils import binary_manager
from py_utils import dependency_util
def _NodeBinariesConfigPath():
return os.path.realpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'node_binaries.json'))
class _NodeManager(object):
def __init__(self):
self.bm = binary_manager.BinaryManager(
[_NodeBinariesConfigPath()])
self.os_name = dependency_util.GetOSNameForCurrentDesktopPlatform()
self.arch_name = dependency_util.GetArchForCurrentDesktopPlatform(
self.os_name)
self.node_path = self.bm.FetchPath('node', self.os_name, self.arch_name)
self.npm_path = self.bm.FetchPath('npm', self.os_name, self.arch_name)
self.node_initialized = False
def InitNode(self):
if self.node_initialized:
return # So we only init once per run
self.node_initialized = True
old_dir = os.path.abspath(os.curdir)
os.chdir(os.path.join(os.path.abspath(
py_utils.GetCatapultDir()), 'common', 'node_runner', 'node_runner'))
subprocess.call([self.node_path, self.npm_path, 'install'])
os.chdir(old_dir)
_NODE_MANAGER = _NodeManager()
def InitNode():
_NODE_MANAGER.InitNode()
def GetNodePath():
return _NODE_MANAGER.node_path
def GetNodeModulesPath():
_NODE_MANAGER.InitNode()
path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'node_modules'))
if sys.platform.startswith('win'):
# Prefix with \\?\ on Windows to enable long-path handling; node_modules
# paths can exceed MAX_PATH.
path = u'\\\\?\\' + path
return path

File diff suppressed because it is too large

View File

@ -0,0 +1,64 @@
{
"name": "catapult_base",
"version": "1.0.0",
"description": "Catapult project base",
"repository": {
"type": "git",
"url": "https://github.com/catapult-project/catapult/tree/master/catapult_base"
},
"main": "index.js",
"scripts": {
"test": "cd ../../../dashboard/dashboard/spa && karma start --coverage --no-colors"
},
"author": "The Chromium Authors",
"license": "BSD-2-Clause",
"gypfile": false,
"private": true,
"dependencies": {
"dot-prop-immutable": "1.5.0",
"@chopsui/result-channel": "0.1.0",
"@chopsui/batch-iterator": "0.1.0",
"@chopsui/chops-button": "0.1.11",
"@chopsui/chops-checkbox": "0.1.11",
"@chopsui/chops-input": "0.1.11",
"@chopsui/chops-loading": "0.1.11",
"@chopsui/chops-radio": "0.1.11",
"@chopsui/chops-radio-group": "0.1.11",
"@chopsui/chops-switch": "0.1.11",
"@chopsui/chops-tab": "0.1.11",
"@chopsui/chops-tab-bar": "0.1.11",
"@chopsui/chops-textarea": "0.1.11",
"@chopsui/tsmon-client": "0.0.1",
"@chopsui/chops-header": "0.1.5",
"@chopsui/chops-signin": "0.1.5",
"@polymer/app-route": "^3.0.0",
"@polymer/iron-collapse": "^3.0.0",
"@polymer/iron-icon": "^3.0.0",
"@polymer/iron-iconset-svg": "^3.0.0",
"@polymer/polymer": "^3.0.0",
"chai": "^4.0.2",
"dom5": "^1.0.0",
"escodegen": "^1.11.0",
"eslint": "^4.0.0",
"eslint-config-google": "^0.6.0",
"eslint-plugin-html": "^4.0.0",
"espree": "^3.0.0",
"istanbul-instrumenter-loader": "^3.0.1",
"lit-element": "^2.0.0",
"karma": "^4.0.0",
"karma-chrome-launcher": "^2.2.0",
"karma-coverage": "^1.1.2",
"karma-mocha": "^1.3.0",
"karma-sinon": "^1.0.5",
"karma-sourcemap-loader": "^0.3.7",
"karma-webpack": "4.0.0-rc.6",
"mocha": "^5.2.0",
"path": "^0.12.7",
"puppeteer": "^1.10.0",
"redux": "^4.0.0",
"sinon": "^7.2.3",
"vulcanize": "^1.16.0",
"webpack": "^4.16.1",
"webpack-command": "^0.4.1"
}
}

View File

@ -0,0 +1,7 @@
py_trace_event allows low-overhead instrumentation of a multi-threaded,
multi-process application in order to study its global performance
characteristics. It uses the trace event format used in Chromium/Chrome's
about:tracing system.
Trace files generated by py_trace_event can be viewed and manipulated by
trace_event_viewer.
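A minimal usage sketch of the API (file name is illustrative; see trace_event.py below for the full documentation):

    # Sketch: instrument one function and write a JSON trace file.
    from py_trace_event import trace_event

    trace_event.trace_enable('example.trace')   # legacy JSON format by default

    @trace_event.traced
    def work():
        return sum(range(1000))

    work()
    trace_event.trace_disable()   # flushes buffered events and closes the logfile
    # example.trace can now be loaded in Chrome's about:tracing.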

View File

@ -0,0 +1,35 @@
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
_CATAPULT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..'))
_PY_TRACE_EVENT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
def _RunTestsOrDie(top_level_dir):
# Need everything in one process for tracing to work.
exit_code = run_with_typ.Run(
top_level_dir, path=[_PY_TRACE_EVENT_PATH], jobs=1)
if exit_code:
sys.exit(exit_code)
def _AddToPathIfNeeded(path):
if path not in sys.path:
sys.path.insert(0, path)
if __name__ == '__main__':
_AddToPathIfNeeded(_CATAPULT_PATH)
from catapult_build import run_with_typ
_RunTestsOrDie(_PY_TRACE_EVENT_PATH)

View File

@ -0,0 +1,12 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
PY_UTILS = os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..', 'py_utils'))
PROTOBUF = os.path.abspath(os.path.join(
SCRIPT_DIR, '..', 'third_party', 'protobuf'))
sys.path.append(PY_UTILS)
sys.path.append(PROTOBUF)

View File

@ -0,0 +1,12 @@
#!/usr/bin/env python
# Copyright 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from distutils.core import setup
setup(
name='py_trace_event',
packages=['trace_event_impl'],
version='0.1.0',
description='Performance tracing for python',
author='Nat Duca'
)

View File

@ -0,0 +1,295 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from py_trace_event import trace_time
r"""Instrumentation-based profiling for Python.
trace_event allows you to hand-instrument your code with areas of interest.
When enabled, trace_event logs the start and stop times of these events to a
logfile. These resulting logfiles can be viewed with either Chrome's
about:tracing UI or with the standalone trace_event_viewer available at
http://www.github.com/natduca/trace_event_viewer/
To use trace_event, call trace_enable and start instrumenting your code:
from trace_event import *
if "--trace" in sys.argv:
trace_enable("myfile.trace")
@traced
def foo():
...
class MyFoo(object):
@traced
def bar(self):
...
trace_event records trace events to an in-memory buffer. If your application is
long running and you want to see the results of a trace before it exits, you can
call trace_flush to write any in-memory events to disk.
To help integrate trace_event into existing codebases that don't want to add
trace_event as a dependency, trace_event is split into an import shim
(trace_event.py) and an implementation (trace_event_impl/*). You can copy the
shim, trace_event.py, directly into your including codebase. If
trace_event_impl is not found, the shim will simply no-op.
trace_event is safe with regard to Python threads. Simply trace as you normally
would and each thread's timing will show up in the trace file.
Multiple processes can safely output into a single trace_event logfile. If you
fork after enabling tracing, the child process will continue outputting to the
logfile. Use of the multiprocessing module will work as well. In both cases,
however, note that disabling tracing in the parent process will not stop tracing
in the child processes.
"""
try:
import trace_event_impl
except ImportError:
trace_event_impl = None
def trace_can_enable():
"""
Returns True if a trace_event_impl was found. If false,
trace_enable will fail. Regular tracing methods, including
trace_begin and trace_end, will simply be no-ops.
"""
return trace_event_impl is not None
# Default TracedMetaClass to type in case trace_event_impl is not defined.
# This avoids an exception at import time, since TracedMetaClass is typically
# used in class definition scope.
TracedMetaClass = type
if trace_event_impl:
import time
# Trace file formats
JSON = trace_event_impl.JSON
JSON_WITH_METADATA = trace_event_impl.JSON_WITH_METADATA
PROTOBUF = trace_event_impl.PROTOBUF
def trace_is_enabled():
return trace_event_impl.trace_is_enabled()
def trace_enable(logfile, format=None):
return trace_event_impl.trace_enable(logfile, format)
def trace_disable():
return trace_event_impl.trace_disable()
def trace_flush():
trace_event_impl.trace_flush()
def trace_begin(name, **kwargs):
args_to_log = {key: repr(value) for key, value in kwargs.iteritems()}
trace_event_impl.add_trace_event("B", trace_time.Now(), "python", name,
args_to_log)
def trace_end(name):
trace_event_impl.add_trace_event("E", trace_time.Now(), "python", name)
def trace_set_thread_name(thread_name):
trace_event_impl.add_trace_event("M", trace_time.Now(), "__metadata",
"thread_name", {"name": thread_name})
def trace_add_benchmark_metadata(*args, **kwargs):
trace_event_impl.trace_add_benchmark_metadata(*args, **kwargs)
def trace(name, **kwargs):
return trace_event_impl.trace(name, **kwargs)
TracedMetaClass = trace_event_impl.TracedMetaClass
def traced(fn):
return trace_event_impl.traced(fn)
def clock_sync(sync_id, issue_ts=None):
'''
Add a clock sync event to the trace log.
Args:
sync_id: ID of clock sync event.
issue_ts: Time at which clock sync was issued, in microseconds.
'''
time_stamp = trace_time.Now()
args_to_log = {'sync_id': sync_id}
if issue_ts: # Issuer if issue_ts is set, else receiver.
assert issue_ts <= time_stamp
args_to_log['issue_ts'] = issue_ts
trace_event_impl.add_trace_event(
"c", time_stamp, "python", "clock_sync", args_to_log)
def is_tracing_controllable():
return trace_event_impl.is_tracing_controllable()
else:
import contextlib
# Trace file formats
JSON = None
JSON_WITH_METADATA = None
PROTOBUF = None
class TraceException(Exception):
pass # Defined locally; trace_event_impl, which defines the real one, is missing.
def trace_enable(log_file=None, format=None):
del log_file, format # unused.
raise TraceException(
"Cannot enable trace_event. No trace_event_impl module found.")
def trace_disable():
pass
def trace_is_enabled():
return False
def trace_flush():
pass
def trace_begin(name, **kwargs):
del name # unused.
del kwargs # unused.
pass
def trace_end(name):
del name # unused.
pass
def trace_set_thread_name(thread_name):
del thread_name # unused.
pass
@contextlib.contextmanager
def trace(name, **kwargs):
del name # unused
del kwargs # unused
yield
def traced(fn):
return fn
def clock_sync(sync_id, issue_ts=None):
del sync_id # unused.
pass
def is_tracing_controllable():
return False
trace_enable.__doc__ = """Enables tracing.
Once enabled, the enabled bit propagates to forked processes and
multiprocessing subprocesses. Regular child processes, e.g. those created via
os.system/popen, or subprocess.Popen instances, will not get traced. You can,
however, enable tracing on those subprocess manually.
Trace files are multiprocess safe, so you can have multiple processes
outputting to the same tracelog at once.
log_file can be one of three things:
None: a logfile is opened based on sys.argv[0], namely
"./" + sys.argv[0] + ".json"
string: a logfile of the given name is opened.
file-like object: its fileno() is used. The underlying file descriptor
must support fcntl.lockf() operations.
"""
trace_disable.__doc__ = """Disables tracing, if enabled.
Will not disable tracing on any existing child processes that were forked
from this process. You must disable them yourself.
"""
trace_flush.__doc__ = """Flushes any currently-recorded trace data to disk.
trace_event records traces into an in-memory buffer for efficiency. Flushing
is only done at process exit or when this method is called.
"""
trace_is_enabled.__doc__ = """Returns whether tracing is enabled.
"""
trace_begin.__doc__ = """Records the beginning of an event of the given name.
The building block for performance tracing. A typical example is:
from trace_event import *
def something_heavy():
trace_begin("something_heavy")
trace_begin("read")
try:
lines = open().readlines()
finally:
trace_end("read")
trace_begin("parse")
try:
parse(lines)
finally:
trace_end("parse")
trace_end("something_heavy")
Note that a trace_end call must be issued for every trace_begin call. When
tracing around blocks that might throw exceptions, you should use the trace
function, or a try-finally pattern to ensure that the trace_end method is
called.
See the documentation for the @traced decorator for a simpler way to
instrument functions and methods.
"""
trace_end.__doc__ = """Records the end of an event of the given name.
See the documentation for trace_begin for more information.
Make sure to issue a trace_end for every trace_begin issued. Failure to pair
these calls will lead to bizarrely tall looking traces in the
trace_event_viewer UI.
"""
trace_set_thread_name.__doc__ = """Sets the trace's name for the current thread.
"""
trace.__doc__ = """Traces a block of code using a with statement.
Example usage:
from trace_event import *
def something_heavy(lines):
with trace("parse_lines", lines=lines):
parse(lines)
If tracing an entire function call, prefer the @traced decorator.
"""
traced.__doc__ = """
Traces the provided function, using the function name for the actual generated
event.
Prefer this decorator over the explicit trace_begin and trace_end functions
whenever you are tracing the start and stop of a function. It automatically
issues trace_begin/end events, even when the wrapped function throws.
You can also pass the function's argument names to traced, and the argument
values will be added to the trace. Example usage:
from trace_event import *
@traced("url")
def send_request(url):
urllib2.urlopen(url).read()
"""
clock_sync.__doc__ = """
Issues a clock sync marker event.
Clock sync markers are used to synchronize the clock domains of different
traces so that they can be used together. It takes a sync_id, and if it is
the issuer of a clock sync event it will also require an issue_ts. The
issue_ts is a timestamp from when the clocksync was first issued. This is used
to calculate the time difference between clock domains.
"""

View File

@ -0,0 +1,7 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from log import *
from decorators import *
from meta_class import *
import multiprocessing_shim

View File

@ -0,0 +1,87 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import inspect
import time
import functools
import log
from py_trace_event import trace_time
@contextlib.contextmanager
def trace(name, **kwargs):
category = "python"
start = trace_time.Now()
args_to_log = {key: repr(value) for key, value in kwargs.iteritems()}
log.add_trace_event("B", start, category, name, args_to_log)
try:
yield
finally:
end = trace_time.Now()
log.add_trace_event("E", end, category, name)
def traced(*args):
def get_wrapper(func):
if inspect.isgeneratorfunction(func):
raise Exception("Can not trace generators.")
category = "python"
arg_spec = inspect.getargspec(func)
is_method = arg_spec.args and arg_spec.args[0] == "self"
def arg_spec_tuple(name):
arg_index = arg_spec.args.index(name)
defaults_length = len(arg_spec.defaults) if arg_spec.defaults else 0
default_index = arg_index + defaults_length - len(arg_spec.args)
if default_index >= 0:
default = arg_spec.defaults[default_index]
else:
default = None
return (name, arg_index, default)
args_to_log = map(arg_spec_tuple, arg_names)
@functools.wraps(func)
def traced_function(*args, **kwargs):
# Everything outside traced_function is done at decoration-time.
# Everything inside traced_function is done at run-time and must be fast.
if not log._enabled: # This check must be at run-time.
return func(*args, **kwargs)
def get_arg_value(name, index, default):
if name in kwargs:
return kwargs[name]
elif index < len(args):
return args[index]
else:
return default
if is_method:
name = "%s.%s" % (args[0].__class__.__name__, func.__name__)
else:
name = "%s.%s" % (func.__module__, func.__name__)
# Be sure to repr before calling func. Argument values may change.
arg_values = {
name: repr(get_arg_value(name, index, default))
for name, index, default in args_to_log}
start = trace_time.Now()
log.add_trace_event("B", start, category, name, arg_values)
try:
return func(*args, **kwargs)
finally:
end = trace_time.Now()
log.add_trace_event("E", end, category, name)
return traced_function
no_decorator_arguments = len(args) == 1 and callable(args[0])
if no_decorator_arguments:
arg_names = ()
return get_wrapper(args[0])
else:
arg_names = args
return get_wrapper
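Usage sketch for the two decorator forms dispatched above; names are illustrative, and the wrappers no-op unless tracing has been enabled:

    # Sketch: bare @traced vs. @traced('arg', ...) with argument logging.
    from py_trace_event.trace_event_impl import decorators

    @decorators.traced                # no arguments: only begin/end are logged
    def fetch(url):
        return url

    @decorators.traced('url')         # named args: values are repr()'d into the event
    def fetch_logged(url, timeout=30):
        return url

    fetch('http://example.com')
    fetch_logged('http://example.com')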

View File

@ -0,0 +1,63 @@
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import decorators
import logging
import unittest
from trace_test import TraceTest
#from .trace_test import TraceTest
def generator():
yield 1
yield 2
class DecoratorFailureTests(unittest.TestCase):
def test_tracing_object_fails(self):
self.assertRaises(Exception, lambda: decorators.trace(1))
self.assertRaises(Exception, lambda: decorators.trace(""))
self.assertRaises(Exception, lambda: decorators.trace([]))
def test_tracing_generators_fail(self):
self.assertRaises(Exception, lambda: decorators.trace(generator))
class ClassToTest(object):
@decorators.traced
def method1(self):
return 1
@decorators.traced
def method2(self):
return 1
@decorators.traced
def traced_func():
return 1
class DecoratorTests(TraceTest):
def _get_decorated_method_name(self, f):
res = self.go(f)
events = res.findEventsOnThread(res.findThreadIds()[0])
# Sanity checks.
self.assertEquals(2, len(events))
self.assertEquals(events[0]["name"], events[1]["name"])
return events[1]["name"]
def test_func_names_work(self):
expected_method_name = __name__ + '.traced_func'
self.assertEquals(expected_method_name,
self._get_decorated_method_name(traced_func))
def test_method_names_work(self):
ctt = ClassToTest()
self.assertEquals('ClassToTest.method1',
self._get_decorated_method_name(ctt.method1))
self.assertEquals('ClassToTest.method2',
self._get_decorated_method_name(ctt.method2))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)

View File

@ -0,0 +1,364 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import json
import os
import sys
import time
import threading
import multiprocessing
import multiprocessing_shim
from py_trace_event.trace_event_impl import perfetto_trace_writer
from py_trace_event import trace_time
from py_utils import lock
# Trace file formats:
# Legacy format: json list of events.
# Events can be written from multiple processes, but since no process
# can be sure that it is the last one, nobody writes the closing ']'.
# So the resulting file is not technically correct json.
JSON = "json"
# Full json with events and metadata.
# This format produces correct json ready to feed into TraceDataBuilder.
# Note that it is the responsibility of the user of py_trace_event to make sure
# that trace_disable() is called after all child processes have finished.
JSON_WITH_METADATA = "json_with_metadata"
# Perfetto protobuf trace format.
PROTOBUF = "protobuf"
_lock = threading.Lock()
_enabled = False
_log_file = None
_cur_events = [] # events that have yet to be buffered
_benchmark_metadata = {}
_tls = threading.local() # tls used to detect forking/etc
_atexit_registered_for_pid = None
_control_allowed = True
_original_multiprocessing_process = multiprocessing.Process
class TraceException(Exception):
pass
def _note(msg, *args):
pass
# print "%i: %s" % (os.getpid(), msg)
def _locked(fn):
def locked_fn(*args,**kwargs):
_lock.acquire()
try:
ret = fn(*args,**kwargs)
finally:
_lock.release()
return ret
return locked_fn
def _disallow_tracing_control():
global _control_allowed
_control_allowed = False
def trace_enable(log_file=None, format=None):
""" Enable tracing.
Args:
log_file: file to write trace into. Can be a file-like object,
a name of file, or None. If None, file name is constructed
from executable name.
format: trace file format. See trace_event.py for available options.
"""
if format is None:
format = JSON
_trace_enable(log_file, format)
def _write_header():
tid = threading.current_thread().ident
if not tid:
tid = os.getpid()
if _format == PROTOBUF:
tid = threading.current_thread().ident
perfetto_trace_writer.write_thread_descriptor_event(
output=_log_file,
pid=os.getpid(),
tid=tid,
ts=trace_time.Now(),
)
perfetto_trace_writer.write_event(
output=_log_file,
ph="M",
category="process_argv",
name="process_argv",
ts=trace_time.Now(),
args=sys.argv,
tid=tid,
)
else:
if _format == JSON:
_log_file.write('[')
elif _format == JSON_WITH_METADATA:
_log_file.write('{"traceEvents": [\n')
else:
raise TraceException("Unknown format: %s" % _format)
json.dump({
"ph": "M",
"category": "process_argv",
"pid": os.getpid(),
"tid": threading.current_thread().ident,
"ts": trace_time.Now(),
"name": "process_argv",
"args": {"argv": sys.argv},
}, _log_file)
_log_file.write('\n')
@_locked
def _trace_enable(log_file=None, format=None):
global _format
_format = format
global _enabled
if _enabled:
raise TraceException("Already enabled")
if not _control_allowed:
raise TraceException("Tracing control not allowed in child processes.")
_enabled = True
global _log_file
if log_file is None:
if sys.argv[0] == '':
n = 'trace_event'
else:
n = sys.argv[0]
if _format == PROTOBUF:
log_file = open("%s.pb" % n, "ab", False)
else:
log_file = open("%s.json" % n, "ab", False)
elif isinstance(log_file, basestring):
log_file = open("%s" % log_file, "ab", False)
elif not hasattr(log_file, 'fileno'):
raise TraceException(
"Log file must be None, a string, or file-like object with a fileno()")
_note("trace_event: tracelog name is %s" % log_file)
_log_file = log_file
with lock.FileLock(_log_file, lock.LOCK_EX):
_log_file.seek(0, os.SEEK_END)
lastpos = _log_file.tell()
creator = lastpos == 0
if creator:
_note("trace_event: Opened new tracelog, lastpos=%i", lastpos)
_write_header()
else:
_note("trace_event: Opened existing tracelog")
_log_file.flush()
# Monkeypatch in our process replacement for the multiprocessing.Process class
if multiprocessing.Process != multiprocessing_shim.ProcessShim:
multiprocessing.Process = multiprocessing_shim.ProcessShim
@_locked
def trace_flush():
if _enabled:
_flush()
@_locked
def trace_disable():
global _enabled
if not _control_allowed:
raise TraceException("Tracing control not allowed in child processes.")
if not _enabled:
return
_enabled = False
_flush(close=True)
multiprocessing.Process = _original_multiprocessing_process
def _write_cur_events():
if _format == PROTOBUF:
for e in _cur_events:
perfetto_trace_writer.write_event(
output=_log_file,
ph=e["ph"],
category=e["category"],
name=e["name"],
ts=e["ts"],
args=e["args"],
tid=threading.current_thread().ident,
)
elif _format in (JSON, JSON_WITH_METADATA):
for e in _cur_events:
_log_file.write(",\n")
json.dump(e, _log_file)
else:
raise TraceException("Unknown format: %s" % _format)
del _cur_events[:]
def _write_footer():
if _format in [JSON, PROTOBUF]:
# In JSON format we might not be the only process writing to this logfile.
# So, we will simply close the file rather than writing the trailing ] that
# it technically requires. The trace viewer understands this and
# will insert a trailing ] during loading.
# In PROTOBUF format there's no need for a footer. The metadata has already
# been written in a special proto message.
pass
elif _format == JSON_WITH_METADATA:
_log_file.write('],\n"metadata": ')
json.dump(_benchmark_metadata, _log_file)
_log_file.write('}')
else:
raise TraceException("Unknown format: %s" % _format)
def _flush(close=False):
global _log_file
with lock.FileLock(_log_file, lock.LOCK_EX):
_log_file.seek(0, os.SEEK_END)
if len(_cur_events):
_write_cur_events()
if close:
_write_footer()
_log_file.flush()
if close:
_note("trace_event: Closed")
_log_file.close()
_log_file = None
else:
_note("trace_event: Flushed")
@_locked
def trace_is_enabled():
return _enabled
@_locked
def add_trace_event(ph, ts, category, name, args=None):
global _enabled
if not _enabled:
return
if not hasattr(_tls, 'pid') or _tls.pid != os.getpid():
_tls.pid = os.getpid()
global _atexit_registered_for_pid
if _tls.pid != _atexit_registered_for_pid:
_atexit_registered_for_pid = _tls.pid
atexit.register(_trace_disable_atexit)
_tls.pid = os.getpid()
del _cur_events[:] # we forked, clear the event buffer!
tid = threading.current_thread().ident
if not tid:
tid = os.getpid()
_tls.tid = tid
_cur_events.append({
"ph": ph,
"category": category,
"pid": _tls.pid,
"tid": _tls.tid,
"ts": ts,
"name": name,
"args": args or {},
})
def trace_begin(name, args=None):
add_trace_event("B", trace_time.Now(), "python", name, args)
def trace_end(name, args=None):
add_trace_event("E", trace_time.Now(), "python", name, args)
def trace_set_thread_name(thread_name):
add_trace_event("M", trace_time.Now(), "__metadata", "thread_name",
{"name": thread_name})
def trace_add_benchmark_metadata(
benchmark_start_time_us,
story_run_time_us,
benchmark_name,
benchmark_description,
story_name,
story_tags,
story_run_index,
label=None,
had_failures=None,
):
""" Add benchmark metadata to be written to trace file.
Args:
benchmark_start_time_us: Benchmark start time in microseconds.
story_run_time_us: Story start time in microseconds.
benchmark_name: Name of the benchmark.
benchmark_description: Description of the benchmark.
story_name: Name of the story.
story_tags: List of story tags.
story_run_index: Index of the story run.
label: Optional label.
had_failures: Whether this story run failed.
"""
global _benchmark_metadata
if _format == PROTOBUF:
# Write metadata immediately.
perfetto_trace_writer.write_metadata(
output=_log_file,
benchmark_start_time_us=benchmark_start_time_us,
story_run_time_us=story_run_time_us,
benchmark_name=benchmark_name,
benchmark_description=benchmark_description,
story_name=story_name,
story_tags=story_tags,
story_run_index=story_run_index,
label=label,
had_failures=had_failures,
)
elif _format == JSON_WITH_METADATA:
# Store metadata to write it in the footer.
telemetry_metadata_for_json = {
"benchmarkStart": benchmark_start_time_us / 1000.0,
"traceStart": story_run_time_us / 1000.0,
"benchmarks": [benchmark_name],
"benchmarkDescriptions": [benchmark_description],
"stories": [story_name],
"storyTags": story_tags,
"storysetRepeats": [story_run_index],
}
if label:
telemetry_metadata_for_json["labels"] = [label]
if had_failures:
telemetry_metadata_for_json["hadFailures"] = [had_failures]
_benchmark_metadata = {
# TODO(crbug.com/948633): For right now, we use "TELEMETRY" as the
# clock domain to guarantee that Telemetry is given its own clock
# domain. Telemetry isn't really a clock domain, though: it's a
# system that USES a clock domain like LINUX_CLOCK_MONOTONIC or
# WIN_QPC. However, there's a chance that a Telemetry controller
# running on Linux (using LINUX_CLOCK_MONOTONIC) is interacting
# with an Android phone (also using LINUX_CLOCK_MONOTONIC, but
# on a different machine). The current logic collapses clock
# domains based solely on the clock domain string, but we really
# should to collapse based on some (device ID, clock domain ID)
# tuple. Giving Telemetry its own clock domain is a work-around
# for this.
"clock-domain": "TELEMETRY",
"telemetry": telemetry_metadata_for_json,
}
elif _format == JSON:
raise TraceException("Can't write metadata in JSON format")
else:
raise TraceException("Unknown format: %s" % _format)
def _trace_disable_atexit():
trace_disable()
def is_tracing_controllable():
global _control_allowed
return _control_allowed
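A sketch of the JSON_WITH_METADATA flow implemented above, in a single process (argument values are illustrative):

    # Sketch: write a complete json-with-metadata trace.
    from py_trace_event.trace_event_impl import log

    log.trace_enable('run.json', format=log.JSON_WITH_METADATA)
    log.trace_begin('startup')
    log.trace_end('startup')
    log.trace_add_benchmark_metadata(
        benchmark_start_time_us=0, story_run_time_us=0,
        benchmark_name='demo', benchmark_description='example',
        story_name='story', story_tags=[], story_run_index=0)
    log.trace_disable()   # writes the closing '], "metadata": {...}}' footer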

View File

@ -0,0 +1,46 @@
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
import unittest
from log import *
from parsed_trace_events import *
from py_utils import tempfile_ext
class LogIOTest(unittest.TestCase):
def test_enable_with_file(self):
with tempfile_ext.TemporaryFileName() as filename:
trace_enable(open(filename, 'w+'))
trace_disable()
e = ParsedTraceEvents(trace_filename=filename)
self.assertTrue(len(e) > 0)
def test_enable_with_filename(self):
with tempfile_ext.TemporaryFileName() as filename:
trace_enable(filename)
trace_disable()
e = ParsedTraceEvents(trace_filename=filename)
self.assertTrue(len(e) > 0)
def test_enable_with_implicit_filename(self):
expected_filename = "%s.json" % sys.argv[0]
def do_work():
trace_enable()
trace_disable()
e = ParsedTraceEvents(trace_filename=expected_filename)
self.assertTrue(len(e) > 0)
try:
do_work()
finally:
if os.path.exists(expected_filename):
os.unlink(expected_filename)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)

View File

@ -0,0 +1,17 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import types
from py_trace_event.trace_event_impl import decorators
class TracedMetaClass(type):
def __new__(cls, name, bases, attrs):
for attr_name, attr_value in attrs.iteritems():
if (not attr_name.startswith('_') and
isinstance(attr_value, types.FunctionType)):
attrs[attr_name] = decorators.traced(attr_value)
return super(TracedMetaClass, cls).__new__(cls, name, bases, attrs)
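Usage sketch (Python 2 metaclass spelling, matching this codebase): every public method of the class is wrapped with decorators.traced automatically.

    # Sketch: trace all public methods of a class via the metaclass.
    from py_trace_event import trace_event

    class Worker(object):
        __metaclass__ = trace_event.TracedMetaClass

        def process(self):     # public: wrapped and traced
            return 42

        def _helper(self):     # leading underscore: left untraced
            return 0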

View File

@ -0,0 +1,88 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
from multiprocessing.process import AuthenticationString # used by the authkey setter
import log
import time
_RealProcess = multiprocessing.Process
__all__ = []
class ProcessSubclass(_RealProcess):
def __init__(self, shim, *args, **kwargs):
_RealProcess.__init__(self, *args, **kwargs)
self._shim = shim
def run(self, *args, **kwargs):
log._disallow_tracing_control()
try:
r = _RealProcess.run(self, *args, **kwargs)
finally:
if log.trace_is_enabled():
log.trace_flush() # todo, reduce need for this...
return r
class ProcessShim(object):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
self._proc = ProcessSubclass(self, group, target, name, args, kwargs)
# hint to testing code that the shimming worked
self._shimmed_by_trace_event = True
def run(self):
self._proc.run()
def start(self):
self._proc.start()
def terminate(self):
if log.trace_is_enabled():
# give the flush a chance to finish --> TODO: find some other way.
time.sleep(0.25)
self._proc.terminate()
def join(self, timeout=None):
self._proc.join(timeout)
def is_alive(self):
return self._proc.is_alive()
@property
def name(self):
return self._proc.name
@name.setter
def name(self, name):
self._proc.name = name
@property
def daemon(self):
return self._proc.daemon
@daemon.setter
def daemon(self, daemonic):
self._proc.daemon = daemonic
@property
def authkey(self):
return self._proc._authkey
@authkey.setter
def authkey(self, authkey):
self._proc.authkey = AuthenticationString(authkey)
@property
def exitcode(self):
return self._proc.exitcode
@property
def ident(self):
return self._proc.ident
@property
def pid(self):
return self._proc.pid
def __repr__(self):
return self._proc.__repr__()
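A sketch of what the shim buys you: after trace_enable(), multiprocessing.Process is monkeypatched to ProcessShim (see log.py above), so children write into the same tracelog but cannot control tracing.

    # Sketch: a traced child process via the shimmed multiprocessing.Process.
    import multiprocessing
    from py_trace_event import trace_event

    def child():
        trace_event.trace_begin('child_work')
        trace_event.trace_end('child_work')

    if __name__ == '__main__':
        trace_event.trace_enable('multi.trace')
        p = multiprocessing.Process(target=child)   # actually a ProcessShim
        p.start()
        p.join()
        trace_event.trace_disable()   # parent only; children flush on their own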

View File

@ -0,0 +1,98 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import json
class ParsedTraceEvents(object):
def __init__(self, events=None, trace_filename=None):
"""
Utility class for filtering and manipulating trace data.
events -- An iterable object containing trace events
trace_filename -- The name of a file containing a complete trace.
"""
if trace_filename and events:
raise Exception("Provide either a trace file or event list")
if not trace_filename and events is None:
raise Exception("Provide either a trace file or event list")
if trace_filename:
f = open(trace_filename, 'r')
t = f.read()
f.close()
# If the event data begins with a [, then we know it should end with a ].
# The reason we check for this is because some tracing implementations
# cannot guarantee that a ']' gets written to the trace file. So, we are
# forgiving and if this is obviously the case, we fix it up before
# throwing the string at JSON.parse.
if t[0] == '[':
n = len(t)
if t[n - 1] != ']' and t[n - 1] != '\n':
t = t + ']'
elif t[n - 2] != ']' and t[n - 1] == '\n':
t = t + ']'
elif t[n - 3] != ']' and t[n - 2] == '\r' and t[n - 1] == '\n':
t = t + ']'
try:
events = json.loads(t)
except ValueError:
raise Exception("Corrupt trace, did not parse. Value: %s" % t)
if 'traceEvents' in events:
events = events['traceEvents']
if not hasattr(events, '__iter__'):
raise Exception('events must be iterable.')
self.events = events
self.pids = None
self.tids = None
def __len__(self):
return len(self.events)
def __getitem__(self, i):
return self.events[i]
def __setitem__(self, i, v):
self.events[i] = v
def __repr__(self):
return "[%s]" % ",\n ".join([repr(e) for e in self.events])
def findProcessIds(self):
if self.pids:
return self.pids
pids = set()
for e in self.events:
if "pid" in e and e["pid"]:
pids.add(e["pid"])
self.pids = list(pids)
return self.pids
def findThreadIds(self):
if self.tids:
return self.tids
tids = set()
for e in self.events:
if "tid" in e and e["tid"]:
tids.add(e["tid"])
self.tids = list(tids)
return self.tids
def findEventsOnProcess(self, pid):
return ParsedTraceEvents([e for e in self.events if e["pid"] == pid])
def findEventsOnThread(self, tid):
return ParsedTraceEvents(
[e for e in self.events if e["ph"] != "M" and e["tid"] == tid])
def findByPhase(self, ph):
return ParsedTraceEvents([e for e in self.events if e["ph"] == ph])
def findByName(self, n):
return ParsedTraceEvents([e for e in self.events if e["name"] == n])
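A filtering sketch (event data is illustrative):

    # Sketch: slice a parsed trace by phase, name, and thread.
    from py_trace_event.trace_event_impl.parsed_trace_events import ParsedTraceEvents

    events = ParsedTraceEvents(events=[
        {'ph': 'B', 'name': 'load', 'pid': 1, 'tid': 7, 'ts': 0},
        {'ph': 'E', 'name': 'load', 'pid': 1, 'tid': 7, 'ts': 5},
    ])
    assert len(events.findByPhase('B')) == 1
    assert len(events.findByName('load')) == 2
    assert events.findThreadIds() == [7]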

View File

@ -0,0 +1,222 @@
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Classes representing perfetto trace protobuf messages.
This module uses neither the python-protobuf library nor Python classes
compiled from .proto definitions, because currently there's no way to
deploy those to all the places where telemetry is run.
TODO(crbug.com/944078): Remove this module after the python-protobuf library
is deployed to all the bots.
Definitions of perfetto messages can be found here:
https://android.googlesource.com/platform/external/perfetto/+/refs/heads/master/protos/perfetto/trace/
"""
import encoder
import wire_format
class TracePacket(object):
def __init__(self):
self.interned_data = None
self.thread_descriptor = None
self.incremental_state_cleared = None
self.track_event = None
self.trusted_packet_sequence_id = None
self.chrome_benchmark_metadata = None
def encode(self):
parts = []
if self.trusted_packet_sequence_id is not None:
writer = encoder.UInt32Encoder(10, False, False)
writer(parts.append, self.trusted_packet_sequence_id)
if self.track_event is not None:
tag = encoder.TagBytes(11, wire_format.WIRETYPE_LENGTH_DELIMITED)
data = self.track_event.encode()
length = encoder._VarintBytes(len(data))
parts += [tag, length, data]
if self.interned_data is not None:
tag = encoder.TagBytes(12, wire_format.WIRETYPE_LENGTH_DELIMITED)
data = self.interned_data.encode()
length = encoder._VarintBytes(len(data))
parts += [tag, length, data]
if self.incremental_state_cleared is not None:
writer = encoder.BoolEncoder(41, False, False)
writer(parts.append, self.incremental_state_cleared)
if self.thread_descriptor is not None:
tag = encoder.TagBytes(44, wire_format.WIRETYPE_LENGTH_DELIMITED)
data = self.thread_descriptor.encode()
length = encoder._VarintBytes(len(data))
parts += [tag, length, data]
if self.chrome_benchmark_metadata is not None:
tag = encoder.TagBytes(48, wire_format.WIRETYPE_LENGTH_DELIMITED)
data = self.chrome_benchmark_metadata.encode()
length = encoder._VarintBytes(len(data))
parts += [tag, length, data]
return b"".join(parts)
class InternedData(object):
def __init__(self):
self.event_category = None
self.legacy_event_name = None
def encode(self):
parts = []
if self.event_category is not None:
tag = encoder.TagBytes(1, wire_format.WIRETYPE_LENGTH_DELIMITED)
data = self.event_category.encode()
length = encoder._VarintBytes(len(data))
parts += [tag, length, data]
if self.legacy_event_name is not None:
tag = encoder.TagBytes(2, wire_format.WIRETYPE_LENGTH_DELIMITED)
data = self.legacy_event_name.encode()
length = encoder._VarintBytes(len(data))
parts += [tag, length, data]
return b"".join(parts)
class EventCategory(object):
def __init__(self):
self.iid = None
self.name = None
def encode(self):
if (self.iid is None or self.name is None):
raise RuntimeError("Missing mandatory fields.")
parts = []
writer = encoder.UInt32Encoder(1, False, False)
writer(parts.append, self.iid)
writer = encoder.StringEncoder(2, False, False)
writer(parts.append, self.name)
return b"".join(parts)
LegacyEventName = EventCategory
class ThreadDescriptor(object):
def __init__(self):
self.pid = None
self.tid = None
self.reference_timestamp_us = None
def encode(self):
if (self.pid is None or self.tid is None or
self.reference_timestamp_us is None):
raise RuntimeError("Missing mandatory fields.")
parts = []
writer = encoder.UInt32Encoder(1, False, False)
writer(parts.append, self.pid)
writer = encoder.UInt32Encoder(2, False, False)
writer(parts.append, self.tid)
writer = encoder.Int64Encoder(6, False, False)
writer(parts.append, self.reference_timestamp_us)
return b"".join(parts)
class TrackEvent(object):
def __init__(self):
self.timestamp_absolute_us = None
self.timestamp_delta_us = None
self.legacy_event = None
self.category_iids = None
def encode(self):
parts = []
if self.timestamp_delta_us is not None:
writer = encoder.Int64Encoder(1, False, False)
writer(parts.append, self.timestamp_delta_us)
if self.category_iids is not None:
writer = encoder.UInt32Encoder(3, is_repeated=True, is_packed=False)
writer(parts.append, self.category_iids)
if self.legacy_event is not None:
tag = encoder.TagBytes(6, wire_format.WIRETYPE_LENGTH_DELIMITED)
data = self.legacy_event.encode()
length = encoder._VarintBytes(len(data))
parts += [tag, length, data]
if self.timestamp_absolute_us is not None:
writer = encoder.Int64Encoder(16, False, False)
writer(parts.append, self.timestamp_absolute_us)
return b"".join(parts)
class LegacyEvent(object):
def __init__(self):
self.phase = None
self.name_iid = None
def encode(self):
parts = []
if self.name_iid is not None:
writer = encoder.UInt32Encoder(1, False, False)
writer(parts.append, self.name_iid)
if self.phase is not None:
writer = encoder.Int32Encoder(2, False, False)
writer(parts.append, self.phase)
return b"".join(parts)
class ChromeBenchmarkMetadata(object):
def __init__(self):
self.benchmark_start_time_us = None
self.story_run_time_us = None
self.benchmark_name = None
self.benchmark_description = None
self.story_name = None
self.story_tags = None
self.story_run_index = None
self.label = None
self.had_failures = None
def encode(self):
parts = []
if self.benchmark_start_time_us is not None:
writer = encoder.Int64Encoder(1, False, False)
writer(parts.append, self.benchmark_start_time_us)
if self.story_run_time_us is not None:
writer = encoder.Int64Encoder(2, False, False)
writer(parts.append, self.story_run_time_us)
if self.benchmark_name is not None:
writer = encoder.StringEncoder(3, False, False)
writer(parts.append, self.benchmark_name)
if self.benchmark_description is not None:
writer = encoder.StringEncoder(4, False, False)
writer(parts.append, self.benchmark_description)
if self.label is not None:
writer = encoder.StringEncoder(5, False, False)
writer(parts.append, self.label)
if self.story_name is not None:
writer = encoder.StringEncoder(6, False, False)
writer(parts.append, self.story_name)
if self.story_tags is not None:
writer = encoder.StringEncoder(7, is_repeated=True, is_packed=False)
writer(parts.append, self.story_tags)
if self.story_run_index is not None:
writer = encoder.Int32Encoder(8, False, False)
writer(parts.append, self.story_run_index)
if self.had_failures is not None:
writer = encoder.BoolEncoder(9, False, False)
writer(parts.append, self.had_failures)
return b"".join(parts)
def write_trace_packet(output, trace_packet):
tag = encoder.TagBytes(1, wire_format.WIRETYPE_LENGTH_DELIMITED)
output.write(tag)
binary_data = trace_packet.encode()
encoder._EncodeVarint(output.write, len(binary_data))
output.write(binary_data)
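The hand-rolled encoding above leans on two protobuf wire-format facts: every field starts with a varint tag, (field_number << 3) | wire_type, and length-delimited fields (wire type 2) are followed by a varint byte length. A standalone sketch of that arithmetic, independent of the encoder module:

    # Sketch: protobuf varint and tag encoding (Python 2 byte strings).
    def varint(n):
        # Little-endian base-128: 7 payload bits per byte, MSB = continuation.
        out = b''
        while True:
            bits = n & 0x7F
            n >>= 7
            if n:
                out += chr(bits | 0x80)
            else:
                return out + chr(bits)

    WIRETYPE_LENGTH_DELIMITED = 2
    def tag(field_number, wire_type):
        return varint((field_number << 3) | wire_type)

    # TracePacket.thread_descriptor is field 44, length-delimited (see above):
    assert tag(44, WIRETYPE_LENGTH_DELIMITED) == '\xe2\x02'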

View File

@ -0,0 +1,166 @@
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Functions to write trace data in perfetto protobuf format.
"""
import collections
import perfetto_proto_classes as proto
# Dicts of strings for interning.
# Note that each thread has its own interning index.
_interned_categories_by_tid = collections.defaultdict(dict)
_interned_event_names_by_tid = collections.defaultdict(dict)
# Trusted sequence ids from telemetry should not overlap with
# trusted sequence ids from other trace producers. Chrome assigns
# sequence ids incrementally starting from 1 and we expect all its ids
# to be well below 10000. Starting from 2^20 will give us enough
# confidence that it will not overlap.
_next_sequence_id = 1<<20
_sequence_ids = {}
# Timestamp of the last event from each thread. Used for delta-encoding
# of timestamps.
_last_timestamps = {}
def _get_sequence_id(tid):
global _sequence_ids
global _next_sequence_id
if tid not in _sequence_ids:
_sequence_ids[tid] = _next_sequence_id
_next_sequence_id += 1
return _sequence_ids[tid]
def _intern_category(category, trace_packet, tid):
global _interned_categories_by_tid
categories = _interned_categories_by_tid[tid]
if category not in categories:
# note that interning indices start from 1
categories[category] = len(categories) + 1
if trace_packet.interned_data is None:
trace_packet.interned_data = proto.InternedData()
trace_packet.interned_data.event_category = proto.EventCategory()
trace_packet.interned_data.event_category.iid = categories[category]
trace_packet.interned_data.event_category.name = category
return categories[category]
def _intern_event_name(event_name, trace_packet, tid):
global _interned_event_names_by_tid
event_names = _interned_event_names_by_tid[tid]
if event_name not in event_names:
# note that interning indices start from 1
event_names[event_name] = len(event_names) + 1
if trace_packet.interned_data is None:
trace_packet.interned_data = proto.InternedData()
trace_packet.interned_data.legacy_event_name = proto.LegacyEventName()
trace_packet.interned_data.legacy_event_name.iid = event_names[event_name]
trace_packet.interned_data.legacy_event_name.name = event_name
return event_names[event_name]
def write_thread_descriptor_event(output, pid, tid, ts):
""" Write the first event in a sequence.
Call this function before writing any other events.
Note that this function is NOT thread-safe.
Args:
output: a file-like object to write events into.
pid: process ID.
tid: thread ID.
ts: timestamp in microseconds.
"""
global _last_timestamps
ts_us = int(ts)
_last_timestamps[tid] = ts_us
thread_descriptor_packet = proto.TracePacket()
thread_descriptor_packet.trusted_packet_sequence_id = _get_sequence_id(tid)
thread_descriptor_packet.thread_descriptor = proto.ThreadDescriptor()
thread_descriptor_packet.thread_descriptor.pid = pid
# Thread ID from threading module doesn't fit into int32.
# But we don't need the exact thread ID, just some number to
# distinguish one thread from another. We assume that the last 31 bits
# will do for that purpose.
thread_descriptor_packet.thread_descriptor.tid = tid & 0x7FFFFFFF
thread_descriptor_packet.thread_descriptor.reference_timestamp_us = ts_us
thread_descriptor_packet.incremental_state_cleared = True
proto.write_trace_packet(output, thread_descriptor_packet)
def write_event(output, ph, category, name, ts, args, tid):
""" Write a trace event.
Note that this function is NOT thread-safe.
Args:
output: a file-like object to write events into.
ph: phase of event.
category: category of event.
name: event name.
ts: timestamp in microseconds.
args: this argument is currently ignored.
tid: thread ID.
"""
del args # TODO(khokhlov): Encode args as DebugAnnotations.
global _last_timestamps
ts_us = int(ts)
delta_ts = ts_us - _last_timestamps[tid]
packet = proto.TracePacket()
packet.trusted_packet_sequence_id = _get_sequence_id(tid)
packet.track_event = proto.TrackEvent()
if delta_ts >= 0:
packet.track_event.timestamp_delta_us = delta_ts
_last_timestamps[tid] = ts_us
else:
packet.track_event.timestamp_absolute_us = ts_us
packet.track_event.category_iids = [_intern_category(category, packet, tid)]
legacy_event = proto.LegacyEvent()
legacy_event.phase = ord(ph)
legacy_event.name_iid = _intern_event_name(name, packet, tid)
packet.track_event.legacy_event = legacy_event
proto.write_trace_packet(output, packet)
def write_metadata(
output,
benchmark_start_time_us,
story_run_time_us,
benchmark_name,
benchmark_description,
story_name,
story_tags,
story_run_index,
label=None,
had_failures=None,
):
metadata = proto.ChromeBenchmarkMetadata()
metadata.benchmark_start_time_us = int(benchmark_start_time_us)
metadata.story_run_time_us = int(story_run_time_us)
metadata.benchmark_name = benchmark_name
metadata.benchmark_description = benchmark_description
metadata.story_name = story_name
metadata.story_tags = list(story_tags)
metadata.story_run_index = int(story_run_index)
if label is not None:
metadata.label = label
if had_failures is not None:
metadata.had_failures = had_failures
packet = proto.TracePacket()
packet.chrome_benchmark_metadata = metadata
proto.write_trace_packet(output, packet)

View File

@ -0,0 +1,80 @@
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import StringIO
from py_trace_event.trace_event_impl import perfetto_trace_writer
class PerfettoTraceWriterTest(unittest.TestCase):
""" Tests functions that write perfetto protobufs.
TODO(crbug.com/944078): Switch to using python-protobuf library
and implement proper protobuf parsing then.
"""
def testWriteThreadDescriptorEvent(self):
result = StringIO.StringIO()
perfetto_trace_writer.write_thread_descriptor_event(
output=result,
pid=1,
tid=2,
ts=1556716807306000,
)
expected_output = (
'\n\x17P\x80\x80@\xc8\x02\x01\xe2\x02\r\x08\x01\x10'
'\x020\x90\xf6\xc2\x82\xb6\xfa\xe1\x02'
)
self.assertEqual(expected_output, result.getvalue())
def testWriteTwoEvents(self):
result = StringIO.StringIO()
perfetto_trace_writer.write_thread_descriptor_event(
output=result,
pid=1,
tid=2,
ts=1556716807306000,
)
perfetto_trace_writer.write_event(
output=result,
ph="M",
category="category",
name="event_name",
ts=1556716807406000,
args={},
tid=2,
)
expected_output = (
'\n\x17P\x80\x80@\xc8\x02\x01\xe2\x02\r\x08\x01\x10'
'\x020\x90\xf6\xc2\x82\xb6\xfa\xe1\x02\n2P\x80\x80@Z\x0c\x08'
'\xa0\x8d\x06\x18\x012\x04\x08\x01\x10Mb\x1e\n\x0c\x08\x01'
'\x12\x08category\x12\x0e\x08\x01\x12\nevent_name'
)
self.assertEqual(expected_output, result.getvalue())
def testWriteMetadata(self):
result = StringIO.StringIO()
perfetto_trace_writer.write_metadata(
output=result,
benchmark_start_time_us=1556716807306000,
story_run_time_us=1556716807406000,
benchmark_name="benchmark",
benchmark_description="description",
story_name="story",
story_tags=["foo", "bar"],
story_run_index=0,
label="label",
had_failures=False,
)
expected_output = (
'\nI\x82\x03F\x08\x90\xf6\xc2\x82\xb6\xfa\xe1'
'\x02\x10\xb0\x83\xc9\x82\xb6\xfa\xe1\x02\x1a\tbenchmark"'
'\x0bdescription*\x05label2\x05story:\x03foo:\x03bar@\x00H\x00'
)
self.assertEqual(expected_output, result.getvalue())

View File

@ -0,0 +1,48 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
#from .log import *
#from .parsed_trace_events import *
from log import *
from parsed_trace_events import *
from py_utils import tempfile_ext
class TraceTest(unittest.TestCase):
def __init__(self, *args):
"""
Infrastructure for running tests of the tracing system.
Does not actually run any tests. Look at subclasses for those.
"""
unittest.TestCase.__init__(self, *args)
self._file = None
def go(self, cb):
"""
Enables tracing, runs the provided callback, and if successful, returns a
ParsedTraceEvents object with the results.
"""
with tempfile_ext.TemporaryFileName() as filename:
self._file = open(filename, 'a+')
trace_enable(self._file)
try:
cb()
finally:
trace_disable()
e = ParsedTraceEvents(trace_filename=self._file.name)
self._file.close()
self._file = None
return e
@property
def trace_filename(self):
return self._file.name
def tearDown(self):
if trace_is_enabled():
trace_disable()
if self._file:
self._file.close()
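# Usage sketch (hedged; MyTraceTest and do_traced_work are illustrative):
#
#   class MyTraceTest(TraceTest):
#     def testBasic(self):
#       events = self.go(do_traced_work)  # events is a ParsedTraceEvents
#       ...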

View File

@ -0,0 +1,518 @@
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import json
import logging
import math
import multiprocessing
import os
import time
import unittest
import sys
from py_trace_event import trace_event
from py_trace_event import trace_time
from py_trace_event.trace_event_impl import log
from py_trace_event.trace_event_impl import multiprocessing_shim
from py_utils import tempfile_ext
class TraceEventTests(unittest.TestCase):
@contextlib.contextmanager
def _test_trace(self, disable=True, format=None):
with tempfile_ext.TemporaryFileName() as filename:
self._log_path = filename
try:
trace_event.trace_enable(self._log_path, format=format)
yield
finally:
if disable:
trace_event.trace_disable()
def testNoImpl(self):
orig_impl = trace_event.trace_event_impl
try:
trace_event.trace_event_impl = None
self.assertFalse(trace_event.trace_can_enable())
finally:
trace_event.trace_event_impl = orig_impl
def testImpl(self):
self.assertTrue(trace_event.trace_can_enable())
def testIsEnabledFalse(self):
self.assertFalse(trace_event.trace_is_enabled())
def testIsEnabledTrue(self):
with self._test_trace():
self.assertTrue(trace_event.trace_is_enabled())
def testEnable(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 1)
self.assertTrue(trace_event.trace_is_enabled())
log_output = log_output.pop()
self.assertEquals(log_output['category'], 'process_argv')
self.assertEquals(log_output['name'], 'process_argv')
self.assertTrue(log_output['args']['argv'])
self.assertEquals(log_output['ph'], 'M')
def testDoubleEnable(self):
try:
with self._test_trace():
with self._test_trace():
pass
except log.TraceException:
return
assert False
def testDisable(self):
_old_multiprocessing_process = multiprocessing.Process
with self._test_trace(disable=False):
with open(self._log_path, 'r') as f:
self.assertTrue(trace_event.trace_is_enabled())
self.assertEqual(
multiprocessing.Process, multiprocessing_shim.ProcessShim)
trace_event.trace_disable()
self.assertEqual(
multiprocessing.Process, _old_multiprocessing_process)
self.assertEquals(len(json.loads(f.read() + ']')), 1)
self.assertFalse(trace_event.trace_is_enabled())
def testDoubleDisable(self):
with self._test_trace():
pass
trace_event.trace_disable()
def testFlushChanges(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.clock_sync('1')
self.assertEquals(len(json.loads(f.read() + ']')), 1)
f.seek(0)
trace_event.trace_flush()
self.assertEquals(len(json.loads(f.read() + ']')), 2)
def testFlushNoChanges(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
self.assertEquals(len(json.loads(f.read() + ']')),1)
f.seek(0)
trace_event.trace_flush()
self.assertEquals(len(json.loads(f.read() + ']')), 1)
def testDoubleFlush(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.clock_sync('1')
self.assertEquals(len(json.loads(f.read() + ']')), 1)
f.seek(0)
trace_event.trace_flush()
trace_event.trace_flush()
self.assertEquals(len(json.loads(f.read() + ']')), 2)
def testTraceBegin(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.trace_begin('test_event', this='that')
trace_event.trace_flush()
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 2)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue( current_entry['args']['argv'])
self.assertEquals( current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'test_event')
self.assertEquals(current_entry['args']['this'], '\'that\'')
self.assertEquals(current_entry['ph'], 'B')
def testTraceEnd(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.trace_end('test_event')
trace_event.trace_flush()
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 2)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'test_event')
self.assertEquals(current_entry['args'], {})
self.assertEquals(current_entry['ph'], 'E')
def testTrace(self):
with self._test_trace():
with trace_event.trace('test_event', this='that'):
pass
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 3)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'test_event')
self.assertEquals(current_entry['args']['this'], '\'that\'')
self.assertEquals(current_entry['ph'], 'B')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'test_event')
self.assertEquals(current_entry['args'], {})
self.assertEquals(current_entry['ph'], 'E')
def testTracedDecorator(self):
@trace_event.traced("this")
def test_decorator(this="that"):
pass
with self._test_trace():
test_decorator()
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 3)
expected_name = __name__ + '.test_decorator'
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], expected_name)
self.assertEquals(current_entry['args']['this'], '\'that\'')
self.assertEquals(current_entry['ph'], 'B')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], expected_name)
self.assertEquals(current_entry['args'], {})
self.assertEquals(current_entry['ph'], 'E')
def testClockSyncWithTs(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.clock_sync('id', issue_ts=trace_time.Now())
trace_event.trace_flush()
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 2)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'clock_sync')
self.assertTrue(current_entry['args']['issue_ts'])
self.assertEquals(current_entry['ph'], 'c')
def testClockSyncWithoutTs(self):
with self._test_trace():
with open(self._log_path, 'r') as f:
trace_event.clock_sync('id')
trace_event.trace_flush()
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 2)
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'process_argv')
self.assertEquals(current_entry['name'], 'process_argv')
self.assertTrue(current_entry['args']['argv'])
self.assertEquals(current_entry['ph'], 'M')
current_entry = log_output.pop(0)
self.assertEquals(current_entry['category'], 'python')
self.assertEquals(current_entry['name'], 'clock_sync')
self.assertFalse(current_entry['args'].get('issue_ts'))
self.assertEquals(current_entry['ph'], 'c')
def testTime(self):
actual_diff = []
def func1():
trace_begin("func1")
start = time.time()
time.sleep(0.25)
end = time.time()
actual_diff.append(end-start) # Pass via array because of Python scoping
trace_end("func1")
with self._test_trace():
start_ts = time.time()
trace_event.trace_begin('test')
end_ts = time.time()
trace_event.trace_end('test')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 3)
meta_data = log_output[0]
open_data = log_output[1]
close_data = log_output[2]
self.assertEquals(meta_data['category'], 'process_argv')
self.assertEquals(meta_data['name'], 'process_argv')
self.assertTrue(meta_data['args']['argv'])
self.assertEquals(meta_data['ph'], 'M')
self.assertEquals(open_data['category'], 'python')
self.assertEquals(open_data['name'], 'test')
self.assertEquals(open_data['ph'], 'B')
self.assertEquals(close_data['category'], 'python')
self.assertEquals(close_data['name'], 'test')
self.assertEquals(close_data['ph'], 'E')
event_time_diff = close_data['ts'] - open_data['ts']
recorded_time_diff = (end_ts - start_ts) * 1000000
self.assertLess(math.fabs(event_time_diff - recorded_time_diff), 1000)
def testNestedCalls(self):
with self._test_trace():
trace_event.trace_begin('one')
trace_event.trace_begin('two')
trace_event.trace_end('two')
trace_event.trace_end('one')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 5)
meta_data = log_output[0]
one_open = log_output[1]
two_open = log_output[2]
two_close = log_output[3]
one_close = log_output[4]
self.assertEquals(meta_data['category'], 'process_argv')
self.assertEquals(meta_data['name'], 'process_argv')
self.assertTrue(meta_data['args']['argv'])
self.assertEquals(meta_data['ph'], 'M')
self.assertEquals(one_open['category'], 'python')
self.assertEquals(one_open['name'], 'one')
self.assertEquals(one_open['ph'], 'B')
self.assertEquals(one_close['category'], 'python')
self.assertEquals(one_close['name'], 'one')
self.assertEquals(one_close['ph'], 'E')
self.assertEquals(two_open['category'], 'python')
self.assertEquals(two_open['name'], 'two')
self.assertEquals(two_open['ph'], 'B')
self.assertEquals(two_close['category'], 'python')
self.assertEquals(two_close['name'], 'two')
self.assertEquals(two_close['ph'], 'E')
self.assertLessEqual(one_open['ts'], two_open['ts'])
self.assertGreaterEqual(one_close['ts'], two_close['ts'])
def testInterleavedCalls(self):
with self._test_trace():
trace_event.trace_begin('one')
trace_event.trace_begin('two')
trace_event.trace_end('one')
trace_event.trace_end('two')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 5)
meta_data = log_output[0]
one_open = log_output[1]
two_open = log_output[2]
two_close = log_output[4]
one_close = log_output[3]
self.assertEquals(meta_data['category'], 'process_argv')
self.assertEquals(meta_data['name'], 'process_argv')
self.assertTrue(meta_data['args']['argv'])
self.assertEquals(meta_data['ph'], 'M')
self.assertEquals(one_open['category'], 'python')
self.assertEquals(one_open['name'], 'one')
self.assertEquals(one_open['ph'], 'B')
self.assertEquals(one_close['category'], 'python')
self.assertEquals(one_close['name'], 'one')
self.assertEquals(one_close['ph'], 'E')
self.assertEquals(two_open['category'], 'python')
self.assertEquals(two_open['name'], 'two')
self.assertEquals(two_open['ph'], 'B')
self.assertEquals(two_close['category'], 'python')
self.assertEquals(two_close['name'], 'two')
self.assertEquals(two_close['ph'], 'E')
self.assertLessEqual(one_open['ts'], two_open['ts'])
self.assertLessEqual(one_close['ts'], two_close['ts'])
# TODO(khokhlov): Fix this test on Windows. See crbug.com/945819 for details.
def disabled_testMultiprocess(self):
def child_function():
with trace_event.trace('child_event'):
pass
with self._test_trace():
trace_event.trace_begin('parent_event')
trace_event.trace_flush()
p = multiprocessing.Process(target=child_function)
p.start()
self.assertTrue(hasattr(p, "_shimmed_by_trace_event"))
p.join()
trace_event.trace_end('parent_event')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 5)
meta_data = log_output[0]
parent_open = log_output[1]
child_open = log_output[2]
child_close = log_output[3]
parent_close = log_output[4]
self.assertEquals(meta_data['category'], 'process_argv')
self.assertEquals(meta_data['name'], 'process_argv')
self.assertTrue(meta_data['args']['argv'])
self.assertEquals(meta_data['ph'], 'M')
self.assertEquals(parent_open['category'], 'python')
self.assertEquals(parent_open['name'], 'parent_event')
self.assertEquals(parent_open['ph'], 'B')
self.assertEquals(child_open['category'], 'python')
self.assertEquals(child_open['name'], 'child_event')
self.assertEquals(child_open['ph'], 'B')
self.assertEquals(child_close['category'], 'python')
self.assertEquals(child_close['name'], 'child_event')
self.assertEquals(child_close['ph'], 'E')
self.assertEquals(parent_close['category'], 'python')
self.assertEquals(parent_close['name'], 'parent_event')
self.assertEquals(parent_close['ph'], 'E')
@unittest.skipIf(sys.platform == 'win32', 'crbug.com/945819')
def testTracingControlDisabledInChildButNotInParent(self):
def child(resp):
# test tracing is not controllable in the child
resp.put(trace_event.is_tracing_controllable())
with self._test_trace():
q = multiprocessing.Queue()
p = multiprocessing.Process(target=child, args=[q])
p.start()
# test tracing is controllable in the parent
self.assertTrue(trace_event.is_tracing_controllable())
self.assertFalse(q.get())
p.join()
def testMultiprocessExceptionInChild(self):
def bad_child():
trace_event.trace_disable()
with self._test_trace():
p = multiprocessing.Pool(1)
trace_event.trace_begin('parent')
self.assertRaises(Exception, lambda: p.apply(bad_child, ()))
p.close()
p.terminate()
p.join()
trace_event.trace_end('parent')
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 3)
meta_data = log_output[0]
parent_open = log_output[1]
parent_close = log_output[2]
self.assertEquals(parent_open['category'], 'python')
self.assertEquals(parent_open['name'], 'parent')
self.assertEquals(parent_open['ph'], 'B')
self.assertEquals(parent_close['category'], 'python')
self.assertEquals(parent_close['name'], 'parent')
self.assertEquals(parent_close['ph'], 'E')
def testFormatJson(self):
with self._test_trace(format=trace_event.JSON):
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
log_output = json.loads(f.read() + ']')
self.assertEquals(len(log_output), 1)
self.assertEquals(log_output[0]['ph'], 'M')
def testFormatJsonWithMetadata(self):
with self._test_trace(format=trace_event.JSON_WITH_METADATA):
trace_event.trace_disable()
with open(self._log_path, 'r') as f:
log_output = json.load(f)
self.assertEquals(len(log_output), 2)
events = log_output['traceEvents']
self.assertEquals(len(events), 1)
self.assertEquals(events[0]['ph'], 'M')
def testFormatProtobuf(self):
with self._test_trace(format=trace_event.PROTOBUF):
trace_event.trace_flush()
with open(self._log_path, 'r') as f:
self.assertGreater(len(f.read()), 0)
def testAddMetadata(self):
with self._test_trace(format=trace_event.JSON_WITH_METADATA):
trace_event.trace_add_benchmark_metadata(
benchmark_start_time_us=1000,
story_run_time_us=2000,
benchmark_name='benchmark',
benchmark_description='desc',
story_name='story',
story_tags=['tag1', 'tag2'],
story_run_index=0,
)
trace_event.trace_disable()
with open(self._log_path, 'r') as f:
log_output = json.load(f)
self.assertEquals(len(log_output), 2)
telemetry_metadata = log_output['metadata']['telemetry']
self.assertEquals(len(telemetry_metadata), 7)
self.assertEquals(telemetry_metadata['benchmarkStart'], 1)
self.assertEquals(telemetry_metadata['traceStart'], 2)
self.assertEquals(telemetry_metadata['benchmarks'], ['benchmark'])
self.assertEquals(telemetry_metadata['benchmarkDescriptions'], ['desc'])
self.assertEquals(telemetry_metadata['stories'], ['story'])
self.assertEquals(telemetry_metadata['storyTags'], ['tag1', 'tag2'])
self.assertEquals(telemetry_metadata['storysetRepeats'], [0])
def testAddMetadataProtobuf(self):
with self._test_trace(format=trace_event.PROTOBUF):
trace_event.trace_add_benchmark_metadata(
benchmark_start_time_us=1000,
story_run_time_us=2000,
benchmark_name='benchmark',
benchmark_description='desc',
story_name='story',
story_tags=['tag1', 'tag2'],
story_run_index=0,
)
trace_event.trace_disable()
with open(self._log_path, 'r') as f:
self.assertGreater(len(f.read()), 0)
def testAddMetadataInJsonFormatRaises(self):
with self._test_trace(format=trace_event.JSON):
with self.assertRaises(log.TraceException):
trace_event.trace_add_benchmark_metadata(
benchmark_start_time_us=1000,
story_run_time_us=2000,
benchmark_name='benchmark',
benchmark_description='description',
story_name='story',
story_tags=['tag1', 'tag2'],
story_run_index=0,
)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)

View File

@ -0,0 +1,234 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ctypes
import ctypes.util
import logging
import os
import platform
import sys
import time
import threading
GET_TICK_COUNT_LAST_NOW = 0
# If the value returned by GetTickCount() is less than
# GET_TICK_COUNT_LAST_NOW, the clock has rolled over, and this needs to be
# accounted for.
GET_TICK_COUNT_WRAPAROUNDS = 0
# The clock type and now function detected for the current platform.
_CLOCK = None
_NOW_FUNCTION = None
# Mapping of supported platforms and what is returned by sys.platform.
_PLATFORMS = {
'mac': 'darwin',
'linux': 'linux',
'windows': 'win32',
'cygwin': 'cygwin',
'freebsd': 'freebsd',
'sunos': 'sunos5',
'bsd': 'bsd'
}
# Mapping of what to pass clock_gettime based on platform.
_CLOCK_MONOTONIC = {
'linux': 1,
'freebsd': 4,
'bsd': 3,
'sunos5': 4
}
_LINUX_CLOCK = 'LINUX_CLOCK_MONOTONIC'
_MAC_CLOCK = 'MAC_MACH_ABSOLUTE_TIME'
_WIN_HIRES = 'WIN_QPC'
_WIN_LORES = 'WIN_ROLLOVER_PROTECTED_TIME_GET_TIME'
def InitializeMacNowFunction(plat):
"""Sets a monotonic clock for the Mac platform.
Args:
plat: Platform that is being run on. Unused in InitializeMacNowFunction.
Passed for consistency between initializers.
"""
del plat # Unused
global _CLOCK # pylint: disable=global-statement
global _NOW_FUNCTION # pylint: disable=global-statement
_CLOCK = _MAC_CLOCK
libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
class MachTimebaseInfoData(ctypes.Structure):
"""System timebase info. Defined in <mach/mach_time.h>."""
_fields_ = (('numer', ctypes.c_uint32),
('denom', ctypes.c_uint32))
mach_absolute_time = libc.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
timebase = MachTimebaseInfoData()
libc.mach_timebase_info(ctypes.byref(timebase))
ticks_per_second = timebase.numer / timebase.denom * 1.0e9
def MacNowFunctionImpl():
return mach_absolute_time() / ticks_per_second
_NOW_FUNCTION = MacNowFunctionImpl
def GetClockGetTimeClockNumber(plat):
for key in _CLOCK_MONOTONIC:
if plat.startswith(key):
return _CLOCK_MONOTONIC[key]
raise LookupError('Platform not in clock dictionary')
def InitializeLinuxNowFunction(plat):
"""Sets a monotonic clock for linux platforms.
Args:
plat: Platform that is being run on.
"""
global _CLOCK # pylint: disable=global-statement
global _NOW_FUNCTION # pylint: disable=global-statement
_CLOCK = _LINUX_CLOCK
clock_monotonic = GetClockGetTimeClockNumber(plat)
try:
# Attempt to find clock_gettime in the C library.
clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
use_errno=True).clock_gettime
except AttributeError:
# If it is not found in the C library, look in the rt library.
clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
use_errno=True).clock_gettime
class Timespec(ctypes.Structure):
"""Time specification, as described in clock_gettime(3)."""
_fields_ = (('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long))
def LinuxNowFunctionImpl():
ts = Timespec()
if clock_gettime(clock_monotonic, ctypes.pointer(ts)):
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno))
return ts.tv_sec + ts.tv_nsec / 1.0e9
_NOW_FUNCTION = LinuxNowFunctionImpl
def IsQPCUsable():
"""Determines if system can query the performance counter.
The performance counter is a high resolution timer on Windows systems.
Some chipsets have unreliable performance counters, so this checks that one
of those chipsets is not present.
Returns:
True if QPC is usable, False otherwise.
"""
# Sample output: 'Intel64 Family 6 Model 23 Stepping 6, GenuineIntel'
info = platform.processor()
if 'AuthenticAMD' in info and 'Family 15' in info:
return False
if not hasattr(ctypes, 'windll'):
return False
try: # If anything goes wrong during this, assume QPC isn't available.
frequency = ctypes.c_int64()
ctypes.windll.Kernel32.QueryPerformanceFrequency(
ctypes.byref(frequency))
if float(frequency.value) <= 0:
return False
except Exception: # pylint: disable=broad-except
logging.exception('Error when determining if QPC is usable.')
return False
return True
def InitializeWinNowFunction(plat):
"""Sets a monotonic clock for windows platforms.
Args:
plat: Platform that is being run on.
"""
global _CLOCK # pylint: disable=global-statement
global _NOW_FUNCTION # pylint: disable=global-statement
if IsQPCUsable():
_CLOCK = _WIN_HIRES
qpc_return = ctypes.c_int64()
qpc_frequency = ctypes.c_int64()
ctypes.windll.Kernel32.QueryPerformanceFrequency(
ctypes.byref(qpc_frequency))
qpc_frequency = float(qpc_frequency.value)
qpc = ctypes.windll.Kernel32.QueryPerformanceCounter
def WinNowFunctionImpl():
qpc(ctypes.byref(qpc_return))
return qpc_return.value / qpc_frequency
else:
_CLOCK = _WIN_LORES
kernel32 = (ctypes.cdll.kernel32
if plat.startswith(_PLATFORMS['cygwin'])
else ctypes.windll.kernel32)
get_tick_count_64 = getattr(kernel32, 'GetTickCount64', None)
# Windows Vista or newer
if get_tick_count_64:
get_tick_count_64.restype = ctypes.c_ulonglong
def WinNowFunctionImpl():
return get_tick_count_64() / 1000.0
else: # Pre Vista.
get_tick_count = kernel32.GetTickCount
get_tick_count.restype = ctypes.c_uint32
get_tick_count_lock = threading.Lock()
def WinNowFunctionImpl():
global GET_TICK_COUNT_LAST_NOW # pylint: disable=global-statement
global GET_TICK_COUNT_WRAPAROUNDS # pylint: disable=global-statement
with get_tick_count_lock:
current_sample = get_tick_count()
if current_sample < GET_TICK_COUNT_LAST_NOW:
GET_TICK_COUNT_WRAPAROUNDS += 1
GET_TICK_COUNT_LAST_NOW = current_sample
final_ms = GET_TICK_COUNT_WRAPAROUNDS << 32
final_ms += GET_TICK_COUNT_LAST_NOW
return final_ms / 1000.0
_NOW_FUNCTION = WinNowFunctionImpl
def InitializeNowFunction(plat):
"""Sets a monotonic clock for the current platform.
Args:
plat: Platform that is being run on.
"""
if plat.startswith(_PLATFORMS['mac']):
InitializeMacNowFunction(plat)
elif (plat.startswith(_PLATFORMS['linux'])
or plat.startswith(_PLATFORMS['freebsd'])
or plat.startswith(_PLATFORMS['bsd'])
or plat.startswith(_PLATFORMS['sunos'])):
InitializeLinuxNowFunction(plat)
elif (plat.startswith(_PLATFORMS['windows'])
or plat.startswith(_PLATFORMS['cygwin'])):
InitializeWinNowFunction(plat)
else:
raise RuntimeError('%s is not a supported platform.' % plat)
global _NOW_FUNCTION
global _CLOCK
assert _NOW_FUNCTION, 'Now function not properly set during initialization.'
assert _CLOCK, 'Clock not properly set during initialization.'
def Now():
return _NOW_FUNCTION() * 1e6 # convert from seconds to microseconds
def GetClock():
return _CLOCK
InitializeNowFunction(sys.platform)
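# Usage sketch (hedged): Now() returns a monotonic timestamp in microseconds,
# so elapsed time is a simple subtraction:
#
#   start = Now()
#   do_work()          # illustrative
#   elapsed_us = Now() - start
#
# Worked example for the pre-Vista wraparound path above: GetTickCount()
# wraps every 2**32 ms (~49.7 days). After one observed wraparound with a
# current sample of 1000 ms, final_ms = (1 << 32) + 1000.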

View File

@ -0,0 +1,123 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import logging
import platform
import sys
import unittest
from py_trace_event import trace_time
class TimerTest(unittest.TestCase):
# Helper methods.
@contextlib.contextmanager
def ReplacePlatformProcessorCall(self, f):
try:
old_proc = platform.processor
platform.processor = f
yield
finally:
platform.processor = old_proc
@contextlib.contextmanager
def ReplaceQPCCheck(self, f):
try:
old_qpc = trace_time.IsQPCUsable
trace_time.IsQPCUsable = f
yield
finally:
trace_time.IsQPCUsable = old_qpc
# Platform detection tests.
def testInitializeNowFunction_platformNotSupported(self):
with self.assertRaises(RuntimeError):
trace_time.InitializeNowFunction('invalid_platform')
def testInitializeNowFunction_windows(self):
if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
return True
trace_time.InitializeNowFunction(sys.platform)
self.assertTrue(trace_time.GetClock() == trace_time._WIN_HIRES
or trace_time.GetClock() == trace_time._WIN_LORES)
def testInitializeNowFunction_linux(self):
if not sys.platform.startswith(trace_time._PLATFORMS['linux']):
return True
trace_time.InitializeNowFunction(sys.platform)
self.assertEqual(trace_time.GetClock(), trace_time._LINUX_CLOCK)
def testInitializeNowFunction_mac(self):
if not sys.platform.startswith(trace_time._PLATFORMS['mac']):
return True
trace_time.InitializeNowFunction(sys.platform)
self.assertEqual(trace_time.GetClock(), trace_time._MAC_CLOCK)
# Windows Tests
def testIsQPCUsable_buggyAthlonProcReturnsFalse(self):
if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
return True
def BuggyAthlonProc():
return 'AMD64 Family 15 Model 23 Stepping 6, AuthenticAMD'
with self.ReplacePlatformProcessorCall(BuggyAthlonProc):
self.assertFalse(trace_time.IsQPCUsable())
def testIsQPCUsable_returnsTrueOnWindows(self):
if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
return True
def Proc():
return 'Intel64 Family 15 Model 23 Stepping 6, GenuineIntel'
with self.ReplacePlatformProcessorCall(Proc):
self.assertTrue(trace_time.IsQPCUsable())
def testGetWinNowFunction_QPC(self):
if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
return True
# Test requires QPC to be available on platform.
if not trace_time.IsQPCUsable():
return True
self.assertGreater(trace_time.Now(), 0)
# Forces the GetTickCount path even when QPC would be usable.
def testGetWinNowFunction_GetTickCount(self):
if not (sys.platform.startswith(trace_time._PLATFORMS['windows'])
or sys.platform.startswith(trace_time._PLATFORMS['cygwin'])):
return True
with self.ReplaceQPCCheck(lambda: False):
self.assertGreater(trace_time.Now(), 0)
# Linux tests.
def testGetClockGetTimeClockNumber_linux(self):
self.assertEquals(trace_time.GetClockGetTimeClockNumber('linux'), 1)
def testGetClockGetTimeClockNumber_freebsd(self):
self.assertEquals(trace_time.GetClockGetTimeClockNumber('freebsd'), 4)
def testGetClockGetTimeClockNumber_bsd(self):
self.assertEquals(trace_time.GetClockGetTimeClockNumber('bsd'), 3)
def testGetClockGetTimeClockNumber_sunos(self):
self.assertEquals(trace_time.GetClockGetTimeClockNumber('sunos5'), 4)
# Smoke Test.
def testMonotonic(self):
time_one = trace_time.Now()
for _ in xrange(1000):
time_two = trace_time.Now()
self.assertLessEqual(time_one, time_two)
time_one = time_two
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)

View File

@ -0,0 +1,12 @@
Name: Protobuf
URL: https://developers.google.com/protocol-buffers/
Version: 3.0.0
License: BSD
Description:
Protocol buffers are Google's language-neutral, platform-neutral,
extensible mechanism for serializing structured data.
Local Modifications:
Removed pretty much everything except functions necessary to write
bools, ints, and strings.

View File

@ -0,0 +1,224 @@
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import six
import wire_format
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _SignedVarintSize(value):
"""Compute the size of a signed varint value."""
if value < 0: return 10
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _VarintEncoder():
"""Return an encoder for a basic varint value (does not include tag)."""
def EncodeVarint(write, value):
bits = value & 0x7f
value >>= 7
while value:
write(six.int2byte(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(six.int2byte(bits))
return EncodeVarint
def _SignedVarintEncoder():
"""Return an encoder for a basic signed varint value (does not include
tag)."""
def EncodeSignedVarint(write, value):
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
write(six.int2byte(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(six.int2byte(bits))
return EncodeSignedVarint
_EncodeVarint = _VarintEncoder()
_EncodeSignedVarint = _SignedVarintEncoder()
def _VarintBytes(value):
"""Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast."""
pieces = []
_EncodeVarint(pieces.append, value)
return b"".join(pieces)
def TagBytes(field_number, wire_type):
"""Encode the given tag and return the bytes. Only called at startup."""
return _VarintBytes(wire_format.PackTag(field_number, wire_type))
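# Worked examples (values can be checked by hand):
#   _VarintBytes(300) == b'\xac\x02'
#     300 = 0b10_0101100; low 7 bits -> 0x2c | 0x80 = 0xac, then 0x02.
#   TagBytes(1, wire_format.WIRETYPE_LENGTH_DELIMITED) == b'\x0a'
#     because PackTag(1, 2) == (1 << 3) | 2 == 10.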
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
"""Return a constructor for an encoder for fields of a particular type.
Args:
wire_type: The field's wire type, for encoding tags.
encode_value: A function which encodes an individual value, e.g.
_EncodeVarint().
compute_value_size: A function which computes the size of an individual
value, e.g. _VarintSize().
"""
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(element)
local_EncodeVarint(write, size)
for element in value:
encode_value(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, value)
return EncodeField
return SpecificEncoder
Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)
UInt32Encoder = UInt64Encoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)
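# Usage sketch (hedged): an encoder writes the tag bytes, then the value.
#
#   pieces = []
#   encode = UInt32Encoder(field_number=2, is_repeated=False, is_packed=False)
#   encode(pieces.append, 150)
#   assert b''.join(pieces) == b'\x10\x96\x01'  # tag (2<<3)|0, varint(150)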
def BoolEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a boolean field."""
false_byte = b'\x00'
true_byte = b'\x01'
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value))
for element in value:
if element:
write(true_byte)
else:
write(false_byte)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
if element:
write(true_byte)
else:
write(false_byte)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeField(write, value):
write(tag_bytes)
if value:
return write(true_byte)
return write(false_byte)
return EncodeField
def StringEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a string field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
encoded = element.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
write(encoded)
return EncodeRepeatedField
else:
def EncodeField(write, value):
encoded = value.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
return write(encoded)
return EncodeField

View File

@ -0,0 +1,52 @@
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
WIRETYPE_VARINT = 0
WIRETYPE_FIXED64 = 1
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
WIRETYPE_END_GROUP = 4
WIRETYPE_FIXED32 = 5
_WIRETYPE_MAX = 5
def PackTag(field_number, wire_type):
"""Returns an unsigned 32-bit integer that encodes the field number and
wire type information in standard protocol message wire format.
Args:
field_number: Expected to be an integer in the range [1, 1 << 29)
wire_type: One of the WIRETYPE_* constants.
"""
if not 0 <= wire_type <= _WIRETYPE_MAX:
raise RuntimeError('Unknown wire type: %d' % wire_type)
return (field_number << TAG_TYPE_BITS) | wire_type

View File

@ -0,0 +1,31 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
results = []
results += input_api.RunTests(input_api.canned_checks.GetPylint(
input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
pylintrc='../../pylintrc'))
return results
def _GetPathsToPrepend(input_api):
project_dir = input_api.PresubmitLocalPath()
catapult_dir = input_api.os_path.join(project_dir, '..', '..')
return [
project_dir,
input_api.os_path.join(catapult_dir, 'dependency_manager'),
input_api.os_path.join(catapult_dir, 'devil'),
input_api.os_path.join(catapult_dir, 'third_party', 'mock'),
input_api.os_path.join(catapult_dir, 'third_party', 'pyfakefs'),
]

View File

@ -0,0 +1,38 @@
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
_CATAPULT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..'))
_PY_UTILS_PATH = os.path.abspath(
os.path.join(_CATAPULT_PATH, 'common', 'py_utils'))
def _RunTestsOrDie(top_level_dir):
exit_code = run_with_typ.Run(top_level_dir, path=[_PY_UTILS_PATH])
if exit_code:
sys.exit(exit_code)
def _AddToPathIfNeeded(path):
if path not in sys.path:
sys.path.insert(0, path)
if __name__ == '__main__':
_AddToPathIfNeeded(_CATAPULT_PATH)
from hooks import install
if '--no-install-hooks' in sys.argv:
sys.argv.remove('--no-install-hooks')
else:
install.InstallHooks()
from catapult_build import run_with_typ
_RunTestsOrDie(_PY_UTILS_PATH)
sys.exit(0)

View File

@ -0,0 +1,158 @@
#!/usr/bin/env python
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import functools
import inspect
import os
import sys
import time
import platform
def GetCatapultDir():
return os.path.normpath(
os.path.join(os.path.dirname(__file__), '..', '..', '..'))
def IsRunningOnCrosDevice():
"""Returns True if we're on a ChromeOS device."""
lsb_release = '/etc/lsb-release'
if sys.platform.startswith('linux') and os.path.exists(lsb_release):
with open(lsb_release, 'r') as f:
res = f.read()
if res.count('CHROMEOS_RELEASE_NAME'):
return True
return False
def GetHostOsName():
if IsRunningOnCrosDevice():
return 'chromeos'
elif sys.platform.startswith('linux'):
return 'linux'
elif sys.platform == 'darwin':
return 'mac'
elif sys.platform == 'win32':
return 'win'
def GetHostArchName():
return platform.machine()
def _ExecutableExtensions():
# pathext is, e.g. '.com;.exe;.bat;.cmd'
exts = os.getenv('PATHEXT').split(';') #e.g. ['.com','.exe','.bat','.cmd']
return [x[1:].upper() for x in exts] #e.g. ['COM','EXE','BAT','CMD']
def IsExecutable(path):
if os.path.isfile(path):
if hasattr(os, 'name') and os.name == 'nt':
return path.split('.')[-1].upper() in _ExecutableExtensions()
else:
return os.access(path, os.X_OK)
else:
return False
def _AddDirToPythonPath(*path_parts):
path = os.path.abspath(os.path.join(*path_parts))
if os.path.isdir(path) and path not in sys.path:
# Some callsites that use telemetry assume that sys.path[0] is the directory
# containing the script, so we add these extra paths right after it.
sys.path.insert(1, path)
_AddDirToPythonPath(os.path.join(GetCatapultDir(), 'devil'))
_AddDirToPythonPath(os.path.join(GetCatapultDir(), 'dependency_manager'))
_AddDirToPythonPath(os.path.join(GetCatapultDir(), 'third_party', 'mock'))
# mox3 is needed for pyfakefs usage, but not for pylint.
_AddDirToPythonPath(os.path.join(GetCatapultDir(), 'third_party', 'mox3'))
_AddDirToPythonPath(
os.path.join(GetCatapultDir(), 'third_party', 'pyfakefs'))
from devil.utils import timeout_retry # pylint: disable=wrong-import-position
from devil.utils import reraiser_thread # pylint: disable=wrong-import-position
# Decorator that adds timeout functionality to a function.
def Timeout(default_timeout):
return lambda func: TimeoutDeco(func, default_timeout)
# Note: Even though the "timeout" keyword argument is the only
# keyword argument that will need to be given to the decorated function,
# we still have to use the **kwargs syntax, because we have to use
# the *args syntax here before (since the decorator decorates functions
# with different numbers of positional arguments) and Python doesn't allow
# a single named keyword argument after *args.
# (e.g., 'def foo(*args, bar=42):' is a syntax error)
def TimeoutDeco(func, default_timeout):
@functools.wraps(func)
def RunWithTimeout(*args, **kwargs):
if 'timeout' in kwargs:
timeout = kwargs['timeout']
else:
timeout = default_timeout
try:
return timeout_retry.Run(func, timeout, 0, args=args)
except reraiser_thread.TimeoutError:
print('%s timed out.' % func.__name__)
return False
return RunWithTimeout
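# Usage sketch (hedged; FlashDevice is illustrative):
#
#   @Timeout(60)
#   def FlashDevice(device):
#     ...
#
#   FlashDevice(device)              # runs with the 60s default timeout
#   FlashDevice(device, timeout=5)   # per-call override
#
# On timeout the wrapper prints a message and returns False.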
MIN_POLL_INTERVAL_IN_SECONDS = 0.1
MAX_POLL_INTERVAL_IN_SECONDS = 5
OUTPUT_INTERVAL_IN_SECONDS = 300
def WaitFor(condition, timeout):
"""Waits for up to |timeout| secs for the function |condition| to return True.
The polling interval is (elapsed_time / 10), with a min of .1s and max of 5s.
Returns:
Result of |condition| function (if present).
"""
def GetConditionString():
if condition.__name__ == '<lambda>':
try:
return inspect.getsource(condition).strip()
except IOError:
pass
return condition.__name__
# Do an initial check to see if it's true.
res = condition()
if res:
return res
start_time = time.time()
last_output_time = start_time
elapsed_time = time.time() - start_time
while elapsed_time < timeout:
res = condition()
if res:
return res
now = time.time()
elapsed_time = now - start_time
last_output_elapsed_time = now - last_output_time
if last_output_elapsed_time > OUTPUT_INTERVAL_IN_SECONDS:
last_output_time = time.time()
poll_interval = min(max(elapsed_time / 10., MIN_POLL_INTERVAL_IN_SECONDS),
MAX_POLL_INTERVAL_IN_SECONDS)
time.sleep(poll_interval)
raise TimeoutException('Timed out while waiting %ds for %s.' %
(timeout, GetConditionString()))
class TimeoutException(Exception):
"""The operation failed to complete because of a timeout.
It is possible that waiting for a longer period of time would result in a
successful operation.
"""
pass
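# Usage sketch (hedged; the path is illustrative):
#
#   WaitFor(lambda: os.path.exists('/tmp/ready'), timeout=10)
#
# returns the truthy result as soon as the condition holds, or raises
# TimeoutException after ~10 seconds.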

View File

@ -0,0 +1,21 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import logging
def _WrapFunction(function):
def _WrappedFn(*args, **kwargs):
logging.debug('Try running %s', repr(function))
try:
function(*args, **kwargs)
logging.debug('Did run %s', repr(function))
except Exception: # pylint: disable=broad-except
logging.exception('Exception running %s', repr(function))
return _WrappedFn
def Register(function, *args, **kwargs):
atexit.register(_WrapFunction(function), *args, **kwargs)
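# Usage sketch (hedged; temp_dir is illustrative): Register behaves like
# atexit.register, but exceptions raised by the function at interpreter exit
# are logged instead of propagating.
#
#   Register(shutil.rmtree, temp_dir, ignore_errors=True)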

View File

@ -0,0 +1,61 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import dependency_manager
class BinaryManager(object):
""" This class is effectively a subclass of dependency_manager, but uses a
different number of arguments for FetchPath and LocalPath.
"""
def __init__(self, config_files):
if not config_files or not isinstance(config_files, list):
raise ValueError(
'Must supply a list of config files to the BinaryManager')
configs = [dependency_manager.BaseConfig(config) for config in config_files]
self._dependency_manager = dependency_manager.DependencyManager(configs)
def FetchPathWithVersion(self, binary_name, os_name, arch, os_version=None):
""" Return a path to the executable for <binary_name>, or None if not found.
Will attempt to download from cloud storage if needed.
"""
return self._WrapDependencyManagerFunction(
self._dependency_manager.FetchPathWithVersion, binary_name, os_name,
arch, os_version)
def FetchPath(self, binary_name, os_name, arch, os_version=None):
""" Return a path to the executable for <binary_name>, or None if not found.
Will attempt to download from cloud storage if needed.
"""
return self._WrapDependencyManagerFunction(
self._dependency_manager.FetchPath, binary_name, os_name, arch,
os_version)
def LocalPath(self, binary_name, os_name, arch, os_version=None):
""" Return a local path to the given binary name, or None if not found.
Will not download from cloud_storage.
"""
return self._WrapDependencyManagerFunction(
self._dependency_manager.LocalPath, binary_name, os_name, arch,
os_version)
def _WrapDependencyManagerFunction(
self, function, binary_name, os_name, arch, os_version):
platform = '%s_%s' % (os_name, arch)
if os_version:
try:
versioned_platform = '%s_%s_%s' % (os_name, os_version, arch)
return function(binary_name, versioned_platform)
except dependency_manager.NoPathFoundError:
logging.warning(
'Cannot find path for %s on platform %s. Falling back to %s.',
binary_name, versioned_platform, platform)
return function(binary_name, platform)
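# Usage sketch (hedged; the config path is illustrative, the dependency
# names match the example config later in this commit):
#
#   manager = BinaryManager(['binary_dependencies.json'])
#   path = manager.FetchPath('chrome_stable', 'android', 'armeabi-v7a', 'l')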

View File

@ -0,0 +1,214 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
from pyfakefs import fake_filesystem_unittest
from dependency_manager import exceptions
from py_utils import binary_manager
class BinaryManagerTest(fake_filesystem_unittest.TestCase):
# TODO(aiolos): disable cloud storage use during this test.
def setUp(self):
self.setUpPyfakefs()
# pylint: disable=bad-continuation
self.expected_dependencies = {
'dep_1': {
'cloud_storage_base_folder': 'dependencies/fake_config',
'cloud_storage_bucket': 'chrome-tel',
'file_info': {
'linux_x86_64': {
'cloud_storage_hash': '661ce936b3276f7ec3d687ab62be05b96d796f21',
'download_path': 'bin/linux/x86_64/dep_1'
},
'mac_x86_64': {
'cloud_storage_hash': 'c7b1bfc6399dc683058e88dac1ef0f877edea74b',
'download_path': 'bin/mac/x86_64/dep_1'
},
'win_AMD64': {
'cloud_storage_hash': 'ac4fee89a51662b9d920bce443c19b9b2929b198',
'download_path': 'bin/win/AMD64/dep_1.exe'
},
'win_x86': {
'cloud_storage_hash': 'e246e183553ea26967d7b323ea269e3357b9c837',
'download_path': 'bin/win/x86/dep_1.exe'
}
}
},
'dep_2': {
'cloud_storage_base_folder': 'dependencies/fake_config',
'cloud_storage_bucket': 'chrome-tel',
'file_info': {
'linux_x86_64': {
'cloud_storage_hash': '13a57efae9a680ac0f160b3567e02e81f4ac493c',
'download_path': 'bin/linux/x86_64/dep_2',
'local_paths': [
'../../example/location/linux/dep_2',
'../../example/location2/linux/dep_2'
]
},
'mac_x86_64': {
'cloud_storage_hash': 'd10c0ddaa8586b20449e951216bee852fa0f8850',
'download_path': 'bin/mac/x86_64/dep_2',
'local_paths': [
'../../example/location/mac/dep_2',
'../../example/location2/mac/dep_2'
]
},
'win_AMD64': {
'cloud_storage_hash': 'fd5b417f78c7f7d9192a98967058709ded1d399d',
'download_path': 'bin/win/AMD64/dep_2.exe',
'local_paths': [
'../../example/location/win64/dep_2',
'../../example/location2/win64/dep_2'
]
},
'win_x86': {
'cloud_storage_hash': 'cf5c8fe920378ce30d057e76591d57f63fd31c1a',
'download_path': 'bin/win/x86/dep_2.exe',
'local_paths': [
'../../example/location/win32/dep_2',
'../../example/location2/win32/dep_2'
]
},
'android_k_x64': {
'cloud_storage_hash': '09177be2fed00b44df0e777932828425440b23b3',
'download_path': 'bin/android/x64/k/dep_2.apk',
'local_paths': [
'../../example/location/android_x64/k/dep_2',
'../../example/location2/android_x64/k/dep_2'
]
},
'android_l_x64': {
'cloud_storage_hash': '09177be2fed00b44df0e777932828425440b23b3',
'download_path': 'bin/android/x64/l/dep_2.apk',
'local_paths': [
'../../example/location/android_x64/l/dep_2',
'../../example/location2/android_x64/l/dep_2'
]
},
'android_k_x86': {
'cloud_storage_hash': 'bcf02af039713a48b69b89bd7f0f9c81ed8183a4',
'download_path': 'bin/android/x86/k/dep_2.apk',
'local_paths': [
'../../example/location/android_x86/k/dep_2',
'../../example/location2/android_x86/k/dep_2'
]
},
'android_l_x86': {
'cloud_storage_hash': '12a74cec071017ba11655b5740b8a58e2f52a219',
'download_path': 'bin/android/x86/l/dep_2.apk',
'local_paths': [
'../../example/location/android_x86/l/dep_2',
'../../example/location2/android_x86/l/dep_2'
]
}
}
},
'dep_3': {
'file_info': {
'linux_x86_64': {
'local_paths': [
'../../example/location/linux/dep_3',
'../../example/location2/linux/dep_3'
]
},
'mac_x86_64': {
'local_paths': [
'../../example/location/mac/dep_3',
'../../example/location2/mac/dep_3'
]
},
'win_AMD64': {
'local_paths': [
'../../example/location/win64/dep_3',
'../../example/location2/win64/dep_3'
]
},
'win_x86': {
'local_paths': [
'../../example/location/win32/dep_3',
'../../example/location2/win32/dep_3'
]
}
}
}
}
# pylint: enable=bad-continuation
fake_config = {
'config_type': 'BaseConfig',
'dependencies': self.expected_dependencies
}
self.base_config = os.path.join(os.path.dirname(__file__),
'example_config.json')
self.fs.CreateFile(self.base_config, contents=json.dumps(fake_config))
linux_file = os.path.join(
os.path.dirname(self.base_config),
os.path.join('..', '..', 'example', 'location2', 'linux', 'dep_2'))
android_file = os.path.join(
os.path.dirname(self.base_config),
'..', '..', 'example', 'location', 'android_x86', 'l', 'dep_2')
self.expected_dep2_linux_file = os.path.abspath(linux_file)
self.expected_dep2_android_file = os.path.abspath(android_file)
self.fs.CreateFile(self.expected_dep2_linux_file)
self.fs.CreateFile(self.expected_dep2_android_file)
def tearDown(self):
self.tearDownPyfakefs()
def testInitializationNoConfig(self):
with self.assertRaises(ValueError):
binary_manager.BinaryManager(None)
def testInitializationMissingConfig(self):
with self.assertRaises(ValueError):
binary_manager.BinaryManager(os.path.join('missing', 'path'))
def testInitializationWithConfig(self):
with self.assertRaises(ValueError):
manager = binary_manager.BinaryManager(self.base_config)
manager = binary_manager.BinaryManager([self.base_config])
self.assertItemsEqual(self.expected_dependencies,
manager._dependency_manager._lookup_dict)
def testSuccessfulFetchPathNoOsVersion(self):
manager = binary_manager.BinaryManager([self.base_config])
found_path = manager.FetchPath('dep_2', 'linux', 'x86_64')
self.assertEqual(self.expected_dep2_linux_file, found_path)
def testSuccessfulFetchPathOsVersion(self):
manager = binary_manager.BinaryManager([self.base_config])
found_path = manager.FetchPath('dep_2', 'android', 'x86', 'l')
self.assertEqual(self.expected_dep2_android_file, found_path)
def testSuccessfulFetchPathFallbackToNoOsVersion(self):
manager = binary_manager.BinaryManager([self.base_config])
found_path = manager.FetchPath('dep_2', 'linux', 'x86_64', 'fake_version')
self.assertEqual(self.expected_dep2_linux_file, found_path)
def testFailedFetchPathMissingDep(self):
manager = binary_manager.BinaryManager([self.base_config])
with self.assertRaises(exceptions.NoPathFoundError):
manager.FetchPath('missing_dep', 'linux', 'x86_64')
with self.assertRaises(exceptions.NoPathFoundError):
manager.FetchPath('missing_dep', 'android', 'x86', 'l')
with self.assertRaises(exceptions.NoPathFoundError):
manager.FetchPath('dep_1', 'linux', 'bad_arch')
with self.assertRaises(exceptions.NoPathFoundError):
manager.FetchPath('dep_1', 'bad_os', 'x86')
def testSuccessfulLocalPathNoOsVersion(self):
manager = binary_manager.BinaryManager([self.base_config])
found_path = manager.LocalPath('dep_2', 'linux', 'x86_64')
self.assertEqual(self.expected_dep2_linux_file, found_path)
def testSuccessfulLocalPathOsVersion(self):
manager = binary_manager.BinaryManager([self.base_config])
found_path = manager.LocalPath('dep_2', 'android', 'x86', 'l')
self.assertEqual(self.expected_dep2_android_file, found_path)

View File

@ -0,0 +1,34 @@
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import six
def ToUnderscore(obj):
"""Converts a string, list, or dict from camelCase to lower_with_underscores.
Descends recursively into lists and dicts, converting all dict keys.
Returns a newly allocated object of the same structure as the input.
"""
if isinstance(obj, six.string_types):
return re.sub('(?!^)([A-Z]+)', r'_\1', obj).lower()
elif isinstance(obj, list):
return [ToUnderscore(item) for item in obj]
elif isinstance(obj, dict):
output = {}
for k, v in six.iteritems(obj):
if isinstance(v, list) or isinstance(v, dict):
output[ToUnderscore(k)] = ToUnderscore(v)
else:
output[ToUnderscore(k)] = v
return output
else:
return obj

View File

@ -0,0 +1,50 @@
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from py_utils import camel_case
class CamelCaseTest(unittest.TestCase):
def testString(self):
self.assertEqual(camel_case.ToUnderscore('camelCase'), 'camel_case')
self.assertEqual(camel_case.ToUnderscore('CamelCase'), 'camel_case')
self.assertEqual(camel_case.ToUnderscore('Camel2Case'), 'camel2_case')
self.assertEqual(camel_case.ToUnderscore('Camel2Case2'), 'camel2_case2')
self.assertEqual(camel_case.ToUnderscore('2012Q3'), '2012_q3')
def testList(self):
camel_case_list = ['CamelCase', ['NestedList']]
underscore_list = ['camel_case', ['nested_list']]
self.assertEqual(camel_case.ToUnderscore(camel_case_list), underscore_list)
def testDict(self):
camel_case_dict = {
'gpu': {
'vendorId': 1000,
'deviceId': 2000,
'vendorString': 'aString',
'deviceString': 'bString'},
'secondaryGpus': [
{'vendorId': 3000, 'deviceId': 4000,
'vendorString': 'k', 'deviceString': 'l'}
]
}
underscore_dict = {
'gpu': {
'vendor_id': 1000,
'device_id': 2000,
'vendor_string': 'aString',
'device_string': 'bString'},
'secondary_gpus': [
{'vendor_id': 3000, 'device_id': 4000,
'vendor_string': 'k', 'device_string': 'l'}
]
}
self.assertEqual(camel_case.ToUnderscore(camel_case_dict), underscore_dict)
def testOther(self):
self.assertEqual(camel_case.ToUnderscore(self), self)

View File

@ -0,0 +1,126 @@
{
"config_type": "BaseConfig",
"dependencies": {
"chrome_canary": {
"cloud_storage_base_folder": "binary_dependencies",
"cloud_storage_bucket": "chrome-telemetry",
"file_info": {
"mac_x86_64": {
"cloud_storage_hash": "381a491e14ab523b8db4cdf3c993713678237af8",
"download_path": "bin/reference_builds/chrome-mac64.zip",
"path_within_archive": "chrome-mac/Google Chrome.app/Contents/MacOS/Google Chrome",
"version_in_cs": "77.0.3822.0"
},
"win_AMD64": {
"cloud_storage_hash": "600ee522c410efe1de2f593c0efc32ae113a7d99",
"download_path": "bin\\reference_build\\chrome-win64-clang.zip",
"path_within_archive": "chrome-win64-clang\\chrome.exe",
"version_in_cs": "77.0.3822.0"
},
"win_x86": {
"cloud_storage_hash": "5b79a181bfbd94d8288529b0da1defa3ef097197",
"download_path": "bin\\reference_build\\chrome-win32-clang.zip",
"path_within_archive": "chrome-win32-clang\\chrome.exe",
"version_in_cs": "77.0.3822.0"
}
}
},
"chrome_dev": {
"cloud_storage_base_folder": "binary_dependencies",
"cloud_storage_bucket": "chrome-telemetry",
"file_info": {
"linux_x86_64": {
"cloud_storage_hash": "61d68a6b00f25c964f5162f5251962468c886f3a",
"download_path": "bin/reference_build/chrome-linux64.zip",
"path_within_archive": "chrome-linux64/chrome",
"version_in_cs": "76.0.3809.21"
}
}
},
"chrome_stable": {
"cloud_storage_base_folder": "binary_dependencies",
"cloud_storage_bucket": "chrome-telemetry",
"file_info": {
"android_k_armeabi-v7a": {
"cloud_storage_hash": "28b913c720d56a30c092625c7862f00175a316c7",
"download_path": "bin/reference_build/android_k_armeabi-v7a/ChromeStable.apk",
"version_in_cs": "75.0.3770.67"
},
"android_l_arm64-v8a": {
"cloud_storage_hash": "4b953c33c61f94c2198e8001d0d8142c6504a875",
"download_path": "bin/reference_build/android_l_arm64-v8a/ChromeStable.apk",
"version_in_cs": "75.0.3770.67"
},
"android_l_armeabi-v7a": {
"cloud_storage_hash": "28b913c720d56a30c092625c7862f00175a316c7",
"download_path": "bin/reference_build/android_l_armeabi-v7a/ChromeStable.apk",
"version_in_cs": "75.0.3770.67"
},
"android_n_arm64-v8a": {
"cloud_storage_hash": "84152ba8f7a25cacc79d588ed827ea75f0e4ab94",
"download_path": "bin/reference_build/android_n_arm64-v8a/Monochrome.apk",
"version_in_cs": "75.0.3770.67"
},
"android_n_armeabi-v7a": {
"cloud_storage_hash": "656bb9e3982d0d35decd5347ced2c320a7267f33",
"download_path": "bin/reference_build/android_n_armeabi-v7a/Monochrome.apk",
"version_in_cs": "75.0.3770.67"
},
"linux_x86_64": {
"cloud_storage_hash": "dee8469e8dcd8453efd33f3a00d7ea302a126a4b",
"download_path": "bin/reference_build/chrome-linux64.zip",
"path_within_archive": "chrome-linux64/chrome",
"version_in_cs": "75.0.3770.80"
},
"mac_x86_64": {
"cloud_storage_hash": "16a43a1e794bb99ec1ebcd40569084985b3c6626",
"download_path": "bin/reference_builds/chrome-mac64.zip",
"path_within_archive": "chrome-mac/Google Chrome.app/Contents/MacOS/Google Chrome",
"version_in_cs": "75.0.3770.80"
},
"win_AMD64": {
"cloud_storage_hash": "1ec52bd4164f2d93c53113a093dae9e041eb2d73",
"download_path": "bin\\reference_build\\chrome-win64-clang.zip",
"path_within_archive": "chrome-win64-clang\\chrome.exe",
"version_in_cs": "75.0.3770.80"
},
"win_x86": {
"cloud_storage_hash": "0f9eb991ba618dc61f2063ea252f44be94c2252e",
"download_path": "bin\\reference_build\\chrome-win-clang.zip",
"path_within_archive": "chrome-win-clang\\chrome.exe",
"version_in_cs": "75.0.3770.80"
}
}
},
"chrome_m72": {
"cloud_storage_base_folder": "binary_dependencies",
"cloud_storage_bucket": "chrome-telemetry",
"file_info": {
"linux_x86_64": {
"cloud_storage_hash": "537c19346b20340cc6807242e1eb6d82dfcfa2e8",
"download_path": "bin/reference_build/chrome-linux64.zip",
"path_within_archive": "chrome-linux64/chrome",
"version_in_cs": "72.0.3626.119"
},
"mac_x86_64": {
"cloud_storage_hash": "7f6a931f696f57561703538c6f799781d6e22e7e",
"download_path": "bin/reference_builds/chrome-mac64.zip",
"path_within_archive": "chrome-mac/Google Chrome.app/Contents/MacOS/Google Chrome",
"version_in_cs": "72.0.3626.119"
},
"win_AMD64": {
"cloud_storage_hash": "563d7985c85bfe77e92b8253d0389ff8551018c7",
"download_path": "bin\\reference_build\\chrome-win64-clang.zip",
"path_within_archive": "chrome-win64-clang\\chrome.exe",
"version_in_cs": "72.0.3626.119"
},
"win_x86": {
"cloud_storage_hash": "1802179da16e44b83bd3f0b296f9e5b0b053d59c",
"download_path": "bin\\reference_build\\chrome-win-clang.zip",
"path_within_archive": "chrome-win-clang\\chrome.exe",
"version_in_cs": "72.0.3626.119"
}
}
}
}
}
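Each file_info key above encodes '<os>_<arch>' (with an extra Android OS-version letter in the middle), which is what the dependency manager matches against. A hedged sketch of that key scheme with a hypothetical lookup helper (this is not the real dependency_manager code, just an illustration):

import json

def lookup_download_path(config_path, dep, os_name, arch, os_version=None):
    # Illustrative only: derives the '<os>[_<version>]_<arch>' key used to
    # index the "file_info" table in configs like the one above.
    with open(config_path) as f:
        config = json.load(f)
    file_info = config['dependencies'][dep]['file_info']
    parts = [os_name, os_version, arch] if os_version else [os_name, arch]
    return file_info['_'.join(parts)]['download_path']

# lookup_download_path('chrome_binaries.json', 'chrome_stable',
#                      'android', 'armeabi-v7a', 'l')
# -> 'bin/reference_build/android_l_armeabi-v7a/ChromeStable.apk'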

View File

@ -0,0 +1,26 @@
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import inspect
def IsMethodOverridden(parent_cls, child_cls, method_name):
assert inspect.isclass(parent_cls), '%s should be a class' % parent_cls
assert inspect.isclass(child_cls), '%s should be a class' % child_cls
assert parent_cls.__dict__.get(method_name), '%s has no method %s' % (
parent_cls, method_name)
if child_cls.__dict__.get(method_name):
# It's overridden
return True
if parent_cls in child_cls.__bases__:
# The parent is a direct base class of the child, and we did not find
# the overridden method.
return False
# For all the base classes of this class that are not object, check if
# they override the method.
base_cls = [cls for cls in child_cls.__bases__ if cls and cls != object]
return any(
IsMethodOverridden(parent_cls, base, method_name) for base in base_cls)
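A minimal usage sketch (class names are illustrative; the unittest below exercises the trickier multiple-inheritance cases):

from py_utils import class_util

class Base(object):
    def Method(self):
        pass

class Derived(Base):
    def Method(self):
        pass

# True: Derived redefines Method. The helper also recurses through
# intermediate bases, so grandchildren of Base are handled too.
print(class_util.IsMethodOverridden(Base, Derived, 'Method'))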

View File

@ -0,0 +1,138 @@
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from py_utils import class_util
class ClassUtilTest(unittest.TestCase):
def testClassOverridden(self):
class Parent(object):
def MethodShouldBeOverridden(self):
pass
class Child(Parent):
def MethodShouldBeOverridden(self):
pass
self.assertTrue(class_util.IsMethodOverridden(
Parent, Child, 'MethodShouldBeOverridden'))
def testGrandchildOverridden(self):
class Parent(object):
def MethodShouldBeOverridden(self):
pass
class Child(Parent):
pass
class Grandchild(Child):
def MethodShouldBeOverridden(self):
pass
self.assertTrue(class_util.IsMethodOverridden(
Parent, Grandchild, 'MethodShouldBeOverridden'))
def testClassNotOverridden(self):
class Parent(object):
def MethodShouldBeOverridden(self):
pass
class Child(Parent):
def SomeOtherMethod(self):
pass
self.assertFalse(class_util.IsMethodOverridden(
Parent, Child, 'MethodShouldBeOverridden'))
def testGrandchildNotOverridden(self):
class Parent(object):
def MethodShouldBeOverridden(self):
pass
class Child(Parent):
def MethodShouldBeOverridden(self):
pass
class Grandchild(Child):
def SomeOtherMethod(self):
pass
self.assertTrue(class_util.IsMethodOverridden(
Parent, Grandchild, 'MethodShouldBeOverridden'))
def testClassNotPresentInParent(self):
class Parent(object):
def MethodShouldBeOverridden(self):
pass
class Child(Parent):
def MethodShouldBeOverridden(self):
pass
self.assertRaises(
AssertionError, class_util.IsMethodOverridden,
Parent, Child, 'WrongMethod')
def testInvalidClass(self):
class Foo(object):
def Bar(self):
pass
self.assertRaises(
AssertionError, class_util.IsMethodOverridden, 'invalid', Foo, 'Bar')
self.assertRaises(
AssertionError, class_util.IsMethodOverridden, Foo, 'invalid', 'Bar')
def testMultipleInheritance(self):
class Aaa(object):
def One(self):
pass
class Bbb(object):
def Two(self):
pass
class Ccc(Aaa, Bbb):
pass
class Ddd(object):
def Three(self):
pass
class Eee(Ddd):
def Three(self):
pass
class Fff(Ccc, Eee):
def One(self):
pass
class Ggg(object):
def Four(self):
pass
class Hhh(Fff, Ggg):
def Two(self):
pass
class Iii(Hhh):
pass
class Jjj(Iii):
pass
self.assertFalse(class_util.IsMethodOverridden(Aaa, Ccc, 'One'))
self.assertTrue(class_util.IsMethodOverridden(Aaa, Fff, 'One'))
self.assertTrue(class_util.IsMethodOverridden(Aaa, Hhh, 'One'))
self.assertTrue(class_util.IsMethodOverridden(Aaa, Jjj, 'One'))
self.assertFalse(class_util.IsMethodOverridden(Bbb, Ccc, 'Two'))
self.assertTrue(class_util.IsMethodOverridden(Bbb, Hhh, 'Two'))
self.assertTrue(class_util.IsMethodOverridden(Bbb, Jjj, 'Two'))
self.assertFalse(class_util.IsMethodOverridden(Eee, Fff, 'Three'))

View File

@ -0,0 +1,502 @@
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrappers for gsutil, for basic interaction with Google Cloud Storage."""
import collections
import contextlib
import hashlib
import logging
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import time
import py_utils
from py_utils import cloud_storage_global_lock # pylint: disable=unused-import
from py_utils import lock
# Do a no-op import here so that cloud_storage_global_lock dep is picked up
# by https://cs.chromium.org/chromium/src/build/android/test_runner.pydeps.
# TODO(nedn, jbudorick): figure out a way to get rid of this ugly hack.
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
PUBLIC_BUCKET = 'chromium-telemetry'
PARTNER_BUCKET = 'chrome-partner-telemetry'
INTERNAL_BUCKET = 'chrome-telemetry'
TELEMETRY_OUTPUT = 'chrome-telemetry-output'
# Use an ordered dict to make sure that the bucket key-value items are ordered
# from the most open to the most restrictive.
BUCKET_ALIASES = collections.OrderedDict((
('public', PUBLIC_BUCKET),
('partner', PARTNER_BUCKET),
('internal', INTERNAL_BUCKET),
('output', TELEMETRY_OUTPUT),
))
BUCKET_ALIAS_NAMES = list(BUCKET_ALIASES.keys())
_GSUTIL_PATH = os.path.join(py_utils.GetCatapultDir(), 'third_party', 'gsutil',
'gsutil')
# TODO(tbarzic): A workaround for http://crbug.com/386416 and
# http://crbug.com/359293. See |_RunCommand|.
_CROS_GSUTIL_HOME_WAR = '/home/chromeos-test/'
# If the environment variable DISABLE_CLOUD_STORAGE_IO is set to '1', any
# method calls that invoke cloud storage network IO will raise exceptions.
DISABLE_CLOUD_STORAGE_IO = 'DISABLE_CLOUD_STORAGE_IO'
# The maximum number of seconds to wait to acquire the pseudo lock for a cloud
# storage file before raising an exception.
LOCK_ACQUISITION_TIMEOUT = 10
class CloudStorageError(Exception):
@staticmethod
def _GetConfigInstructions():
command = _GSUTIL_PATH
if py_utils.IsRunningOnCrosDevice():
command = 'HOME=%s %s' % (_CROS_GSUTIL_HOME_WAR, _GSUTIL_PATH)
return ('To configure your credentials:\n'
' 1. Run "%s config" and follow its instructions.\n'
' 2. If you have a @google.com account, use that account.\n'
' 3. For the project-id, just enter 0.' % command)
class PermissionError(CloudStorageError):
def __init__(self):
super(PermissionError, self).__init__(
'Attempted to access a file from Cloud Storage but you don\'t '
'have permission. ' + self._GetConfigInstructions())
class CredentialsError(CloudStorageError):
def __init__(self):
super(CredentialsError, self).__init__(
'Attempted to access a file from Cloud Storage but you have no '
'configured credentials. ' + self._GetConfigInstructions())
class CloudStorageIODisabled(CloudStorageError):
pass
class NotFoundError(CloudStorageError):
pass
class ServerError(CloudStorageError):
pass
# TODO(tonyg/dtu): Can this be replaced with distutils.spawn.find_executable()?
def _FindExecutableInPath(relative_executable_path, *extra_search_paths):
search_paths = list(extra_search_paths) + os.environ['PATH'].split(os.pathsep)
for search_path in search_paths:
executable_path = os.path.join(search_path, relative_executable_path)
if py_utils.IsExecutable(executable_path):
return executable_path
return None
def _EnsureExecutable(gsutil):
"""chmod +x if gsutil is not executable."""
st = os.stat(gsutil)
if not st.st_mode & stat.S_IEXEC:
os.chmod(gsutil, st.st_mode | stat.S_IEXEC)
def _IsRunningOnSwarming():
return os.environ.get('SWARMING_HEADLESS') is not None
def _RunCommand(args):
# On a CrOS device telemetry runs as root, so HOME is set to /root/, which is
# not writable. gsutil will attempt to create a download tracker dir in the
# home dir and fail. To avoid this, override HOME with something writable when
# running on a CrOS device.
#
# TODO(tbarzic): Figure out a better way to handle gsutil on cros.
# http://crbug.com/386416, http://crbug.com/359293.
gsutil_env = None
if py_utils.IsRunningOnCrosDevice():
gsutil_env = os.environ.copy()
gsutil_env['HOME'] = _CROS_GSUTIL_HOME_WAR
elif _IsRunningOnSwarming():
gsutil_env = os.environ.copy()
if os.name == 'nt':
# If Windows, prepend python. Python scripts aren't directly executable.
args = [sys.executable, _GSUTIL_PATH] + args
else:
# Don't do it on POSIX, in case someone is using a shell script to redirect.
args = [_GSUTIL_PATH] + args
_EnsureExecutable(_GSUTIL_PATH)
if args[0] not in ('help', 'hash', 'version') and not IsNetworkIOEnabled():
raise CloudStorageIODisabled(
"Environment variable DISABLE_CLOUD_STORAGE_IO is set to 1. "
'Command %s is not allowed to run' % args)
gsutil = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=gsutil_env)
stdout, stderr = gsutil.communicate()
if gsutil.returncode:
raise GetErrorObjectForCloudStorageStderr(stderr)
return stdout
def GetErrorObjectForCloudStorageStderr(stderr):
if (stderr.startswith((
'You are attempting to access protected data with no configured',
'Failure: No handler was ready to authenticate.')) or
re.match('.*401.*does not have .* access to .*', stderr)):
return CredentialsError()
if ('status=403' in stderr or 'status 403' in stderr or
'403 Forbidden' in stderr or
re.match('.*403.*does not have .* access to .*', stderr)):
return PermissionError()
if (stderr.startswith('InvalidUriError') or 'No such object' in stderr or
'No URLs matched' in stderr or 'One or more URLs matched no' in stderr):
return NotFoundError(stderr)
if '500 Internal Server Error' in stderr:
return ServerError(stderr)
return CloudStorageError(stderr)
def IsNetworkIOEnabled():
"""Returns true if cloud storage is enabled."""
disable_cloud_storage_env_val = os.getenv(DISABLE_CLOUD_STORAGE_IO)
if disable_cloud_storage_env_val and disable_cloud_storage_env_val != '1':
logger.error(
'Unsupported value of environment variable '
'DISABLE_CLOUD_STORAGE_IO. Expected None or \'1\' but got %s.',
disable_cloud_storage_env_val)
return disable_cloud_storage_env_val != '1'
def List(bucket):
query = 'gs://%s/' % bucket
stdout = _RunCommand(['ls', query])
return [url[len(query):] for url in stdout.splitlines()]
def Exists(bucket, remote_path):
try:
_RunCommand(['ls', 'gs://%s/%s' % (bucket, remote_path)])
return True
except NotFoundError:
return False
def Move(bucket1, bucket2, remote_path):
url1 = 'gs://%s/%s' % (bucket1, remote_path)
url2 = 'gs://%s/%s' % (bucket2, remote_path)
logger.info('Moving %s to %s', url1, url2)
_RunCommand(['mv', url1, url2])
def Copy(bucket_from, bucket_to, remote_path_from, remote_path_to):
"""Copy a file from one location in CloudStorage to another.
Args:
bucket_from: The cloud storage bucket where the file is currently located.
bucket_to: The cloud storage bucket it is being copied to.
remote_path_from: The file path where the file is located in bucket_from.
remote_path_to: The file path it is being copied to in bucket_to.
This causes no changes locally or to the source file, and will overwrite
any existing file at the destination.
"""
url1 = 'gs://%s/%s' % (bucket_from, remote_path_from)
url2 = 'gs://%s/%s' % (bucket_to, remote_path_to)
logger.info('Copying %s to %s', url1, url2)
_RunCommand(['cp', url1, url2])
def Delete(bucket, remote_path):
url = 'gs://%s/%s' % (bucket, remote_path)
logger.info('Deleting %s', url)
_RunCommand(['rm', url])
def Get(bucket, remote_path, local_path):
with _FileLock(local_path):
_GetLocked(bucket, remote_path, local_path)
_CLOUD_STORAGE_GLOBAL_LOCK = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'cloud_storage_global_lock.py')
@contextlib.contextmanager
def _FileLock(base_path):
pseudo_lock_path = '%s.pseudo_lock' % base_path
_CreateDirectoryIfNecessary(os.path.dirname(pseudo_lock_path))
# Make sure that we guard the creation, acquisition, release, and removal of
# the pseudo lock all with the same guard (_CLOUD_STORAGE_GLOBAL_LOCK).
# Otherwise, we can get nasty interleavings that result in multiple processes
# thinking they have an exclusive lock, like:
#
# (Process 1) Create and acquire the pseudo lock
# (Process 1) Release the pseudo lock
# (Process 1) Release the file lock
# (Process 2) Open and acquire the existing pseudo lock
# (Process 1) Delete the (existing) pseudo lock
# (Process 3) Create and acquire a new pseudo lock
#
# Using the same guard for creation and removal of the pseudo lock guarantees
# that all processes are referring to the same lock.
pseudo_lock_fd = None
pseudo_lock_fd_return = []
py_utils.WaitFor(lambda: _AttemptPseudoLockAcquisition(pseudo_lock_path,
pseudo_lock_fd_return),
LOCK_ACQUISITION_TIMEOUT)
pseudo_lock_fd = pseudo_lock_fd_return[0]
try:
yield
finally:
py_utils.WaitFor(lambda: _AttemptPseudoLockRelease(pseudo_lock_fd),
LOCK_ACQUISITION_TIMEOUT)
def _AttemptPseudoLockAcquisition(pseudo_lock_path, pseudo_lock_fd_return):
"""Try to acquire the lock and return a boolean indicating whether the attempt
was successful. If the attempt was successful, pseudo_lock_fd_return, which
should be an empty array, will be modified to contain a single entry: the file
descriptor of the (now acquired) lock file.
This whole operation is guarded with the global cloud storage lock, which
prevents race conditions that might otherwise cause multiple processes to
believe they hold the same pseudo lock (see _FileLock for more details).
"""
pseudo_lock_fd = None
try:
with open(_CLOUD_STORAGE_GLOBAL_LOCK) as global_file:
with lock.FileLock(global_file, lock.LOCK_EX | lock.LOCK_NB):
# Attempt to acquire the lock in a non-blocking manner. If we block,
# then we'll cause deadlock because another process will be unable to
# acquire the cloud storage global lock in order to release the pseudo
# lock.
pseudo_lock_fd = open(pseudo_lock_path, 'w')
lock.AcquireFileLock(pseudo_lock_fd, lock.LOCK_EX | lock.LOCK_NB)
pseudo_lock_fd_return.append(pseudo_lock_fd)
return True
except (lock.LockException, IOError):
# We failed to acquire either the global cloud storage lock or the pseudo
# lock.
if pseudo_lock_fd:
pseudo_lock_fd.close()
return False
def _AttemptPseudoLockRelease(pseudo_lock_fd):
"""Try to release the pseudo lock and return a boolean indicating whether
the release was successful.
This whole operation is guarded with the global cloud storage lock, which
prevents race conditions that might otherwise cause multiple processes to
believe they hold the same pseudo lock (see _FileLock for more details).
"""
pseudo_lock_path = pseudo_lock_fd.name
try:
with open(_CLOUD_STORAGE_GLOBAL_LOCK) as global_file:
with lock.FileLock(global_file, lock.LOCK_EX | lock.LOCK_NB):
lock.ReleaseFileLock(pseudo_lock_fd)
pseudo_lock_fd.close()
try:
os.remove(pseudo_lock_path)
except OSError:
# We don't care if the pseudo lock gets removed elsewhere before
# we have a chance to do so.
pass
return True
except (lock.LockException, IOError):
# We failed to acquire the global cloud storage lock and are thus unable to
# release the pseudo lock.
return False
def _CreateDirectoryIfNecessary(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def _GetLocked(bucket, remote_path, local_path):
url = 'gs://%s/%s' % (bucket, remote_path)
logger.info('Downloading %s to %s', url, local_path)
_CreateDirectoryIfNecessary(os.path.dirname(local_path))
with tempfile.NamedTemporaryFile(
dir=os.path.dirname(local_path),
delete=False) as partial_download_path:
try:
# Windows won't download to an open file.
partial_download_path.close()
try:
_RunCommand(['cp', url, partial_download_path.name])
except ServerError:
logger.info('Cloud Storage server error, retrying download')
_RunCommand(['cp', url, partial_download_path.name])
shutil.move(partial_download_path.name, local_path)
finally:
if os.path.exists(partial_download_path.name):
os.remove(partial_download_path.name)
def Insert(bucket, remote_path, local_path, publicly_readable=False):
""" Upload file in |local_path| to cloud storage.
Args:
bucket: the google cloud storage bucket name.
remote_path: the remote file path in |bucket|.
local_path: path of the local file to be uploaded.
publicly_readable: whether the uploaded file has publicly readable
permission.
Returns:
The url where the file is uploaded to.
"""
url = 'gs://%s/%s' % (bucket, remote_path)
command_and_args = ['cp']
extra_info = ''
if publicly_readable:
command_and_args += ['-a', 'public-read']
extra_info = ' (publicly readable)'
command_and_args += [local_path, url]
logger.info('Uploading %s to %s%s', local_path, url, extra_info)
_RunCommand(command_and_args)
return 'https://console.developers.google.com/m/cloudstorage/b/%s/o/%s' % (
bucket, remote_path)
def GetIfHashChanged(cs_path, download_path, bucket, file_hash):
"""Downloads |download_path| to |file_path| if |file_path| doesn't exist or
it's hash doesn't match |file_hash|.
Returns:
True if the binary was changed.
Raises:
CredentialsError if the user has no configured credentials.
PermissionError if the user does not have permission to access the bucket.
NotFoundError if the file is not in the given bucket in cloud_storage.
"""
with _FileLock(download_path):
if (os.path.exists(download_path) and
CalculateHash(download_path) == file_hash):
return False
_GetLocked(bucket, cs_path, download_path)
return True
def GetIfChanged(file_path, bucket):
"""Gets the file at file_path if it has a hash file that doesn't match or
if there is no local copy of file_path, but there is a hash file for it.
Returns:
True if the binary was changed.
Raises:
CredentialsError if the user has no configured credentials.
PermissionError if the user does not have permission to access the bucket.
NotFoundError if the file is not in the given bucket in cloud_storage.
"""
with _FileLock(file_path):
hash_path = file_path + '.sha1'
fetch_ts_path = file_path + '.fetchts'
if not os.path.exists(hash_path):
logger.warning('Hash file not found: %s', hash_path)
return False
expected_hash = ReadHash(hash_path)
# To save the time required to compute the binary's hash (an expensive
# operation, see crbug.com/793609#c2 for details), any time we fetch a new
# binary we save not only that binary but also the time of the fetch in
# |fetch_ts_path|. Whenever the file needs updating (its hash in
# |hash_path| changes), we just need to compare the timestamp of
# |hash_path| with the timestamp in |fetch_ts_path| to figure out whether
# the update has already been done.
#
# Notes: for this to work, we make the assumption that only
# cloud_storage.GetIfChanged modifies the local |file_path| binary.
if os.path.exists(fetch_ts_path) and os.path.exists(file_path):
with open(fetch_ts_path) as f:
data = f.read().strip()
last_binary_fetch_ts = float(data)
if last_binary_fetch_ts > os.path.getmtime(hash_path):
return False
# Whether the locally stored binary already matches expected_hash or we
# need to fetch a new binary from the cloud, update the timestamp in
# |fetch_ts_path| with the current time anyway, since it is outdated
# compared with the .sha1 file's last modified time.
with open(fetch_ts_path, 'w') as f:
f.write(str(time.time()))
if os.path.exists(file_path) and CalculateHash(file_path) == expected_hash:
return False
_GetLocked(bucket, expected_hash, file_path)
if CalculateHash(file_path) != expected_hash:
os.remove(fetch_ts_path)
raise RuntimeError(
'Binary stored in cloud storage does not have hash matching .sha1 '
'file. Please make sure that the binary file is uploaded using '
'depot_tools/upload_to_google_storage.py script or through automatic '
'framework.')
return True
def GetFilesInDirectoryIfChanged(directory, bucket):
""" Scan the directory for .sha1 files, and download them from the given
bucket in cloud storage if the local and remote hash don't match or
there is no local copy.
"""
if not os.path.isdir(directory):
raise ValueError(
'%s does not exist. Must provide a valid directory path.' % directory)
# Don't allow the root directory to be a serving_dir.
if directory == os.path.abspath(os.sep):
raise ValueError('Trying to serve root directory from HTTP server.')
for dirpath, _, filenames in os.walk(directory):
for filename in filenames:
path_name, extension = os.path.splitext(
os.path.join(dirpath, filename))
if extension != '.sha1':
continue
GetIfChanged(path_name, bucket)
def CalculateHash(file_path):
"""Calculates and returns the hash of the file at file_path."""
sha1 = hashlib.sha1()
with open(file_path, 'rb') as f:
while True:
# Read in 1mb chunks, so it doesn't all have to be loaded into memory.
chunk = f.read(1024 * 1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
def ReadHash(hash_path):
with open(hash_path, 'rb') as f:
return f.read(1024).rstrip()
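A hedged sketch of the most common entry point, GetIfChanged (the file name is illustrative; the .sha1 file is assumed to sit next to the target path, as the code above expects):

from py_utils import cloud_storage

# Assumes 'data/page_set.wpr.sha1' exists locally. Downloads
# 'data/page_set.wpr' from the public bucket only when the local copy is
# missing or its SHA-1 no longer matches the .sha1 file; the .fetchts
# timestamp file then short-circuits hash checks on later calls.
if cloud_storage.GetIfChanged('data/page_set.wpr',
                              cloud_storage.PUBLIC_BUCKET):
    print('fetched a fresh copy')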

View File

@ -0,0 +1,5 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is used by cloud_storage._FileLock implementation, don't delete it!

View File

@ -0,0 +1,387 @@
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import sys
import tempfile
import unittest
import mock
from pyfakefs import fake_filesystem_unittest
import py_utils
from py_utils import cloud_storage
from py_utils import lock
_CLOUD_STORAGE_GLOBAL_LOCK_PATH = os.path.join(
os.path.dirname(__file__), 'cloud_storage_global_lock.py')
def _FakeReadHash(_):
return 'hashthis!'
def _FakeCalulateHashMatchesRead(_):
return 'hashthis!'
def _FakeCalulateHashNewHash(_):
return 'omgnewhash'
class BaseFakeFsUnitTest(fake_filesystem_unittest.TestCase):
def setUp(self):
self.original_environ = os.environ.copy()
os.environ['DISABLE_CLOUD_STORAGE_IO'] = ''
self.setUpPyfakefs()
self.fs.CreateFile(
os.path.join(py_utils.GetCatapultDir(),
'third_party', 'gsutil', 'gsutil'))
def CreateFiles(self, file_paths):
for f in file_paths:
self.fs.CreateFile(f)
def tearDown(self):
self.tearDownPyfakefs()
os.environ = self.original_environ
def _FakeRunCommand(self, cmd):
pass
def _FakeGet(self, bucket, remote_path, local_path):
pass
class CloudStorageFakeFsUnitTest(BaseFakeFsUnitTest):
def _AssertRunCommandRaisesError(self, communicate_strs, error):
with mock.patch('py_utils.cloud_storage.subprocess.Popen') as popen:
p_mock = mock.Mock()
popen.return_value = p_mock
p_mock.returncode = 1
for stderr in communicate_strs:
p_mock.communicate.return_value = ('', stderr)
self.assertRaises(error, cloud_storage._RunCommand, [])
def testRunCommandCredentialsError(self):
strs = ['You are attempting to access protected data with no configured',
'Failure: No handler was ready to authenticate.']
self._AssertRunCommandRaisesError(strs, cloud_storage.CredentialsError)
def testRunCommandPermissionError(self):
strs = ['status=403', 'status 403', '403 Forbidden']
self._AssertRunCommandRaisesError(strs, cloud_storage.PermissionError)
def testRunCommandNotFoundError(self):
strs = ['InvalidUriError', 'No such object', 'No URLs matched',
'One or more URLs matched no', 'InvalidUriError']
self._AssertRunCommandRaisesError(strs, cloud_storage.NotFoundError)
def testRunCommandServerError(self):
strs = ['500 Internal Server Error']
self._AssertRunCommandRaisesError(strs, cloud_storage.ServerError)
def testRunCommandGenericError(self):
strs = ['Random string']
self._AssertRunCommandRaisesError(strs, cloud_storage.CloudStorageError)
def testInsertCreatesValidCloudUrl(self):
orig_run_command = cloud_storage._RunCommand
try:
cloud_storage._RunCommand = self._FakeRunCommand
remote_path = 'test-remote-path.html'
local_path = 'test-local-path.html'
cloud_url = cloud_storage.Insert(cloud_storage.PUBLIC_BUCKET,
remote_path, local_path)
self.assertEqual('https://console.developers.google.com/m/cloudstorage'
'/b/chromium-telemetry/o/test-remote-path.html',
cloud_url)
finally:
cloud_storage._RunCommand = orig_run_command
@mock.patch('py_utils.cloud_storage.subprocess')
def testExistsReturnsFalse(self, subprocess_mock):
p_mock = mock.Mock()
subprocess_mock.Popen.return_value = p_mock
p_mock.communicate.return_value = (
'',
'CommandException: One or more URLs matched no objects.\n')
p_mock.returncode_result = 1
self.assertFalse(cloud_storage.Exists('fake bucket',
'fake remote path'))
@unittest.skipIf(sys.platform.startswith('win'),
'https://github.com/catapult-project/catapult/issues/1861')
def testGetFilesInDirectoryIfChanged(self):
self.CreateFiles([
'real_dir_path/dir1/1file1.sha1',
'real_dir_path/dir1/1file2.txt',
'real_dir_path/dir1/1file3.sha1',
'real_dir_path/dir2/2file.txt',
'real_dir_path/dir3/3file1.sha1'])
def IncrementFilesUpdated(*_):
IncrementFilesUpdated.files_updated += 1
IncrementFilesUpdated.files_updated = 0
orig_get_if_changed = cloud_storage.GetIfChanged
cloud_storage.GetIfChanged = IncrementFilesUpdated
try:
self.assertRaises(ValueError, cloud_storage.GetFilesInDirectoryIfChanged,
os.path.abspath(os.sep), cloud_storage.PUBLIC_BUCKET)
self.assertEqual(0, IncrementFilesUpdated.files_updated)
self.assertRaises(ValueError, cloud_storage.GetFilesInDirectoryIfChanged,
'fake_dir_path', cloud_storage.PUBLIC_BUCKET)
self.assertEqual(0, IncrementFilesUpdated.files_updated)
cloud_storage.GetFilesInDirectoryIfChanged('real_dir_path',
cloud_storage.PUBLIC_BUCKET)
self.assertEqual(3, IncrementFilesUpdated.files_updated)
finally:
cloud_storage.GetIfChanged = orig_get_if_changed
def testCopy(self):
orig_run_command = cloud_storage._RunCommand
def AssertCorrectRunCommandArgs(args):
self.assertEqual(expected_args, args)
cloud_storage._RunCommand = AssertCorrectRunCommandArgs
expected_args = ['cp', 'gs://bucket1/remote_path1',
'gs://bucket2/remote_path2']
try:
cloud_storage.Copy('bucket1', 'bucket2', 'remote_path1', 'remote_path2')
finally:
cloud_storage._RunCommand = orig_run_command
@mock.patch('py_utils.cloud_storage.subprocess.Popen')
def testSwarmingUsesExistingEnv(self, mock_popen):
os.environ['SWARMING_HEADLESS'] = '1'
mock_gsutil = mock_popen()
mock_gsutil.communicate = mock.MagicMock(return_value=('a', 'b'))
mock_gsutil.returncode = None
cloud_storage.Copy('bucket1', 'bucket2', 'remote_path1', 'remote_path2')
mock_popen.assert_called_with(
mock.ANY, stderr=-1, env=os.environ, stdout=-1)
@mock.patch('py_utils.cloud_storage._FileLock')
def testDisableCloudStorageIo(self, unused_lock_mock):
os.environ['DISABLE_CLOUD_STORAGE_IO'] = '1'
dir_path = 'real_dir_path'
self.fs.CreateDirectory(dir_path)
file_path = os.path.join(dir_path, 'file1')
file_path_sha = file_path + '.sha1'
def CleanTimeStampFile():
os.remove(file_path + '.fetchts')
self.CreateFiles([file_path, file_path_sha])
with open(file_path_sha, 'w') as f:
f.write('hash1234')
with self.assertRaises(cloud_storage.CloudStorageIODisabled):
cloud_storage.Copy('bucket1', 'bucket2', 'remote_path1', 'remote_path2')
with self.assertRaises(cloud_storage.CloudStorageIODisabled):
cloud_storage.Get('bucket', 'foo', file_path)
with self.assertRaises(cloud_storage.CloudStorageIODisabled):
cloud_storage.GetIfChanged(file_path, 'foo')
with self.assertRaises(cloud_storage.CloudStorageIODisabled):
cloud_storage.GetIfHashChanged('bar', file_path, 'bucket', 'hash1234')
with self.assertRaises(cloud_storage.CloudStorageIODisabled):
cloud_storage.Insert('bucket', 'foo', file_path)
CleanTimeStampFile()
with self.assertRaises(cloud_storage.CloudStorageIODisabled):
cloud_storage.GetFilesInDirectoryIfChanged(dir_path, 'bucket')
class GetIfChangedTests(BaseFakeFsUnitTest):
def setUp(self):
super(GetIfChangedTests, self).setUp()
self._orig_read_hash = cloud_storage.ReadHash
self._orig_calculate_hash = cloud_storage.CalculateHash
def tearDown(self):
super(GetIfChangedTests, self).tearDown()
cloud_storage.CalculateHash = self._orig_calculate_hash
cloud_storage.ReadHash = self._orig_read_hash
@mock.patch('py_utils.cloud_storage._FileLock')
@mock.patch('py_utils.cloud_storage._GetLocked')
def testHashPathDoesNotExists(self, unused_get_locked, unused_lock_mock):
cloud_storage.ReadHash = _FakeReadHash
cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
file_path = 'test-file-path.wpr'
cloud_storage._GetLocked = self._FakeGet
# hash_path doesn't exist.
self.assertFalse(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
@mock.patch('py_utils.cloud_storage._FileLock')
@mock.patch('py_utils.cloud_storage._GetLocked')
def testHashPathExistsButFilePathDoesNot(
self, unused_get_locked, unused_lock_mock):
cloud_storage.ReadHash = _FakeReadHash
cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
file_path = 'test-file-path.wpr'
hash_path = file_path + '.sha1'
# hash_path exists, but file_path doesn't.
self.CreateFiles([hash_path])
self.assertTrue(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
@mock.patch('py_utils.cloud_storage._FileLock')
@mock.patch('py_utils.cloud_storage._GetLocked')
def testHashPathAndFileHashExistWithSameHash(
self, unused_get_locked, unused_lock_mock):
cloud_storage.ReadHash = _FakeReadHash
cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
file_path = 'test-file-path.wpr'
# hash_path and file_path exist, and have same hash.
self.CreateFiles([file_path])
self.assertFalse(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
@mock.patch('py_utils.cloud_storage._FileLock')
@mock.patch('py_utils.cloud_storage._GetLocked')
def testHashPathAndFileHashExistWithDifferentHash(
self, mock_get_locked, unused_get_locked):
cloud_storage.ReadHash = _FakeReadHash
cloud_storage.CalculateHash = _FakeCalulateHashNewHash
file_path = 'test-file-path.wpr'
hash_path = file_path + '.sha1'
def _FakeGetLocked(bucket, expected_hash, file_path):
del bucket, expected_hash, file_path # unused
cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
mock_get_locked.side_effect = _FakeGetLocked
self.CreateFiles([file_path, hash_path])
# hash_path and file_path exist, and have different hashes.
self.assertTrue(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
@mock.patch('py_utils.cloud_storage._FileLock')
@mock.patch('py_utils.cloud_storage.CalculateHash')
@mock.patch('py_utils.cloud_storage._GetLocked')
def testNoHashComputationNeededUponSecondCall(
self, mock_get_locked, mock_calculate_hash, unused_get_locked):
mock_calculate_hash.side_effect = _FakeCalulateHashNewHash
cloud_storage.ReadHash = _FakeReadHash
file_path = 'test-file-path.wpr'
hash_path = file_path + '.sha1'
def _FakeGetLocked(bucket, expected_hash, file_path):
del bucket, expected_hash, file_path # unused
cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
mock_get_locked.side_effect = _FakeGetLocked
self.CreateFiles([file_path, hash_path])
# hash_path and file_path exist, and have different hashes. This first call
# will invoke a fetch.
self.assertTrue(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
# The fetch left a .fetchts file on machine.
self.assertTrue(os.path.exists(file_path + '.fetchts'))
# Subsequent invocations of GetIfChanged should not invoke CalculateHash.
mock_calculate_hash.assert_not_called()
self.assertFalse(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
self.assertFalse(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
@mock.patch('py_utils.cloud_storage._FileLock')
@mock.patch('py_utils.cloud_storage.CalculateHash')
@mock.patch('py_utils.cloud_storage._GetLocked')
def testRefetchingFileUponHashFileChange(
self, mock_get_locked, mock_calculate_hash, unused_get_locked):
mock_calculate_hash.side_effect = _FakeCalulateHashNewHash
cloud_storage.ReadHash = _FakeReadHash
file_path = 'test-file-path.wpr'
hash_path = file_path + '.sha1'
def _FakeGetLocked(bucket, expected_hash, file_path):
del bucket, expected_hash, file_path # unused
cloud_storage.CalculateHash = _FakeCalulateHashMatchesRead
mock_get_locked.side_effect = _FakeGetLocked
self.CreateFiles([file_path, hash_path])
# hash_path and file_path exist, and have different hashes. This first call
# will invoke a fetch.
self.assertTrue(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
# The fetch left a .fetchts file on machine.
self.assertTrue(os.path.exists(file_path + '.fetchts'))
with open(file_path + '.fetchts') as f:
fetchts = float(f.read())
# Updating the .sha1 hash_path file with the new hash after .fetchts
# is created.
file_obj = self.fs.GetObject(hash_path)
file_obj.SetMTime(fetchts + 100)
cloud_storage.ReadHash = lambda _: 'hashNeW'
def _FakeGetLockedNewHash(bucket, expected_hash, file_path):
del bucket, expected_hash, file_path # unused
cloud_storage.CalculateHash = lambda _: 'hashNeW'
mock_get_locked.side_effect = _FakeGetLockedNewHash
# hash_path and file_path exist, and have different hashes. This first call
# will invoke a fetch.
self.assertTrue(cloud_storage.GetIfChanged(file_path,
cloud_storage.PUBLIC_BUCKET))
class CloudStorageRealFsUnitTest(unittest.TestCase):
def setUp(self):
self.original_environ = os.environ.copy()
os.environ['DISABLE_CLOUD_STORAGE_IO'] = ''
def tearDown(self):
os.environ = self.original_environ
@mock.patch('py_utils.cloud_storage.LOCK_ACQUISITION_TIMEOUT', .005)
def testGetPseudoLockUnavailableCausesTimeout(self):
with tempfile.NamedTemporaryFile(suffix='.pseudo_lock') as pseudo_lock_fd:
with lock.FileLock(pseudo_lock_fd, lock.LOCK_EX | lock.LOCK_NB):
with self.assertRaises(py_utils.TimeoutException):
file_path = pseudo_lock_fd.name.replace('.pseudo_lock', '')
cloud_storage.GetIfChanged(file_path, cloud_storage.PUBLIC_BUCKET)
@mock.patch('py_utils.cloud_storage.LOCK_ACQUISITION_TIMEOUT', .005)
def testGetGlobalLockUnavailableCausesTimeout(self):
with open(_CLOUD_STORAGE_GLOBAL_LOCK_PATH) as global_lock_fd:
with lock.FileLock(global_lock_fd, lock.LOCK_EX | lock.LOCK_NB):
tmp_dir = tempfile.mkdtemp()
try:
file_path = os.path.join(tmp_dir, 'foo')
with self.assertRaises(py_utils.TimeoutException):
cloud_storage.GetIfChanged(file_path, cloud_storage.PUBLIC_BUCKET)
finally:
shutil.rmtree(tmp_dir)
class CloudStorageErrorHandlingTest(unittest.TestCase):
def runTest(self):
self.assertIsInstance(cloud_storage.GetErrorObjectForCloudStorageStderr(
'ServiceException: 401 Anonymous users does not have '
'storage.objects.get access to object chrome-partner-telemetry'),
cloud_storage.CredentialsError)
self.assertIsInstance(cloud_storage.GetErrorObjectForCloudStorageStderr(
'403 Caller does not have storage.objects.list access to bucket '
'chrome-telemetry'), cloud_storage.PermissionError)

View File

@ -0,0 +1,33 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class _OptionalContextManager(object):
def __init__(self, manager, condition):
self._manager = manager
self._condition = condition
def __enter__(self):
if self._condition:
return self._manager.__enter__()
return None
def __exit__(self, exc_type, exc_val, exc_tb):
if self._condition:
return self._manager.__exit__(exc_type, exc_val, exc_tb)
return None
def Optional(manager, condition):
"""Wraps the provided context manager and runs it if condition is True.
Args:
manager: A context manager to conditionally run.
condition: If true, runs the given context manager.
Returns:
A context manager that conditionally executes the given manager.
"""
return _OptionalContextManager(manager, condition)
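A small runnable sketch (the _Timer manager is a stand-in for any real context manager):

import contextlib

from py_utils import contextlib_ext

@contextlib.contextmanager
def _Timer():
    print('enter')
    yield
    print('exit')

verbose = False
# _Timer() is entered only when |verbose| is true; otherwise the body runs
# without entering the wrapped manager at all.
with contextlib_ext.Optional(_Timer(), verbose):
    pass  # do work here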

View File

@ -0,0 +1,34 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from py_utils import contextlib_ext
class OptionalUnittest(unittest.TestCase):
class SampleContextMgr(object):
def __init__(self):
self.entered = False
self.exited = False
def __enter__(self):
self.entered = True
def __exit__(self, exc_type, exc_val, exc_tb):
self.exited = True
def testConditionTrue(self):
c = self.SampleContextMgr()
with contextlib_ext.Optional(c, True):
self.assertTrue(c.entered)
self.assertTrue(c.exited)
def testConditionFalse(self):
c = self.SampleContextMgr()
with contextlib_ext.Optional(c, False):
self.assertFalse(c.entered)
self.assertFalse(c.exited)

View File

@ -0,0 +1,49 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import platform
import sys
import py_utils
def GetOSAndArchForCurrentDesktopPlatform():
os_name = GetOSNameForCurrentDesktopPlatform()
return os_name, GetArchForCurrentDesktopPlatform(os_name)
def GetOSNameForCurrentDesktopPlatform():
if py_utils.IsRunningOnCrosDevice():
return 'chromeos'
if sys.platform.startswith('linux'):
return 'linux'
if sys.platform == 'darwin':
return 'mac'
if sys.platform == 'win32':
return 'win'
return sys.platform
def GetArchForCurrentDesktopPlatform(os_name):
if os_name == 'chromeos':
# Current tests outside of telemetry don't run on chromeos, and
# platform.machine is not the way telemetry gets the arch name on chromeos.
raise NotImplementedError()
return platform.machine()
def GetChromeApkOsVersion(version_name):
version = version_name[0]
assert version.isupper(), (
'First character of version name %s was not an uppercase letter.' %
version_name)
if version < 'L':
return 'k'
elif version > 'M':
return 'n'
return 'l'
def ChromeBinariesConfigPath():
return os.path.realpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'chrome_binaries.json'))
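Assuming this file is py_utils' dependency_util module (its helpers match that module), GetChromeApkOsVersion folds an Android version name into the three APK flavors listed in chrome_binaries.json. A quick illustration (version names are examples):

from py_utils import dependency_util

# Names starting before 'L'  -> 'k'  (ChromeStable.apk, pre-Lollipop)
# Names starting 'L' or 'M'  -> 'l'
# Names after 'M'            -> 'n'  (Monochrome.apk)
for name in ('KitKat', 'Lollipop', 'Marshmallow', 'Nougat', 'Oreo'):
    print(name, dependency_util.GetChromeApkOsVersion(name))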

View File

@ -0,0 +1,191 @@
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import importlib
import inspect
import os
import re
import sys
from py_utils import camel_case
def DiscoverModules(start_dir, top_level_dir, pattern='*'):
"""Discover all modules in |start_dir| which match |pattern|.
Args:
start_dir: The directory to recursively search.
top_level_dir: The top level of the package, for importing.
pattern: Unix shell-style pattern for filtering the filenames to import.
Returns:
list of modules.
"""
# start_dir and top_level_dir must be consistent with each other.
start_dir = os.path.realpath(start_dir)
top_level_dir = os.path.realpath(top_level_dir)
modules = []
sub_paths = list(os.walk(start_dir))
# We sort the directories & file paths to ensure a deterministic ordering when
# traversing |top_level_dir|.
sub_paths.sort(key=lambda paths_tuple: paths_tuple[0])
for dir_path, _, filenames in sub_paths:
# Sort the directories to walk recursively by the directory path.
filenames.sort()
for filename in filenames:
# Filter out unwanted filenames.
if filename.startswith('.') or filename.startswith('_'):
continue
if os.path.splitext(filename)[1] != '.py':
continue
if not fnmatch.fnmatch(filename, pattern):
continue
# Find the module.
module_rel_path = os.path.relpath(
os.path.join(dir_path, filename), top_level_dir)
module_name = re.sub(r'[/\\]', '.', os.path.splitext(module_rel_path)[0])
# Import the module.
try:
# Make sure that top_level_dir is the first path in sys.path, in case
# there are naming conflicts in module parts.
original_sys_path = sys.path[:]
sys.path.insert(0, top_level_dir)
module = importlib.import_module(module_name)
modules.append(module)
finally:
sys.path = original_sys_path
return modules
def AssertNoKeyConflicts(classes_by_key_1, classes_by_key_2):
for k in classes_by_key_1:
if k in classes_by_key_2:
assert classes_by_key_1[k] is classes_by_key_2[k], (
'Found conflicting classes for the same key: '
'key=%s, class_1=%s, class_2=%s' % (
k, classes_by_key_1[k], classes_by_key_2[k]))
# TODO(dtu): Normalize all discoverable classes to have corresponding module
# and class names, then always index by class name.
def DiscoverClasses(start_dir,
top_level_dir,
base_class,
pattern='*',
index_by_class_name=True,
directly_constructable=False):
"""Discover all classes in |start_dir| which subclass |base_class|.
Base classes that contain subclasses are ignored by default.
Args:
start_dir: The directory to recursively search.
top_level_dir: The top level of the package, for importing.
base_class: The base class to search for.
pattern: Unix shell-style pattern for filtering the filenames to import.
index_by_class_name: If True, use class name converted to
lowercase_with_underscores instead of module name in return dict keys.
directly_constructable: If True, will only return classes that can be
constructed without arguments
Returns:
dict of {module_name: class} or {underscored_class_name: class}
"""
modules = DiscoverModules(start_dir, top_level_dir, pattern)
classes = {}
for module in modules:
new_classes = DiscoverClassesInModule(
module, base_class, index_by_class_name, directly_constructable)
# TODO(nednguyen): we should remove index_by_class_name once
# benchmark_smoke_unittest in chromium/src/tools/perf no longer relies on
# naming collisions to reduce the number of smoke-tested benchmarks.
# crbug.com/548652
if index_by_class_name:
AssertNoKeyConflicts(classes, new_classes)
classes = dict(list(classes.items()) + list(new_classes.items()))
return classes
# TODO(nednguyen): we should remove index_by_class_name once
# benchmark_smoke_unittest in chromium/src/tools/perf no longer relies on
# naming collisions to reduce the number of smoke-tested benchmarks.
# crbug.com/548652
def DiscoverClassesInModule(module,
base_class,
index_by_class_name=False,
directly_constructable=False):
"""Discover all classes in |module| which subclass |base_class|.
Base classes that contain subclasses are ignored by default.
Args:
module: The module to search.
base_class: The base class to search for.
index_by_class_name: If True, use class name converted to
lowercase_with_underscores instead of module name in return dict keys.
Returns:
dict of {module_name: class} or {underscored_class_name: class}
"""
classes = {}
for _, obj in inspect.getmembers(module):
# Ensure object is a class.
if not inspect.isclass(obj):
continue
# Include only subclasses of base_class.
if not issubclass(obj, base_class):
continue
# Exclude the base_class itself.
if obj is base_class:
continue
# Exclude protected or private classes.
if obj.__name__.startswith('_'):
continue
# Include only the module in which the class is defined.
# If a class is imported by another module, exclude those duplicates.
if obj.__module__ != module.__name__:
continue
if index_by_class_name:
key_name = camel_case.ToUnderscore(obj.__name__)
else:
key_name = module.__name__.split('.')[-1]
if not directly_constructable or IsDirectlyConstructable(obj):
if key_name in classes and index_by_class_name:
assert classes[key_name] is obj, (
'Duplicate key_name with different objs detected: '
'key=%s, obj1=%s, obj2=%s' % (key_name, classes[key_name], obj))
else:
classes[key_name] = obj
return classes
def IsDirectlyConstructable(cls):
"""Returns True if instance of |cls| can be construct without arguments."""
assert inspect.isclass(cls)
if not hasattr(cls, '__init__'):
# Case |class A: pass|.
return True
if cls.__init__ is object.__init__:
# Case |class A(object): pass|.
return True
# Case |class A(object):| with |__init__| other than |object.__init__|.
args, _, _, defaults = inspect.getargspec(cls.__init__)
if defaults is None:
defaults = ()
# Return true if |self| is only arg without a default.
return len(args) == len(defaults) + 1
_COUNTER = [0]
def _GetUniqueModuleName():
_COUNTER[0] += 1
return "module_" + str(_COUNTER[0])

View File

@ -0,0 +1,151 @@
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
from py_utils import discover
import six
class DiscoverTest(unittest.TestCase):
def setUp(self):
self._base_dir = os.path.join(os.path.dirname(__file__), 'test_data')
self._start_dir = os.path.join(self._base_dir, 'discoverable_classes')
self._base_class = Exception
def testDiscoverClassesWithIndexByModuleName(self):
classes = discover.DiscoverClasses(self._start_dir,
self._base_dir,
self._base_class,
index_by_class_name=False)
actual_classes = dict(
(name, cls.__name__) for name, cls in six.iteritems(classes))
expected_classes = {
'another_discover_dummyclass': 'DummyExceptionWithParameterImpl1',
'discover_dummyclass': 'DummyException',
'parameter_discover_dummyclass': 'DummyExceptionWithParameterImpl2'
}
self.assertEqual(actual_classes, expected_classes)
def testDiscoverDirectlyConstructableClassesWithIndexByClassName(self):
classes = discover.DiscoverClasses(self._start_dir,
self._base_dir,
self._base_class,
directly_constructable=True)
actual_classes = dict(
(name, cls.__name__) for name, cls in six.iteritems(classes))
expected_classes = {
'dummy_exception': 'DummyException',
'dummy_exception_impl1': 'DummyExceptionImpl1',
'dummy_exception_impl2': 'DummyExceptionImpl2',
}
self.assertEqual(actual_classes, expected_classes)
def testDiscoverClassesWithIndexByClassName(self):
classes = discover.DiscoverClasses(self._start_dir, self._base_dir,
self._base_class)
actual_classes = dict(
(name, cls.__name__) for name, cls in six.iteritems(classes))
expected_classes = {
'dummy_exception': 'DummyException',
'dummy_exception_impl1': 'DummyExceptionImpl1',
'dummy_exception_impl2': 'DummyExceptionImpl2',
'dummy_exception_with_parameter_impl1':
'DummyExceptionWithParameterImpl1',
'dummy_exception_with_parameter_impl2':
'DummyExceptionWithParameterImpl2'
}
self.assertEqual(actual_classes, expected_classes)
def testDiscoverClassesWithPatternAndIndexByModule(self):
classes = discover.DiscoverClasses(self._start_dir,
self._base_dir,
self._base_class,
pattern='another*',
index_by_class_name=False)
actual_classes = dict(
(name, cls.__name__) for name, cls in six.iteritems(classes))
expected_classes = {
'another_discover_dummyclass': 'DummyExceptionWithParameterImpl1'
}
self.assertEqual(actual_classes, expected_classes)
def testDiscoverDirectlyConstructableClassesWithPatternAndIndexByClassName(
self):
classes = discover.DiscoverClasses(self._start_dir,
self._base_dir,
self._base_class,
pattern='another*',
directly_constructable=True)
actual_classes = dict(
(name, cls.__name__) for name, cls in six.iteritems(classes))
expected_classes = {
'dummy_exception_impl1': 'DummyExceptionImpl1',
'dummy_exception_impl2': 'DummyExceptionImpl2',
}
self.assertEqual(actual_classes, expected_classes)
def testDiscoverClassesWithPatternAndIndexByClassName(self):
classes = discover.DiscoverClasses(self._start_dir,
self._base_dir,
self._base_class,
pattern='another*')
actual_classes = dict(
(name, cls.__name__) for name, cls in six.iteritems(classes))
expected_classes = {
'dummy_exception_impl1': 'DummyExceptionImpl1',
'dummy_exception_impl2': 'DummyExceptionImpl2',
'dummy_exception_with_parameter_impl1':
'DummyExceptionWithParameterImpl1',
}
self.assertEqual(actual_classes, expected_classes)
class ClassWithoutInitDefOne: # pylint: disable=old-style-class, no-init
pass
class ClassWithoutInitDefTwo(object):
pass
class ClassWhoseInitOnlyHasSelf(object):
def __init__(self):
pass
class ClassWhoseInitWithDefaultArguments(object):
def __init__(self, dog=1, cat=None, cow=None, fud='a'):
pass
class ClassWhoseInitWithDefaultArgumentsAndNonDefaultArguments(object):
def __init__(self, x, dog=1, cat=None, fish=None, fud='a'):
pass
class IsDirectlyConstructableTest(unittest.TestCase):
def testIsDirectlyConstructableReturnsTrue(self):
self.assertTrue(discover.IsDirectlyConstructable(ClassWithoutInitDefOne))
self.assertTrue(discover.IsDirectlyConstructable(ClassWithoutInitDefTwo))
self.assertTrue(discover.IsDirectlyConstructable(ClassWhoseInitOnlyHasSelf))
self.assertTrue(
discover.IsDirectlyConstructable(ClassWhoseInitWithDefaultArguments))
def testIsDirectlyConstructableReturnsFalse(self):
self.assertFalse(
discover.IsDirectlyConstructable(
ClassWhoseInitWithDefaultArgumentsAndNonDefaultArguments))

View File

@ -0,0 +1,84 @@
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import logging
import sys
def BestEffort(func):
"""Decorator to log and dismiss exceptions if one if already being handled.
Note: This is largely a workaround for the lack of support for exception
chaining in Python 2.7; this decorator will no longer be needed in Python 3.
Typical usage would be in |Close| or |Disconnect| methods, to dismiss but log
any further exceptions raised if the current execution context is already
handling an exception. For example:
class Client(object):
def Connect(self):
# code to connect ...
@exc_util.BestEffort
def Disconnect(self):
# code to disconnect ...
client = Client()
try:
client.Connect()
except:
client.Disconnect()
raise
If an exception is raised by client.Connect(), and then a second exception
is raised by client.Disconnect(), the decorator will log the second exception
and let the original one be re-raised.
Otherwise, in Python 2.7 and without the decorator, the second exception is
the one propagated to the caller, while information about the original one,
usually more important, is completely lost.
Note that if client.Disconnect() is called in a context where an exception
is *not* being handled, then any exceptions raised within the method will
get through and be passed on to callers for them to handle in the usual way.
The decorator can also be used on cleanup functions meant to be called in
a finally block; however, you must also include an except-raise clause to
properly signal (in Python 2.7) whether an exception is being handled, e.g.:
@exc_util.BestEffort
def cleanup():
# do cleanup things ...
try:
process(thing)
except:
raise # Needed to let cleanup know if an exception is being handled.
finally:
cleanup()
Failing to include the except-raise block has the same effect as not
including the decorator at all. Namely: exceptions during |cleanup| are
raised and swallow any prior exceptions that occurred during |process|.
"""
@functools.wraps(func)
def Wrapper(*args, **kwargs):
exc_type = sys.exc_info()[0]
if exc_type is None:
# Not currently handling an exception; let any errors raise exceptions
# as usual.
func(*args, **kwargs)
else:
# Otherwise, we are currently handling an exception, dismiss and log
# any further cascading errors. Callers are responsible to handle the
# original exception.
try:
func(*args, **kwargs)
except Exception: # pylint: disable=broad-except
logging.exception(
'While handling a %s, the following exception was also raised:',
exc_type.__name__)
return Wrapper
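The docstring's Client example, condensed into a runnable snippet (class and exception types are illustrative):

from py_utils import exc_util

class _Client(object):
    def Connect(self):
        raise RuntimeError('connect failed')  # the original error

    @exc_util.BestEffort
    def Disconnect(self):
        raise ValueError('disconnect failed')  # logged, then dismissed

client = _Client()
try:
    try:
        client.Connect()
    except:  # bare except so BestEffort sees the active exception
        client.Disconnect()
        raise
except RuntimeError as e:
    # The RuntimeError propagates; the ValueError was only logged.
    print('original error propagated:', e)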

View File

@ -0,0 +1,183 @@
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import sys
import unittest
from py_utils import exc_util
class FakeConnectionError(Exception):
pass
class FakeDisconnectionError(Exception):
pass
class FakeProcessingError(Exception):
pass
class FakeCleanupError(Exception):
pass
class FaultyClient(object):
def __init__(self, *args):
self.failures = set(args)
self.called = set()
def Connect(self):
self.called.add('Connect')
if FakeConnectionError in self.failures:
raise FakeConnectionError('Oops!')
def Process(self):
self.called.add('Process')
if FakeProcessingError in self.failures:
raise FakeProcessingError('Oops!')
@exc_util.BestEffort
def Disconnect(self):
self.called.add('Disconnect')
if FakeDisconnectionError in self.failures:
raise FakeDisconnectionError('Oops!')
@exc_util.BestEffort
def Cleanup(self):
self.called.add('Cleanup')
if FakeCleanupError in self.failures:
raise FakeCleanupError('Oops!')
class ReraiseTests(unittest.TestCase):
def assertLogMatches(self, pattern):
self.assertRegexpMatches(
sys.stderr.getvalue(), pattern) # pylint: disable=no-member
def assertLogNotMatches(self, pattern):
self.assertNotRegexpMatches(
sys.stderr.getvalue(), pattern) # pylint: disable=no-member
def testTryRaisesExceptRaises(self):
client = FaultyClient(FakeConnectionError, FakeDisconnectionError)
# The connection error reaches the top level, while the disconnection
# error is logged.
with self.assertRaises(FakeConnectionError):
try:
client.Connect()
except:
client.Disconnect()
raise
self.assertLogMatches(re.compile(
r'While handling a FakeConnectionError, .* was also raised:\n'
r'Traceback \(most recent call last\):\n'
r'.*\n'
r'FakeDisconnectionError: Oops!\n', re.DOTALL))
self.assertItemsEqual(client.called, ['Connect', 'Disconnect'])
def testTryRaisesExceptDoesnt(self):
client = FaultyClient(FakeConnectionError)
    # The connection error reaches the top level; disconnecting did not raise
    # an exception (so nothing is logged).
with self.assertRaises(FakeConnectionError):
try:
client.Connect()
except:
client.Disconnect()
raise
self.assertLogNotMatches('FakeDisconnectionError')
self.assertItemsEqual(client.called, ['Connect', 'Disconnect'])
def testTryPassesNoException(self):
client = FaultyClient(FakeDisconnectionError)
# If there is no connection error, the except clause is not called (even if
# it would have raised an exception).
try:
client.Connect()
except:
client.Disconnect()
raise
self.assertLogNotMatches('FakeConnectionError')
self.assertLogNotMatches('FakeDisconnectionError')
self.assertItemsEqual(client.called, ['Connect'])
def testTryRaisesFinallyRaises(self):
worker = FaultyClient(FakeProcessingError, FakeCleanupError)
    # The processing error reaches the top level; the cleanup error is logged.
with self.assertRaises(FakeProcessingError):
try:
worker.Process()
except:
raise # Needed for Cleanup to know if an exception is handled.
finally:
worker.Cleanup()
self.assertLogMatches(re.compile(
r'While handling a FakeProcessingError, .* was also raised:\n'
r'Traceback \(most recent call last\):\n'
r'.*\n'
r'FakeCleanupError: Oops!\n', re.DOTALL))
self.assertItemsEqual(worker.called, ['Process', 'Cleanup'])
def testTryRaisesFinallyDoesnt(self):
worker = FaultyClient(FakeProcessingError)
    # The processing error reaches the top level; the cleanup code runs fine.
with self.assertRaises(FakeProcessingError):
try:
worker.Process()
except:
raise # Needed for Cleanup to know if an exception is handled.
finally:
worker.Cleanup()
self.assertLogNotMatches('FakeProcessingError')
self.assertLogNotMatches('FakeCleanupError')
self.assertItemsEqual(worker.called, ['Process', 'Cleanup'])
def testTryPassesFinallyRaises(self):
worker = FaultyClient(FakeCleanupError)
    # The processing code runs fine; the cleanup code raises an exception,
    # which reaches the top level.
with self.assertRaises(FakeCleanupError):
try:
worker.Process()
except:
raise # Needed for Cleanup to know if an exception is handled.
finally:
worker.Cleanup()
self.assertLogNotMatches('FakeProcessingError')
self.assertLogNotMatches('FakeCleanupError')
self.assertItemsEqual(worker.called, ['Process', 'Cleanup'])
def testTryRaisesExceptRaisesFinallyRaises(self):
worker = FaultyClient(
FakeProcessingError, FakeDisconnectionError, FakeCleanupError)
# Chaining try-except-finally works fine. Only the processing error reaches
# the top level; the other two are logged.
with self.assertRaises(FakeProcessingError):
try:
worker.Process()
except:
worker.Disconnect()
raise
finally:
worker.Cleanup()
self.assertLogMatches('FakeDisconnectionError')
self.assertLogMatches('FakeCleanupError')
self.assertItemsEqual(worker.called, ['Process', 'Disconnect', 'Cleanup'])


@ -0,0 +1,128 @@
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import six
class ParseError(Exception):
pass
class Expectation(object):
def __init__(self, reason, test, conditions, results):
"""Constructor for expectations.
Args:
reason: String that indicates the reason for disabling.
test: String indicating which test is being disabled.
conditions: List of tags indicating which conditions to disable for.
Conditions are combined using logical and. Example: ['Mac', 'Debug']
results: List of outcomes for test. Example: ['Skip', 'Pass']
"""
assert isinstance(reason, six.string_types) or reason is None
self._reason = reason
assert isinstance(test, six.string_types)
self._test = test
assert isinstance(conditions, list)
self._conditions = conditions
assert isinstance(results, list)
self._results = results
def __eq__(self, other):
return (self.reason == other.reason and
self.test == other.test and
self.conditions == other.conditions and
self.results == other.results)
@property
def reason(self):
return self._reason
@property
def test(self):
return self._test
@property
def conditions(self):
return self._conditions
@property
def results(self):
return self._results
class TestExpectationParser(object):
"""Parse expectations data in TA/DA format.
This parser covers the 'tagged' test lists format in:
bit.ly/chromium-test-list-format
Takes raw expectations data as a string read from the TA/DA expectation file
in the format:
# This is an example expectation file.
#
# tags: Mac Mac10.10 Mac10.11
# tags: Win Win8
crbug.com/123 [ Win ] benchmark/story [ Skip ]
...
"""
TAG_TOKEN = '# tags:'
_MATCH_STRING = r'^(?:(crbug.com/\d+) )?' # The bug field (optional).
_MATCH_STRING += r'(?:\[ (.+) \] )?' # The label field (optional).
_MATCH_STRING += r'(\S+) ' # The test path field.
_MATCH_STRING += r'\[ ([^\[.]+) \]' # The expectation field.
_MATCH_STRING += r'(\s+#.*)?$' # End comment (optional).
MATCHER = re.compile(_MATCH_STRING)
def __init__(self, raw_data):
self._tags = []
self._expectations = []
self._ParseRawExpectationData(raw_data)
def _ParseRawExpectationData(self, raw_data):
    for count, line in enumerate(raw_data.splitlines(), start=1):
# Handle metadata and comments.
if line.startswith(self.TAG_TOKEN):
for word in line[len(self.TAG_TOKEN):].split():
# Expectations must be after all tags are declared.
if self._expectations:
raise ParseError('Tag found after first expectation.')
self._tags.append(word)
elif line.startswith('#') or not line:
continue # Ignore, it is just a comment or empty.
else:
self._expectations.append(
self._ParseExpectationLine(count, line, self._tags))
def _ParseExpectationLine(self, line_number, line, tags):
match = self.MATCHER.match(line)
if not match:
raise ParseError(
'Expectation has invalid syntax on line %d: %s'
% (line_number, line))
# Unused group is optional trailing comment.
reason, raw_conditions, test, results, _ = match.groups()
    conditions = raw_conditions.split() if raw_conditions else []
for c in conditions:
if c not in tags:
raise ParseError(
'Condition %s not found in expectations tag data. Line %d'
% (c, line_number))
    return Expectation(reason, test, conditions, results.split())
@property
def expectations(self):
return self._expectations
@property
def tags(self):
return self._tags
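A brief usage sketch of the parser on data in the format described above; the tag, bug, and benchmark names are made up:

from __future__ import print_function
from py_utils import expectations_parser

raw_data = ('# tags: Mac Win Debug\n'
            'crbug.com/123 [ Mac Debug ] benchmark/story [ Skip ]\n')
parser = expectations_parser.TestExpectationParser(raw_data)
print(parser.tags)  # ['Mac', 'Win', 'Debug']
for e in parser.expectations:
  # crbug.com/123 ['Mac', 'Debug'] benchmark/story ['Skip']
  print(e.reason, e.conditions, e.test, e.results)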


@ -0,0 +1,170 @@
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from py_utils import expectations_parser
from six.moves import range # pylint: disable=redefined-builtin
class TestExpectationParserTest(unittest.TestCase):
def testInitWithGoodData(self):
good_data = """
# This is a test expectation file.
#
# tags: tag1 tag2 tag3
# tags: tag4 Mac Win Debug
crbug.com/12345 [ Mac ] b1/s1 [ Skip ]
crbug.com/23456 [ Mac Debug ] b1/s2 [ Skip ]
"""
parser = expectations_parser.TestExpectationParser(good_data)
tags = ['tag1', 'tag2', 'tag3', 'tag4', 'Mac', 'Win', 'Debug']
self.assertEqual(parser.tags, tags)
expected_outcome = [
expectations_parser.Expectation(
'crbug.com/12345', 'b1/s1', ['Mac'], ['Skip']),
expectations_parser.Expectation(
'crbug.com/23456', 'b1/s2', ['Mac', 'Debug'], ['Skip'])
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testInitWithBadData(self):
bad_data = """
# This is a test expectation file.
#
# tags: tag1 tag2 tag3
# tags: tag4
crbug.com/12345 [ Mac b1/s1 [ Skip ]
"""
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TestExpectationParser(bad_data)
def testTagAfterExpectationsStart(self):
bad_data = """
# This is a test expectation file.
#
# tags: tag1 tag2 tag3
crbug.com/12345 [ tag1 ] b1/s1 [ Skip ]
# tags: tag4
"""
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TestExpectationParser(bad_data)
def testParseExpectationLineEverythingThere(self):
raw_data = '# tags: Mac\ncrbug.com/23456 [ Mac ] b1/s2 [ Skip ]'
parser = expectations_parser.TestExpectationParser(raw_data)
expected_outcome = [
expectations_parser.Expectation(
'crbug.com/23456', 'b1/s2', ['Mac'], ['Skip'])
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testParseExpectationLineBadTag(self):
raw_data = '# tags: None\ncrbug.com/23456 [ Mac ] b1/s2 [ Skip ]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TestExpectationParser(raw_data)
def testParseExpectationLineNoConditions(self):
raw_data = '# tags: All\ncrbug.com/12345 b1/s1 [ Skip ]'
parser = expectations_parser.TestExpectationParser(raw_data)
expected_outcome = [
expectations_parser.Expectation(
'crbug.com/12345', 'b1/s1', [], ['Skip']),
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testParseExpectationLineNoBug(self):
raw_data = '# tags: All\n[ All ] b1/s1 [ Skip ]'
parser = expectations_parser.TestExpectationParser(raw_data)
expected_outcome = [
expectations_parser.Expectation(
None, 'b1/s1', ['All'], ['Skip']),
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testParseExpectationLineNoBugNoConditions(self):
raw_data = '# tags: All\nb1/s1 [ Skip ]'
parser = expectations_parser.TestExpectationParser(raw_data)
expected_outcome = [
expectations_parser.Expectation(
None, 'b1/s1', [], ['Skip']),
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testParseExpectationLineMultipleConditions(self):
raw_data = ('# tags:All None Batman\n'
'crbug.com/123 [ All None Batman ] b1/s1 [ Skip ]')
parser = expectations_parser.TestExpectationParser(raw_data)
expected_outcome = [
expectations_parser.Expectation(
'crbug.com/123', 'b1/s1', ['All', 'None', 'Batman'], ['Skip']),
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])
def testParseExpectationLineBadConditionBracket(self):
raw_data = '# tags: Mac\ncrbug.com/23456 ] Mac ] b1/s2 [ Skip ]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TestExpectationParser(raw_data)
def testParseExpectationLineBadResultBracket(self):
raw_data = '# tags: Mac\ncrbug.com/23456 ] Mac ] b1/s2 ] Skip ]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TestExpectationParser(raw_data)
def testParseExpectationLineBadConditionBracketSpacing(self):
raw_data = '# tags: Mac\ncrbug.com/2345 [Mac] b1/s1 [ Skip ]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TestExpectationParser(raw_data)
def testParseExpectationLineBadResultBracketSpacing(self):
raw_data = '# tags: Mac\ncrbug.com/2345 [ Mac ] b1/s1 [Skip]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TestExpectationParser(raw_data)
def testParseExpectationLineNoClosingConditionBracket(self):
raw_data = '# tags: Mac\ncrbug.com/2345 [ Mac b1/s1 [ Skip ]'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TestExpectationParser(raw_data)
def testParseExpectationLineNoClosingResultBracket(self):
raw_data = '# tags: Mac\ncrbug.com/2345 [ Mac ] b1/s1 [ Skip'
with self.assertRaises(expectations_parser.ParseError):
expectations_parser.TestExpectationParser(raw_data)
def testParseExpectationLineUrlInTestName(self):
raw_data = (
'# tags: Mac\ncrbug.com/123 [ Mac ] b.1/http://google.com [ Skip ]')
expected_outcomes = [
expectations_parser.Expectation(
'crbug.com/123', 'b.1/http://google.com', ['Mac'], ['Skip'])
]
parser = expectations_parser.TestExpectationParser(raw_data)
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcomes[i])
def testParseExpectationLineEndingComment(self):
raw_data = '# tags: Mac\ncrbug.com/23456 [ Mac ] b1/s2 [ Skip ] # abc 123'
parser = expectations_parser.TestExpectationParser(raw_data)
expected_outcome = [
expectations_parser.Expectation(
'crbug.com/23456', 'b1/s2', ['Mac'], ['Skip'])
]
for i in range(len(parser.expectations)):
self.assertEqual(parser.expectations[i], expected_outcome[i])


@ -0,0 +1,23 @@
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import errno
import os
import shutil
def CopyFileWithIntermediateDirectories(source_path, dest_path):
"""Copies a file and creates intermediate directories as needed.
Args:
source_path: Path to the source file.
dest_path: Path to the destination where the source file should be copied.
"""
assert os.path.exists(source_path)
try:
os.makedirs(os.path.dirname(dest_path))
except OSError as e:
if e.errno != errno.EEXIST:
raise
shutil.copy(source_path, dest_path)
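A short usage sketch; the paths are created in a scratch directory purely for illustration:

import os
import tempfile

from py_utils import file_util

src = os.path.join(tempfile.mkdtemp(), 'source.txt')
with open(src, 'w') as f:
  f.write('hello')
# 'a/b/c' does not exist yet; the intermediate directories get created.
dst = os.path.join(tempfile.mkdtemp(), 'a', 'b', 'c', 'dest.txt')
file_util.CopyFileWithIntermediateDirectories(src, dst)
with open(dst) as f:
  print(f.read())  # hello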


@ -0,0 +1,66 @@
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import errno
import os
import shutil
import tempfile
import unittest
from py_utils import file_util
class FileUtilTest(unittest.TestCase):
def setUp(self):
self._tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._tempdir)
def testCopySimple(self):
source_path = os.path.join(self._tempdir, 'source')
with open(source_path, 'w') as f:
f.write('data')
dest_path = os.path.join(self._tempdir, 'dest')
self.assertFalse(os.path.exists(dest_path))
file_util.CopyFileWithIntermediateDirectories(source_path, dest_path)
self.assertTrue(os.path.exists(dest_path))
self.assertEqual('data', open(dest_path, 'r').read())
def testCopyMakeDirectories(self):
source_path = os.path.join(self._tempdir, 'source')
with open(source_path, 'w') as f:
f.write('data')
dest_path = os.path.join(self._tempdir, 'path', 'to', 'dest')
self.assertFalse(os.path.exists(dest_path))
file_util.CopyFileWithIntermediateDirectories(source_path, dest_path)
self.assertTrue(os.path.exists(dest_path))
self.assertEqual('data', open(dest_path, 'r').read())
def testCopyOverwrites(self):
source_path = os.path.join(self._tempdir, 'source')
with open(source_path, 'w') as f:
f.write('source_data')
dest_path = os.path.join(self._tempdir, 'dest')
with open(dest_path, 'w') as f:
f.write('existing_data')
file_util.CopyFileWithIntermediateDirectories(source_path, dest_path)
self.assertEqual('source_data', open(dest_path, 'r').read())
def testRaisesError(self):
source_path = os.path.join(self._tempdir, 'source')
with open(source_path, 'w') as f:
f.write('data')
dest_path = ""
    with self.assertRaises(OSError) as cm:
      file_util.CopyFileWithIntermediateDirectories(source_path, dest_path)
    self.assertEqual(errno.ENOENT, cm.exception.errno)


@ -0,0 +1,121 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import os
LOCK_EX = None # Exclusive lock
LOCK_SH = None # Shared lock
LOCK_NB = None # Non-blocking (LockException is raised if resource is locked)
class LockException(Exception):
pass
# pylint: disable=import-error
# pylint: disable=wrong-import-position
if os.name == 'nt':
import win32con
import win32file
import pywintypes
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
LOCK_SH = 0 # the default
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
_OVERLAPPED = pywintypes.OVERLAPPED()
elif os.name == 'posix':
import fcntl
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
LOCK_NB = fcntl.LOCK_NB
# pylint: enable=import-error
# pylint: enable=wrong-import-position
@contextlib.contextmanager
def FileLock(target_file, flags):
""" Lock the target file. Similar to AcquireFileLock but allow user to write:
with FileLock(f, LOCK_EX):
...do stuff on file f without worrying about race condition
Args: see AcquireFileLock's documentation.
"""
AcquireFileLock(target_file, flags)
try:
yield
finally:
ReleaseFileLock(target_file)
def AcquireFileLock(target_file, flags):
""" Lock the target file. Note that if |target_file| is closed, the lock is
automatically released.
Args:
    target_file: file handle of the file to acquire the lock on.
    flags: any of LOCK_EX, LOCK_SH, LOCK_NB, or a bitwise OR combination of
        these flags.
"""
assert flags in (
LOCK_EX, LOCK_SH, LOCK_NB, LOCK_EX | LOCK_NB, LOCK_SH | LOCK_NB)
if os.name == 'nt':
_LockImplWin(target_file, flags)
elif os.name == 'posix':
_LockImplPosix(target_file, flags)
else:
raise NotImplementedError('%s is not supported' % os.name)
def ReleaseFileLock(target_file):
""" Unlock the target file.
Args:
    target_file: file handle of the file to release the lock on.
"""
if os.name == 'nt':
_UnlockImplWin(target_file)
elif os.name == 'posix':
_UnlockImplPosix(target_file)
else:
raise NotImplementedError('%s is not supported' % os.name)
# These implementations are based on
# http://code.activestate.com/recipes/65203/
def _LockImplWin(target_file, flags):
hfile = win32file._get_osfhandle(target_file.fileno())
try:
win32file.LockFileEx(hfile, flags, 0, -0x10000, _OVERLAPPED)
except pywintypes.error as exc_value:
if exc_value[0] == 33:
      raise LockException('Error trying to acquire lock on %s: %s' %
                          (target_file.name, exc_value[2]))
else:
raise
def _UnlockImplWin(target_file):
hfile = win32file._get_osfhandle(target_file.fileno())
try:
win32file.UnlockFileEx(hfile, 0, -0x10000, _OVERLAPPED)
except pywintypes.error as exc_value:
if exc_value[0] == 158:
# error: (158, 'UnlockFileEx', 'The segment is already unlocked.')
# To match the 'posix' implementation, silently ignore this error
pass
else:
# Q: Are there exceptions/codes we should be dealing with here?
raise
def _LockImplPosix(target_file, flags):
try:
fcntl.flock(target_file.fileno(), flags)
except IOError as exc_value:
if exc_value[0] == 11 or exc_value[0] == 35:
      raise LockException('Error trying to acquire lock on %s: %s' %
                          (target_file.name, exc_value[1]))
else:
raise
def _UnlockImplPosix(target_file):
fcntl.flock(target_file.fileno(), fcntl.LOCK_UN)
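A minimal sketch of the context-manager form, assuming a POSIX host (or a Windows host with pywin32 installed) so the platform lock flags above are populated:

import tempfile

from py_utils import lock

tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
with open(tf.name, 'w') as f:
  # Hold an exclusive lock for the duration of the block; it is released
  # when the block exits, even on error.
  with lock.FileLock(f, lock.LOCK_EX):
    f.write('only one process writes at a time')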


@ -0,0 +1,169 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import tempfile
import time
import unittest
from py_utils import lock
from six.moves import range # pylint: disable=redefined-builtin
def _AppendTextToFile(file_name):
with open(file_name, 'a') as f:
lock.AcquireFileLock(f, lock.LOCK_EX)
# Sleep 100 ms to increase the chance of another process trying to acquire
    # the lock on the file at the same time.
time.sleep(0.1)
f.write('Start')
for _ in range(10000):
f.write('*')
f.write('End')
def _ReadFileWithSharedLockBlockingThenWrite(read_file, write_file):
with open(read_file, 'r') as f:
lock.AcquireFileLock(f, lock.LOCK_SH)
content = f.read()
with open(write_file, 'a') as f2:
lock.AcquireFileLock(f2, lock.LOCK_EX)
f2.write(content)
def _ReadFileWithExclusiveLockNonBlocking(target_file, status_file):
with open(target_file, 'r') as f:
try:
lock.AcquireFileLock(f, lock.LOCK_EX | lock.LOCK_NB)
with open(status_file, 'w') as f2:
f2.write('LockException was not raised')
except lock.LockException:
with open(status_file, 'w') as f2:
f2.write('LockException raised')
class FileLockTest(unittest.TestCase):
def setUp(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
self.temp_file_path = tf.name
def tearDown(self):
os.remove(self.temp_file_path)
def testExclusiveLock(self):
    processes = []
    for _ in range(10):
      p = multiprocessing.Process(
          target=_AppendTextToFile, args=(self.temp_file_path,))
      p.start()
      processes.append(p)
    for p in processes:
      p.join()
# If the file lock works as expected, there should be 10 atomic writes of
# 'Start***...***End' to the file in some order, which lead to the final
# file content as below.
expected_file_content = ''.join((['Start'] + ['*']*10000 + ['End']) * 10)
with open(self.temp_file_path, 'r') as f:
      # Use assertTrue instead of assertEquals since the strings are big, and
      # an assertEquals failure message would contain the huge strings.
self.assertTrue(expected_file_content == f.read())
def testSharedLock(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
temp_write_file = tf.name
try:
with open(self.temp_file_path, 'w') as f:
f.write('0123456789')
with open(self.temp_file_path, 'r') as f:
# First, acquire a shared lock on temp_file_path
lock.AcquireFileLock(f, lock.LOCK_SH)
        processes = []
        # Create 10 processes that also try to acquire a shared lock on
        # temp_file_path, then append temp_file_path's content to
        # temp_write_file.
        for _ in range(10):
          p = multiprocessing.Process(
              target=_ReadFileWithSharedLockBlockingThenWrite,
              args=(self.temp_file_path, temp_write_file))
          p.start()
          processes.append(p)
        for p in processes:
          p.join()
      # temp_write_file should contain 10 copies of temp_file_path's content.
with open(temp_write_file, 'r') as f:
self.assertEquals('0123456789'*10, f.read())
finally:
os.remove(temp_write_file)
def testNonBlockingLockAcquiring(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
temp_status_file = tf.name
try:
with open(self.temp_file_path, 'w') as f:
lock.AcquireFileLock(f, lock.LOCK_EX)
p = multiprocessing.Process(
target=_ReadFileWithExclusiveLockNonBlocking,
args=(self.temp_file_path, temp_status_file))
p.start()
p.join()
with open(temp_status_file, 'r') as f:
self.assertEquals('LockException raised', f.read())
finally:
os.remove(temp_status_file)
def testUnlockBeforeClosingFile(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
temp_status_file = tf.name
try:
with open(self.temp_file_path, 'r') as f:
lock.AcquireFileLock(f, lock.LOCK_SH)
lock.ReleaseFileLock(f)
p = multiprocessing.Process(
target=_ReadFileWithExclusiveLockNonBlocking,
args=(self.temp_file_path, temp_status_file))
p.start()
p.join()
with open(temp_status_file, 'r') as f:
self.assertEquals('LockException was not raised', f.read())
finally:
os.remove(temp_status_file)
def testContextualLock(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.close()
temp_status_file = tf.name
try:
with open(self.temp_file_path, 'r') as f:
with lock.FileLock(f, lock.LOCK_EX):
          # Within this block, accessing self.temp_file_path from another
          # process should raise an exception.
p = multiprocessing.Process(
target=_ReadFileWithExclusiveLockNonBlocking,
args=(self.temp_file_path, temp_status_file))
p.start()
p.join()
with open(temp_status_file, 'r') as f:
self.assertEquals('LockException raised', f.read())
        # Accessing self.temp_file_path here should not raise an exception.
p = multiprocessing.Process(
target=_ReadFileWithExclusiveLockNonBlocking,
args=(self.temp_file_path, temp_status_file))
p.start()
p.join()
with open(temp_status_file, 'r') as f:
self.assertEquals('LockException was not raised', f.read())
finally:
os.remove(temp_status_file)


@ -0,0 +1,35 @@
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Logging util functions.
It would be named logging, but other modules in this directory use the default
logging module, so that would break them.
"""
import contextlib
import logging
@contextlib.contextmanager
def CaptureLogs(file_stream):
if not file_stream:
    # No file stream given; don't capture logs.
yield
return
fh = logging.StreamHandler(file_stream)
logger = logging.getLogger()
# Try to copy the current log format, if one is set.
if logger.handlers and hasattr(logger.handlers[0], 'formatter'):
fh.formatter = logger.handlers[0].formatter
else:
fh.setFormatter(logging.Formatter(
'(%(levelname)s) %(asctime)s %(message)s'))
logger.addHandler(fh)
try:
yield
finally:
logger = logging.getLogger()
logger.removeHandler(fh)
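A short sketch of capturing log output into an in-memory stream, mirroring the StringIO import fallback used in the unit test that follows:

import logging

try:
  from six import StringIO
except ImportError:
  from io import StringIO

from py_utils import logging_util

stream = StringIO()
with logging_util.CaptureLogs(stream):
  logging.warning('captured')
print(stream.getvalue())  # Ends with 'captured\n'.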


@ -0,0 +1,27 @@
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import unittest
try:
from six import StringIO
except ImportError:
from io import StringIO
from py_utils import logging_util
class LoggingUtilTest(unittest.TestCase):
def testCapture(self):
s = StringIO()
with logging_util.CaptureLogs(s):
logging.fatal('test')
    # Only check the end of the message, since by default the logging output
    # includes the date.
self.assertTrue(s.getvalue().endswith('test\n'))
if __name__ == '__main__':
unittest.main()


@ -0,0 +1,95 @@
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import heapq
import logging
import os
import sys
try:
import psutil
except ImportError:
psutil = None
BYTE_UNITS = ['B', 'KiB', 'MiB', 'GiB']
def FormatBytes(value):
def GetValueAndUnit(value):
for unit in BYTE_UNITS[:-1]:
if abs(value) < 1024.0:
return value, unit
value /= 1024.0
return value, BYTE_UNITS[-1]
if value is not None:
return '%.1f %s' % GetValueAndUnit(value)
else:
return 'N/A'
def _GetProcessInfo(p):
pinfo = p.as_dict(attrs=['pid', 'name', 'memory_info'])
pinfo['mem_rss'] = getattr(pinfo['memory_info'], 'rss', 0)
return pinfo
def _LogProcessInfo(pinfo, level):
pinfo['mem_rss_fmt'] = FormatBytes(pinfo['mem_rss'])
logging.log(level, '%(mem_rss_fmt)s (pid=%(pid)s)', pinfo)
def LogHostMemoryUsage(top_n=10, level=logging.INFO):
if not psutil:
    logging.warning('psutil module not found; skipping memory usage logging')
return
if psutil.version_info < (2, 0):
logging.warning('psutil %s too old, upgrade to version 2.0 or higher'
' for memory usage information.', psutil.__version__)
return
# TODO(crbug.com/777865): Remove the following pylint disable. Even if we
# check for a recent enough psutil version above, the catapult presubmit
# builder (still running some old psutil) fails pylint checks due to API
# changes in psutil.
# pylint: disable=no-member
mem = psutil.virtual_memory()
logging.log(level, 'Used %s out of %s memory available.',
FormatBytes(mem.used), FormatBytes(mem.total))
  logging.log(level, 'Memory usage of top %i process groups', top_n)
pinfos_by_names = {}
for p in psutil.process_iter():
try:
pinfo = _GetProcessInfo(p)
except psutil.NoSuchProcess:
logging.exception('process %s no longer exists', p)
continue
pname = pinfo['name']
if pname not in pinfos_by_names:
pinfos_by_names[pname] = {'name': pname, 'total_mem_rss': 0, 'pids': []}
pinfos_by_names[pname]['total_mem_rss'] += pinfo['mem_rss']
pinfos_by_names[pname]['pids'].append(str(pinfo['pid']))
sorted_pinfo_groups = heapq.nlargest(
top_n,
list(pinfos_by_names.values()),
key=lambda item: item['total_mem_rss'])
for group in sorted_pinfo_groups:
group['total_mem_rss_fmt'] = FormatBytes(group['total_mem_rss'])
group['pids_fmt'] = ', '.join(group['pids'])
logging.log(
        level, '- %(name)s - %(total_mem_rss_fmt)s - pids: %(pids_fmt)s', group)
logging.log(level, 'Current process:')
pinfo = _GetProcessInfo(psutil.Process(os.getpid()))
_LogProcessInfo(pinfo, level)
def main():
logging.basicConfig(level=logging.INFO)
LogHostMemoryUsage()
if __name__ == '__main__':
sys.exit(main())


@ -0,0 +1,35 @@
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from distutils import version # pylint: disable=no-name-in-module
def RequireVersion(module, min_version, max_version=None):
"""Ensure that an imported module's version is within a required range.
Version strings are parsed with LooseVersion, so versions like "1.8.0rc1"
(default numpy on macOS Sierra) and "2.4.13.2" (a version of OpenCV 2.x)
are allowed.
Args:
module: An already imported python module.
min_version: The module must have this or a higher version.
max_version: Optional, the module should not have this or a higher version.
Raises:
ImportError if the module's __version__ is not within the allowed range.
"""
module_version = version.LooseVersion(module.__version__)
min_version = version.LooseVersion(str(min_version))
valid_version = min_version <= module_version
if max_version is not None:
max_version = version.LooseVersion(str(max_version))
valid_version = valid_version and (module_version < max_version)
wants_version = 'at or above %s and below %s' % (min_version, max_version)
else:
wants_version = '%s or higher' % min_version
if not valid_version:
raise ImportError('%s has version %s, but version %s is required' % (
module.__name__, module_version, wants_version))
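A quick sketch of guarding a dependency's version; numpy is just an example and is assumed to be installed, but any module exposing __version__ works:

import numpy

from py_utils import modules_util

# Raises ImportError unless 1.8 <= numpy.__version__ < 3.0.
modules_util.RequireVersion(numpy, '1.8', '3.0')
print('numpy %s accepted' % numpy.__version__)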


@ -0,0 +1,41 @@
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from py_utils import modules_util
class FakeModule(object):
def __init__(self, name, version):
self.__name__ = name
self.__version__ = version
class ModulesUtilTest(unittest.TestCase):
def testRequireVersion_valid(self):
numpy = FakeModule('numpy', '2.3')
try:
modules_util.RequireVersion(numpy, '1.0')
except ImportError:
self.fail('ImportError raised unexpectedly')
def testRequireVersion_versionTooLow(self):
numpy = FakeModule('numpy', '2.3')
with self.assertRaises(ImportError) as error:
modules_util.RequireVersion(numpy, '2.5')
self.assertEqual(
str(error.exception),
'numpy has version 2.3, but version 2.5 or higher is required')
def testRequireVersion_versionTooHigh(self):
numpy = FakeModule('numpy', '2.3')
with self.assertRaises(ImportError) as error:
modules_util.RequireVersion(numpy, '1.0', '2.0')
self.assertEqual(
str(error.exception), 'numpy has version 2.3, but version'
' at or above 1.0 and below 2.0 is required')
if __name__ == '__main__':
unittest.main()


@ -0,0 +1,56 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
import py_utils
class PathTest(unittest.TestCase):
def testIsExecutable(self):
self.assertFalse(py_utils.IsExecutable('nonexistent_file'))
    # We use actual files on disk instead of pyfakefs because the executable
    # bit is set differently on Windows than on POSIX platforms, and pyfakefs
    # does not support the Windows platform well.
self.assertFalse(py_utils.IsExecutable(_GetFileInTestDir('foo.txt')))
self.assertTrue(py_utils.IsExecutable(sys.executable))
def _GetFileInTestDir(file_name):
return os.path.join(os.path.dirname(__file__), 'test_data', file_name)
class WaitForTest(unittest.TestCase):
def testWaitForTrue(self):
def ReturnTrue():
return True
self.assertTrue(py_utils.WaitFor(ReturnTrue, .1))
def testWaitForFalse(self):
def ReturnFalse():
return False
with self.assertRaises(py_utils.TimeoutException):
py_utils.WaitFor(ReturnFalse, .1)
def testWaitForEventuallyTrue(self):
    # Use a list so that the inner function can modify the variable from the
    # outer scope.
c = [0]
def ReturnCounterBasedValue():
c[0] += 1
return c[0] > 2
self.assertTrue(py_utils.WaitFor(ReturnCounterBasedValue, .5))
def testWaitForTrueLambda(self):
self.assertTrue(py_utils.WaitFor(lambda: True, .1))
def testWaitForFalseLambda(self):
with self.assertRaises(py_utils.TimeoutException):
py_utils.WaitFor(lambda: False, .1)


@ -0,0 +1,28 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Style-preserving Python code transforms.
This module provides components for modifying and querying Python code. They can
be used to build custom refactorings and linters.
"""
import functools
import multiprocessing
# pylint: disable=wildcard-import
from py_utils.refactor.annotated_symbol import * # pylint: disable=redefined-builtin
from py_utils.refactor.module import Module
def _TransformFile(transform, file_path):
module = Module(file_path)
result = transform(module)
module.Write()
return result
def Transform(transform, file_paths):
transform = functools.partial(_TransformFile, transform)
return multiprocessing.Pool().map(transform, file_paths)
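A sketch of running a transform over some files; the file paths are hypothetical, and the transform deliberately does nothing, relying only on the behavior of _TransformFile above (each Module is parsed, passed to the transform, written back, and the transform's return value is collected per file):

from py_utils import refactor

def NoOpTransform(module):
  # Inspect or mutate the py_utils.refactor.module.Module here; whatever is
  # returned is collected into the list that Transform returns.
  return True

results = refactor.Transform(NoOpTransform, ['a.py', 'b.py'])  # Hypothetical paths.
print(results)  # [True, True]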


@ -0,0 +1,71 @@
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=wildcard-import
from py_utils.refactor.annotated_symbol.class_definition import *
from py_utils.refactor.annotated_symbol.function_definition import *
from py_utils.refactor.annotated_symbol.import_statement import *
from py_utils.refactor.annotated_symbol.reference import * # pylint: disable=redefined-builtin
from py_utils.refactor import snippet
__all__ = [
'Annotate',
'Class',
'Function',
'Import',
'Reference',
]
# Specific symbol types with extra methods for manipulating them.
# Python's full grammar is here:
# https://docs.python.org/2/reference/grammar.html
# Annotated Symbols have an Annotate classmethod that takes a symbol type and
# list of children, and returns an instance of that annotated Symbol.
ANNOTATED_SYMBOLS = (
AsName,
Class,
DottedName,
ImportFrom,
ImportName,
Function,
)
# Unfortunately, some logical groupings are not represented by a node in the
# parse tree. To work around this, some annotated Symbols have an Annotate
# classmethod that takes and returns a list of Snippets instead.
ANNOTATED_GROUPINGS = (
Reference,
)
def Annotate(f):
"""Return the syntax tree of the given file."""
return _AnnotateNode(snippet.Snippetize(f))
def _AnnotateNode(node):
if not isinstance(node, snippet.Symbol):
return node
children = [_AnnotateNode(c) for c in node.children]
for symbol_type in ANNOTATED_GROUPINGS:
annotated_grouping = symbol_type.Annotate(children)
if annotated_grouping:
children = annotated_grouping
break
for symbol_type in ANNOTATED_SYMBOLS:
annotated_symbol = symbol_type.Annotate(node.type, children)
if annotated_symbol:
return annotated_symbol
return snippet.Symbol(node.type, children)
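A sketch of annotating a source file's parse tree; the path is made up, and it is assumed that Annotate, via snippet.Snippetize, consumes an open file object:

from py_utils.refactor import annotated_symbol

# Hypothetical path; Annotate is assumed to accept an open file object.
with open('some_module.py') as f:
  tree = annotated_symbol.Annotate(f)
# The result is a snippet.Symbol tree whose recognized nodes have been
# replaced by the annotated symbol classes listed above.
print(type(tree))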

Some files were not shown because too many files have changed in this diff.