text (string, lengths 4–1.02M) | meta (dict) |
---|---|
def allLongestStrings(in_array):
    # Given an array of strings, return a new array containing only
    # the strings whose length equals the maximum length, preserving
    # their original order.
    lengths = [(len(s), s) for s in in_array]
    max_len = max(length for length, _ in lengths)
    # Keep only the (length, string) tuples whose length matches the
    # maximum; the original string is carried along in each tuple.
    res = filter(lambda x: x[0] == max_len, lengths)
    # Return just the string part of the filtered tuples.
    return list(map(lambda x: x[1], res))
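# A minimal usage sketch (hypothetical input):
#   allLongestStrings(["aba", "aa", "ad", "vcd", "aba"])
#   -> ["aba", "vcd", "aba"]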
| {
"content_hash": "0a54281006b176221ec8fae9ca5691cf",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 66,
"avg_line_length": 47.166666666666664,
"alnum_prop": 0.6908127208480566,
"repo_name": "Zubieta/CPP",
"id": "5eb1108fab75a71f9204b17a3582bc357cbe4cd1",
"size": "634",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "CodeSignal/Arcade/Intro/Level_03/01_All_Longest_Strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "290798"
}
],
"symlink_target": ""
} |
"""Command line tool help you debug your event definitions.
Feed it a list of test notifications in json format, and it will show
you what events will be generated.
"""
import json
import sys
from oslo.config import cfg
from stevedore import extension
from ceilometer.event import converter
from ceilometer import service
cfg.CONF.register_cli_opts([
cfg.StrOpt('input-file',
short='i',
help='File to read test notifications from.'
                    ' (Containing a JSON list of notifications.)'
                    ' Defaults to stdin.'),
cfg.StrOpt('output-file',
short='o',
               help='File to write results to. Defaults to stdout.'),
])
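# Example invocation (hypothetical file names), printing the events generated
# from a JSON list of notifications:
#   python ceilometer-test-event.py --input-file notifications.json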
TYPES = {1: 'text',
2: 'int',
3: 'float',
4: 'datetime'}
service.prepare_service()
config_file = converter.get_config_file()
output_file = cfg.CONF.output_file
input_file = cfg.CONF.input_file
if output_file is None:
out = sys.stdout
else:
out = open(output_file, 'w')
if input_file is None:
notifications = json.load(sys.stdin)
else:
with open(input_file, 'r') as f:
notifications = json.load(f)
out.write("Definitions file: %s\n" % config_file)
out.write("Notifications tested: %s\n" % len(notifications))
event_converter = converter.setup_events(
extension.ExtensionManager(
namespace='ceilometer.event.trait_plugin'))
for notification in notifications:
event = event_converter.to_event(notification)
if event is None:
out.write("Dropped notification: %s\n" %
notification['message_id'])
continue
out.write("Event: %s at %s\n" % (event.event_name, event.generated))
for trait in event.traits:
dtype = TYPES[trait.dtype]
out.write(" Trait: name: %s, type: %s, value: %s\n" % (
trait.name, dtype, trait.value))
| {
"content_hash": "06d5f62889b234b65958a6dccfeccd9f",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 72,
"avg_line_length": 27.426470588235293,
"alnum_prop": 0.6375335120643432,
"repo_name": "lexxito/monitoring",
"id": "d9b6d70bfcc170fe6b78a4f351094d48e8b9a6f4",
"size": "2553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/ceilometer-test-event.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6284"
},
{
"name": "HTML",
"bytes": "5892"
},
{
"name": "JavaScript",
"bytes": "63538"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "2077479"
},
{
"name": "Shell",
"bytes": "8171"
}
],
"symlink_target": ""
} |
import demistomock as demisto
from CommonServerPython import *
import nltk
import re
from html.parser import HTMLParser
from html import unescape
html_parser = HTMLParser()
CLEAN_HTML = (demisto.args().get('cleanHtml', 'yes') == 'yes')
REMOVE_LINE_BREAKS = (demisto.args().get('removeLineBreaks', 'yes') == 'yes')
TOKENIZE_TYPE = demisto.args().get('type', 'word')
TEXT_ENCODE = demisto.args().get('zencoding', 'utf-8')
HASH_SEED = demisto.args().get('hashWordWithSeed')
REMOVE_HTML_PATTERNS = [
re.compile(r"(?is)<(script|style).*?>.*?(</\1>)"),
re.compile(r"(?s)<!--(.*?)-->[\n]?"),
re.compile(r"(?s)<.*?>"),
re.compile(r" "),
re.compile(r" +")
]
def clean_html(text):
if not CLEAN_HTML:
return text
cleaned = text
for pattern in REMOVE_HTML_PATTERNS:
cleaned = pattern.sub(" ", cleaned)
return unescape(cleaned).strip()
def tokenize_text(text):
if not text:
return ''
text = text.lower()
if TOKENIZE_TYPE == 'word':
word_tokens = nltk.word_tokenize(text)
elif TOKENIZE_TYPE == 'punkt':
word_tokens = nltk.wordpunct_tokenize(text)
else:
raise Exception("Unsupported tokenize type: %s" % TOKENIZE_TYPE)
if HASH_SEED:
word_tokens = map(str, map(lambda x: hash_djb2(x, int(HASH_SEED)), word_tokens))
return (' '.join(word_tokens)).strip()
def remove_line_breaks(text):
if not REMOVE_LINE_BREAKS:
return text
return text.replace("\r", "").replace("\n", "")
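# A rough illustration of the pipeline applied in main() (hypothetical input),
# assuming the default arguments (cleanHtml=yes, removeLineBreaks=yes,
# type=word, no hash seed):
#   clean_html("<p>Hello <b>World</b></p>")  -> "Hello World"
#   tokenize_text("Hello World")             -> "hello world"
#   remove_line_breaks("hello\nworld")       -> "helloworld"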
def main():
text = demisto.args()['value']
if type(text) is not list:
text = [text]
result = list(map(remove_line_breaks, map(tokenize_text, map(clean_html, text))))
if len(result) == 1:
result = result[0]
demisto.results({
'Contents': result,
'ContentsFormat': formats['json'] if type(result) is list else formats['text'],
'EntryContext': {
'WordTokenizeOutput': result
}
})
# python2 uses __builtin__, python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
| {
"content_hash": "757a08124a988a9891ce2dd05b7cf306",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 88,
"avg_line_length": 27.12987012987013,
"alnum_prop": 0.6069889899473432,
"repo_name": "VirusTotal/content",
"id": "28d6ee737a55190e9582fcaa25ba594225964b49",
"size": "2089",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/CommonScripts/Scripts/WordTokenizeTest/WordTokenizeTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from mock import patch
from datetime import datetime
from django.core.urlresolvers import reverse
from sentry.models import (
Activity, File, Release, ReleaseCommit,
ReleaseFile, ReleaseProject, Repository
)
from sentry.testutils import APITestCase
class ReleaseDetailsTest(APITestCase):
def test_simple(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project = self.create_project(team=team1, organization=org)
project2 = self.create_project(team=team2, organization=org)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release2 = Release.objects.create(
organization_id=org.id,
version='12345678',
)
release.add_project(project)
release2.add_project(project2)
self.create_member(teams=[team1], user=user, organization=org)
self.login_as(user=user)
ReleaseProject.objects.filter(
project=project,
release=release
).update(new_groups=5)
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': release.version,
})
response = self.client.get(url)
assert response.status_code == 200, response.content
assert response.data['version'] == release.version
assert response.data['newGroups'] == 5
# no access
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': release2.version,
})
response = self.client.get(url)
assert response.status_code == 403
def test_multiple_projects(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project = self.create_project(team=team1, organization=org)
project2 = self.create_project(team=team2, organization=org)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
release.add_project(project2)
self.create_member(teams=[team1, team2], user=user, organization=org)
self.login_as(user=user)
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': release.version,
})
response = self.client.get(url)
assert response.status_code == 200, response.content
class UpdateReleaseDetailsTest(APITestCase):
@patch('sentry.tasks.commits.fetch_commits')
def test_simple(self, mock_fetch_commits):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
repo = Repository.objects.create(
organization_id=org.id,
name='example/example',
provider='dummy',
)
repo2 = Repository.objects.create(
organization_id=org.id,
name='example/example2',
provider='dummy',
)
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project = self.create_project(team=team1, organization=org)
project2 = self.create_project(team=team2, organization=org)
base_release = Release.objects.create(
organization_id=org.id,
version='000000000',
)
base_release.add_project(project)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release2 = Release.objects.create(
organization_id=org.id,
version='12345678',
)
release.add_project(project)
release2.add_project(project2)
self.create_member(teams=[team1], user=user, organization=org)
self.login_as(user=user)
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': base_release.version,
})
self.client.put(url, {
'ref': 'master',
'headCommits': [
{'currentId': '0' * 40, 'repository': repo.name},
{'currentId': '0' * 40, 'repository': repo2.name},
],
})
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': release.version,
})
response = self.client.put(url, {
'ref': 'master',
'refs': [
{'commit': 'a' * 40, 'repository': repo.name},
{'commit': 'b' * 40, 'repository': repo2.name},
],
})
mock_fetch_commits.apply_async.assert_called_with(
kwargs={
'release_id': release.id,
'user_id': user.id,
'refs': [
{'commit': 'a' * 40, 'repository': repo.name},
{'commit': 'b' * 40, 'repository': repo2.name},
],
'prev_release_id': base_release.id,
}
)
assert response.status_code == 200, response.content
assert response.data['version'] == release.version
release = Release.objects.get(id=release.id)
assert release.ref == 'master'
# no access
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': release2.version,
})
response = self.client.put(url, {'ref': 'master'})
assert response.status_code == 403
@patch('sentry.tasks.commits.fetch_commits')
def test_deprecated_head_commits(self, mock_fetch_commits):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
repo = Repository.objects.create(
organization_id=org.id,
name='example/example',
provider='dummy',
)
repo2 = Repository.objects.create(
organization_id=org.id,
name='example/example2',
provider='dummy',
)
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project = self.create_project(team=team1, organization=org)
project2 = self.create_project(team=team2, organization=org)
base_release = Release.objects.create(
organization_id=org.id,
version='000000000',
)
base_release.add_project(project)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release2 = Release.objects.create(
organization_id=org.id,
version='12345678',
)
release.add_project(project)
release2.add_project(project2)
self.create_member(teams=[team1], user=user, organization=org)
self.login_as(user=user)
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': base_release.version,
})
self.client.put(url, {
'ref': 'master',
'headCommits': [
{'currentId': '0' * 40, 'repository': repo.name},
{'currentId': '0' * 40, 'repository': repo2.name},
],
})
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': release.version,
})
response = self.client.put(url, {
'ref': 'master',
'headCommits': [
{'currentId': 'a' * 40, 'repository': repo.name},
{'currentId': 'b' * 40, 'repository': repo2.name},
],
})
mock_fetch_commits.apply_async.assert_called_with(
kwargs={
'release_id': release.id,
'user_id': user.id,
'refs': [
{'commit': 'a' * 40, 'previousCommit': None, 'repository': repo.name},
{'commit': 'b' * 40, 'previousCommit': None, 'repository': repo2.name},
],
'prev_release_id': base_release.id,
}
)
assert response.status_code == 200, response.content
assert response.data['version'] == release.version
release = Release.objects.get(id=release.id)
assert release.ref == 'master'
# no access
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': release2.version,
})
response = self.client.put(url, {'ref': 'master'})
assert response.status_code == 403
def test_commits(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(team=team, organization=org)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': release.version,
})
response = self.client.put(url, data={
'commits': [
{'id': 'a' * 40},
{'id': 'b' * 40},
],
})
assert response.status_code == 200, (response.status_code, response.content)
rc_list = list(ReleaseCommit.objects.filter(
release=release,
).select_related('commit', 'commit__author').order_by('order'))
assert len(rc_list) == 2
for rc in rc_list:
assert rc.organization_id == org.id
def test_activity_generation(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(team=team, organization=org)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': release.version,
})
response = self.client.put(url, data={
'dateReleased': datetime.utcnow().isoformat() + 'Z',
})
assert response.status_code == 200, (response.status_code, response.content)
release = Release.objects.get(id=release.id)
assert release.date_released
activity = Activity.objects.filter(
type=Activity.RELEASE,
project=project,
ident=release.version,
)
assert activity.exists()
class ReleaseDeleteTest(APITestCase):
def test_simple(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(team=team, organization=org)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
release_file = ReleaseFile.objects.create(
organization_id=project.organization_id,
release=release,
file=File.objects.create(
name='application.js',
type='release.file',
),
name='http://example.com/application.js'
)
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': release.version,
})
response = self.client.delete(url)
assert response.status_code == 204, response.content
assert not Release.objects.filter(id=release.id).exists()
assert not ReleaseFile.objects.filter(id=release_file.id).exists()
def test_existing_group(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(team=team, organization=org)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
self.create_group(first_release=release)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': release.version,
})
response = self.client.delete(url)
assert response.status_code == 400, response.content
assert Release.objects.filter(id=release.id).exists()
def test_bad_repo_name(self):
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(
name='foo',
organization=org,
team=team
)
release = Release.objects.create(
organization_id=org.id,
version='abcabcabc',
)
release.add_project(project)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
url = reverse('sentry-api-0-organization-release-details', kwargs={
'organization_slug': org.slug,
'version': release.version,
})
response = self.client.put(url, data={
'version': '1.2.1',
'projects': [project.slug],
'refs': [{
'repository': 'not_a_repo',
'commit': 'a' * 40,
}]
})
assert response.status_code == 400
assert response.data == {
'refs': [u'Invalid repository names: not_a_repo']
}
| {
"content_hash": "dd860b7442069527bcf03f955b101167",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 91,
"avg_line_length": 32.22037422037422,
"alnum_prop": 0.5696864111498258,
"repo_name": "JackDanger/sentry",
"id": "cf580b1f27f91799891f950deb0128abe7fc5438",
"size": "15498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/api/endpoints/test_organization_release_details.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583430"
},
{
"name": "HTML",
"bytes": "319622"
},
{
"name": "JavaScript",
"bytes": "624672"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "6279717"
}
],
"symlink_target": ""
} |
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qrtextedit import ScanQRTextEdit
import re
from decimal import Decimal
from electrum import bitcoin
import util
RE_ADDRESS = '[1-9A-HJ-NP-Za-km-z]{26,}'
RE_ALIAS = r'(.*?)\s*\<([1-9A-HJ-NP-Za-km-z]{26,})\>'
frozen_style = "QWidget { background-color:none; border:none;}"
normal_style = "QPlainTextEdit { }"
class PayToEdit(ScanQRTextEdit):
def __init__(self, win):
ScanQRTextEdit.__init__(self)
self.win = win
self.amount_edit = win.amount_e
self.document().contentsChanged.connect(self.update_size)
self.heightMin = 0
self.heightMax = 150
self.c = None
self.textChanged.connect(self.check_text)
self.outputs = []
self.errors = []
self.is_pr = False
self.is_alias = False
self.scan_f = win.pay_to_URI
self.update_size()
self.payto_address = None
self.previous_payto = ''
def setFrozen(self, b):
self.setReadOnly(b)
self.setStyleSheet(frozen_style if b else normal_style)
for button in self.buttons:
button.setHidden(b)
def setGreen(self):
self.setStyleSheet(util.GREEN_BG)
def setExpired(self):
self.setStyleSheet(util.RED_BG)
def parse_address_and_amount(self, line):
x, y = line.split(',')
out_type, out = self.parse_output(x)
amount = self.parse_amount(y)
return out_type, out, amount
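    # e.g. (hypothetical line, with 8 decimal places configured):
    #   "1BoatSLRHtKNngkdXEeobR76b53LETtpyT, 0.05"
    #   -> (bitcoin.TYPE_ADDRESS, <address>, 5000000)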
def parse_output(self, x):
try:
address = self.parse_address(x)
return bitcoin.TYPE_ADDRESS, address
except:
script = self.parse_script(x)
return bitcoin.TYPE_SCRIPT, script
def parse_script(self, x):
from electrum.transaction import opcodes, push_script
script = ''
for word in x.split():
if word[0:3] == 'OP_':
assert word in opcodes.lookup
script += chr(opcodes.lookup[word])
else:
script += push_script(word).decode('hex')
return script
def parse_amount(self, x):
if x.strip() == '!':
return '!'
p = pow(10, self.amount_edit.decimal_point())
return int(p * Decimal(x.strip()))
def parse_address(self, line):
r = line.strip()
m = re.match('^'+RE_ALIAS+'$', r)
address = str(m.group(2) if m else r)
assert bitcoin.is_address(address)
return address
def check_text(self):
self.errors = []
if self.is_pr:
return
# filter out empty lines
lines = filter(lambda x: x, self.lines())
outputs = []
total = 0
self.payto_address = None
if len(lines) == 1:
data = lines[0]
if data.startswith("bitcoin:"):
self.scan_f(data)
return
try:
self.payto_address = self.parse_output(data)
except:
pass
if self.payto_address:
self.win.lock_amount(False)
return
is_max = False
for i, line in enumerate(lines):
try:
_type, to_address, amount = self.parse_address_and_amount(line)
except:
self.errors.append((i, line.strip()))
continue
outputs.append((_type, to_address, amount))
if amount == '!':
is_max = True
else:
total += amount
self.win.is_max = is_max
self.outputs = outputs
self.payto_address = None
if self.win.is_max:
self.win.do_update_fee()
else:
self.amount_edit.setAmount(total if outputs else None)
self.win.lock_amount(total or len(lines)>1)
def get_errors(self):
return self.errors
def get_recipient(self):
return self.payto_address
def get_outputs(self, is_max):
if self.payto_address:
if is_max:
amount = '!'
else:
amount = self.amount_edit.get_amount()
_type, addr = self.payto_address
self.outputs = [(_type, addr, amount)]
return self.outputs[:]
def lines(self):
return unicode(self.toPlainText()).split('\n')
def is_multiline(self):
return len(self.lines()) > 1
def paytomany(self):
self.setText("\n\n\n")
self.update_size()
def update_size(self):
docHeight = self.document().size().height()
h = docHeight*17 + 11
if self.heightMin <= h <= self.heightMax:
self.setMinimumHeight(h)
self.setMaximumHeight(h)
self.verticalScrollBar().hide()
def setCompleter(self, completer):
self.c = completer
self.c.setWidget(self)
self.c.setCompletionMode(QCompleter.PopupCompletion)
self.c.activated.connect(self.insertCompletion)
def insertCompletion(self, completion):
if self.c.widget() != self:
return
tc = self.textCursor()
extra = completion.length() - self.c.completionPrefix().length()
tc.movePosition(QTextCursor.Left)
tc.movePosition(QTextCursor.EndOfWord)
tc.insertText(completion.right(extra))
self.setTextCursor(tc)
def textUnderCursor(self):
tc = self.textCursor()
tc.select(QTextCursor.WordUnderCursor)
return tc.selectedText()
def keyPressEvent(self, e):
if self.isReadOnly():
return
if self.c.popup().isVisible():
if e.key() in [Qt.Key_Enter, Qt.Key_Return]:
e.ignore()
return
if e.key() in [Qt.Key_Tab]:
e.ignore()
return
if e.key() in [Qt.Key_Down, Qt.Key_Up] and not self.is_multiline():
e.ignore()
return
QPlainTextEdit.keyPressEvent(self, e)
        ctrlOrShift = e.modifiers() & (Qt.ControlModifier | Qt.ShiftModifier)  # bitwise test for Ctrl/Shift
if self.c is None or (ctrlOrShift and e.text().isEmpty()):
return
eow = QString("~!@#$%^&*()_+{}|:\"<>?,./;'[]\\-=")
        hasModifier = (e.modifiers() != Qt.NoModifier) and not ctrlOrShift
completionPrefix = self.textUnderCursor()
if hasModifier or e.text().isEmpty() or completionPrefix.length() < 1 or eow.contains(e.text().right(1)):
self.c.popup().hide()
return
if completionPrefix != self.c.completionPrefix():
            self.c.setCompletionPrefix(completionPrefix)
self.c.popup().setCurrentIndex(self.c.completionModel().index(0, 0))
cr = self.cursorRect()
cr.setWidth(self.c.popup().sizeHintForColumn(0) + self.c.popup().verticalScrollBar().sizeHint().width())
self.c.complete(cr)
def qr_input(self):
data = super(PayToEdit,self).qr_input()
if data.startswith("bitcoin:"):
self.scan_f(data)
# TODO: update fee
def resolve(self):
self.is_alias = False
if self.hasFocus():
return
if self.is_multiline(): # only supports single line entries atm
return
if self.is_pr:
return
key = str(self.toPlainText())
if key == self.previous_payto:
return
self.previous_payto = key
if not (('.' in key) and (not '<' in key) and (not ' ' in key)):
return
try:
data = self.win.contacts.resolve(key)
except:
return
if not data:
return
self.is_alias = True
address = data.get('address')
name = data.get('name')
new_url = key + ' <' + address + '>'
self.setText(new_url)
self.previous_payto = new_url
#if self.win.config.get('openalias_autoadd') == 'checked':
self.win.contacts[key] = ('openalias', name)
self.win.contact_list.on_update()
self.setFrozen(True)
if data.get('type') == 'openalias':
self.validated = data.get('validated')
if self.validated:
self.setGreen()
else:
self.setExpired()
else:
self.validated = None
| {
"content_hash": "bbffec352899ac05322054676d39fe7a",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 113,
"avg_line_length": 29.661921708185055,
"alnum_prop": 0.5454109178164367,
"repo_name": "fireduck64/electrum",
"id": "d71753162f23e61f07605703fa1e265f5069e731",
"size": "9500",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gui/qt/paytoedit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3867"
},
{
"name": "Makefile",
"bytes": "836"
},
{
"name": "NSIS",
"bytes": "7125"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Protocol Buffer",
"bytes": "2354"
},
{
"name": "Python",
"bytes": "1241321"
},
{
"name": "Shell",
"bytes": "7035"
}
],
"symlink_target": ""
} |
import os
from flask import Flask, request
from freeipa_api_client import IPAPassword
import requests
app = Flask(__name__)
FREEIPA_API_SERVER_URL = os.environ['FREEIPA_API_SERVER_URL']
@app.route('%s/' % os.environ.get('FREEIPA_CHANGE_PASSWORD_URL_PREFIX'), methods=['GET', 'POST'])
def hello_world():
if request.method == 'POST':
error_message = None
if not request.form.get('username'):
error_message = "Please, enter username."
elif not request.form.get('current_password'):
error_message = "Please, enter current password."
elif not request.form.get('new_password1'):
error_message = "Please, enter new password."
elif request.form.get('new_password1') != request.form.get('new_password2'):
error_message = "Passwords don't match."
else:
ipa_password_api = IPAPassword(requests, FREEIPA_API_SERVER_URL)
password_change_status, password_change_response = ipa_password_api.changePassword(
request.form['username'],
request.form['current_password'],
request.form['new_password1']
)
if not password_change_status:
error_message = password_change_response.headers.get('x-ipa-pwchange-policy-error')
if not error_message:
error_message = password_change_response.headers.get('x-ipa-pwchange-result')
if not error_message:
error_message = "Unexpected error: <pre>%r</pre><br><pre>%r</pre>" % (
password_change_response.headers,
password_change_response.content,
)
return (
            ('<div>%s</div>' % (error_message if error_message else 'Password is changed successfully!')) +
'<a href="">Back</a>'
)
return (
'<form method="POST">'
'<label style="display: block">Username: <input type="text" name="username"></label>'
'<label style="display: block">Current Password: <input type="password" name="current_password"></label>'
'<label style="display: block">New Password: <input type="password" name="new_password1"></label>'
'<label style="display: block">New Password (once again): <input type="password" name="new_password2"></label>'
'<input type="submit" value="Change Password">'
'</form>'
)
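# A minimal local run sketch (hypothetical values; the module itself does not
# call app.run()), assuming the environment variables used above are set:
#   export FREEIPA_API_SERVER_URL=https://ipa.example.com
#   export FREEIPA_CHANGE_PASSWORD_URL_PREFIX=/change-password
#   gunicorn freeipa_change_password_service:app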
| {
"content_hash": "39823aeb82c701d0b9bfcef6be3b8a1d",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 123,
"avg_line_length": 44.464285714285715,
"alnum_prop": 0.5839357429718876,
"repo_name": "frol/freeipa-change-password-service",
"id": "f11e09946b95b5e39d4fa1d2e1e90e2315df31b1",
"size": "2490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freeipa_change_password_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2490"
}
],
"symlink_target": ""
} |
from .model import * # noqa
from .transformer_layer_xmod import * # noqa
| {
"content_hash": "e5978217f7a75fc972f72a91a4fa6863",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 45,
"avg_line_length": 37.5,
"alnum_prop": 0.7066666666666667,
"repo_name": "pytorch/fairseq",
"id": "bbf7694920eb00bed27e17dac272611be1ab44f9",
"size": "253",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "fairseq/models/xmod/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21106"
},
{
"name": "Cuda",
"bytes": "38166"
},
{
"name": "Cython",
"bytes": "13294"
},
{
"name": "Lua",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "3699357"
},
{
"name": "Shell",
"bytes": "2182"
}
],
"symlink_target": ""
} |
from . import Arduino
from . import ARDUINO_GROVE_I2C
__author__ = "Marco Rabozzi, Luca Cerina, Giuseppe Natale"
__copyright__ = "Copyright 2016, NECST Laboratory, Politecnico di Milano"
ARDUINO_GROVE_HAPTIC_MOTOR_PROGRAM = "arduino_grove_haptic_motor.bin"
CONFIG_IOP_SWITCH = 0x1
START_WAVEFORM = 0x2
STOP_WAVEFORM = 0x3
READ_IS_PLAYING = 0x4
class Grove_HapticMotor(object):
"""This class controls the Grove Haptic Motor based on the DRV2605L.
Hardware version v0.9.
Attributes
----------
microblaze : Arduino
Microblaze processor instance used by this module.
"""
def __init__(self, mb_info, gr_pin):
"""Return a new instance of an Grove_Haptic_Motor object.
Parameters
----------
mb_info : dict
A dictionary storing Microblaze information, such as the
IP name and the reset name.
gr_pin: list
A group of pins on arduino-grove shield.
"""
if gr_pin not in [ARDUINO_GROVE_I2C]:
raise ValueError("Group number can only be I2C.")
self.microblaze = Arduino(mb_info, ARDUINO_GROVE_HAPTIC_MOTOR_PROGRAM)
self.microblaze.write_blocking_command(CONFIG_IOP_SWITCH)
def play(self, effect):
"""Play a vibration effect on the Grove Haptic Motor peripheral.
Valid effect identifiers are in the range [1, 127].
Parameters
----------
effect : int
An integer that specifies the effect.
Returns
-------
None
"""
if (effect < 1) or (effect > 127):
raise ValueError("Valid effect identifiers are within 1 and 127.")
self.microblaze.write_mailbox(0, [effect, 0])
self.microblaze.write_blocking_command(START_WAVEFORM)
def play_sequence(self, sequence):
"""Play a sequence of effects possibly separated by pauses.
At most 8 effects or pauses can be specified at a time.
Pauses are defined using negative integer values in the
range [-1, -127] that correspond to a pause length in the
range [10, 1270] ms
Valid effect identifiers are in the range [1, 127]
As an example, in the following sequence example: [4,-20,5]
effect 4 is played and after a pause of 200 ms effect 5 is played
Parameters
----------
sequence : list
At most 8 values specifying effects and pauses.
Returns
-------
None
"""
length = len(sequence)
if length < 1:
raise ValueError("The sequence must contain at least one value.")
if length > 8:
raise ValueError("The sequence cannot contain more than 8 values.")
for i in range(length):
if sequence[i] < 0:
if sequence[i] < -127:
raise ValueError("Pause value must be smaller than -127")
sequence[i] = -sequence[i] + 128
else:
if (sequence[i] < 1) or (sequence[i] > 127):
raise ValueError("Valid effect identifiers are within " +
"1 and 127.")
sequence += [0] * (8 - length)
self.microblaze.write_mailbox(0, sequence)
self.microblaze.write_blocking_command(START_WAVEFORM)
def stop(self):
"""Stop an effect or a sequence on the motor peripheral.
Returns
-------
None
"""
self.microblaze.write_blocking_command(STOP_WAVEFORM)
def is_playing(self):
"""Check if a vibration effect is running on the motor.
Returns
-------
bool
True if a vibration effect is playing, false otherwise
"""
self.microblaze.write_blocking_command(READ_IS_PLAYING)
flag = self.microblaze.read_mailbox(0)
return flag == 1
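# A minimal usage sketch (hypothetical mb_info dictionary), assuming an
# Arduino-Grove shield with the haptic motor attached to the I2C group:
#   motor = Grove_HapticMotor(mb_info, ARDUINO_GROVE_I2C)
#   motor.play(4)                     # play effect 4
#   motor.play_sequence([4, -20, 5])  # effect 4, 200 ms pause, effect 5
#   motor.stop()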
| {
"content_hash": "cbee1e122bae1a0a95e461a774f0638d",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 79,
"avg_line_length": 30.875,
"alnum_prop": 0.5789473684210527,
"repo_name": "schelleg/PYNQ",
"id": "b42bef17bc5650c803824b1df50df26c0876cac5",
"size": "5600",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pynq/lib/arduino/arduino_grove_haptic_motor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "BitBake",
"bytes": "1840"
},
{
"name": "C",
"bytes": "1062607"
},
{
"name": "C++",
"bytes": "76769"
},
{
"name": "CMake",
"bytes": "578"
},
{
"name": "JavaScript",
"bytes": "239958"
},
{
"name": "Jupyter Notebook",
"bytes": "17148467"
},
{
"name": "Makefile",
"bytes": "165279"
},
{
"name": "Python",
"bytes": "1388540"
},
{
"name": "Shell",
"bytes": "67192"
},
{
"name": "SystemVerilog",
"bytes": "53374"
},
{
"name": "Tcl",
"bytes": "1383109"
},
{
"name": "VHDL",
"bytes": "738710"
},
{
"name": "Verilog",
"bytes": "284588"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_commoner_naboo_bothan_female_02.iff"
result.attribute_template_id = 9
result.stfName("npc_name","bothan_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "f067d5dd93de67bcb129b2190fd1cce9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 24.76923076923077,
"alnum_prop": 0.7049689440993789,
"repo_name": "anhstudios/swganh",
"id": "4c5a397282a24369e190d17e20f3828335e18041",
"size": "467",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_commoner_naboo_bothan_female_02.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""Common utility classes and functions for testing."""
import io
import os
import pytest
from google.auth import credentials
from google.auth import transport
from requests import adapters
from requests import models
import firebase_admin
def resource_filename(filename):
"""Returns the absolute path to a test resource."""
return os.path.join(os.path.dirname(__file__), 'data', filename)
def resource(filename):
"""Returns the contents of a test resource."""
with open(resource_filename(filename), 'r') as file_obj:
return file_obj.read()
def cleanup_apps():
with firebase_admin._apps_lock:
apps = list(firebase_admin._apps.values())
for app in apps:
firebase_admin.delete_app(app)
def run_without_project_id(func):
env_vars = ['GCLOUD_PROJECT', 'GOOGLE_CLOUD_PROJECT']
env_values = []
for env_var in env_vars:
gcloud_project = os.environ.get(env_var)
if gcloud_project:
del os.environ[env_var]
env_values.append(gcloud_project)
try:
func()
finally:
for idx, env_var in enumerate(env_vars):
gcloud_project = env_values[idx]
if gcloud_project:
os.environ[env_var] = gcloud_project
def new_monkeypatch():
return pytest.MonkeyPatch()
class MockResponse(transport.Response):
def __init__(self, status, response):
self._status = status
self._response = response
@property
def status(self):
return self._status
@property
def headers(self):
return {}
@property
def data(self):
return self._response.encode()
class MockRequest(transport.Request):
"""A mock HTTP requests implementation.
This can be used whenever an HTTP interaction needs to be mocked
for testing purposes. For example HTTP calls to fetch public key
certificates, and HTTP calls to retrieve access tokens can be
mocked using this class.
"""
def __init__(self, status, response):
self.response = MockResponse(status, response)
self.log = []
def __call__(self, *args, **kwargs): # pylint: disable=arguments-differ
self.log.append((args, kwargs))
return self.response
class MockFailedRequest(transport.Request):
"""A mock HTTP request that fails by raising an exception."""
def __init__(self, error):
self.error = error
self.log = []
def __call__(self, *args, **kwargs): # pylint: disable=arguments-differ
self.log.append((args, kwargs))
raise self.error
# Temporarily disable the lint rule. For more information see:
# https://github.com/googleapis/google-auth-library-python/pull/561
# pylint: disable=abstract-method
class MockGoogleCredential(credentials.Credentials):
"""A mock Google authentication credential."""
def refresh(self, request):
self.token = 'mock-token'
class MockCredential(firebase_admin.credentials.Base):
"""A mock Firebase credential implementation."""
def __init__(self):
self._g_credential = MockGoogleCredential()
def get_credential(self):
return self._g_credential
class MockMultiRequestAdapter(adapters.HTTPAdapter):
"""A mock HTTP adapter that supports multiple responses for the Python requests module."""
def __init__(self, responses, statuses, recorder):
"""Constructs a MockMultiRequestAdapter.
The lengths of the responses and statuses parameters must match.
Each incoming request consumes a response and a status, in order. If all responses and
statuses are exhausted, further requests will reuse the last response and status.
"""
adapters.HTTPAdapter.__init__(self)
if len(responses) != len(statuses):
raise ValueError('The lengths of responses and statuses do not match.')
self._current_response = 0
self._responses = list(responses) # Make a copy.
self._statuses = list(statuses)
self._recorder = recorder
def send(self, request, **kwargs): # pylint: disable=arguments-differ
request._extra_kwargs = kwargs
self._recorder.append(request)
resp = models.Response()
resp.url = request.url
resp.status_code = self._statuses[self._current_response]
resp.raw = io.BytesIO(self._responses[self._current_response].encode())
self._current_response = min(self._current_response + 1, len(self._responses) - 1)
return resp
class MockAdapter(MockMultiRequestAdapter):
"""A mock HTTP adapter for the Python requests module."""
def __init__(self, data, status, recorder):
super(MockAdapter, self).__init__([data], [status], recorder)
@property
def status(self):
return self._statuses[0]
@property
def data(self):
return self._responses[0]
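# A minimal usage sketch (hypothetical), mounting MockAdapter on a requests
# session so that every HTTPS call returns a canned response and is recorded:
#   recorder = []
#   session = requests.Session()
#   session.mount('https://', MockAdapter('{"ok": true}', 200, recorder))
#   resp = session.get('https://example.com/test')  # resp.status_code == 200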
| {
"content_hash": "b4e02677d6c8b5d7e07220ede4bd9fd8",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 94,
"avg_line_length": 30.68553459119497,
"alnum_prop": 0.6591514654642344,
"repo_name": "firebase/firebase-admin-python",
"id": "92755107c2a44fea231ec1475bbb907304c33def",
"size": "5455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1142237"
},
{
"name": "Shell",
"bytes": "1682"
}
],
"symlink_target": ""
} |
""" vim:ts=4:expandtab
(c) Jerzy Kędra 2013
TODO:
1. Check only the last few podcasts by date.
   Don't try to download podcasts older than the specified date.
   For example, test only the last week for regular podcast
   downloads.
2. Non-verbose mode to be used from cron.
"""
import os
import urllib2
import urllib
import httplib
from BeautifulSoup import BeautifulSoup, BeautifulStoneSoup
import email.utils as eut
#from datetime import datetime
import datetime
import codecs
from urlparse import urlparse
import shutil
class PodcastURLopener(urllib.FancyURLopener):
"""Create sub-class in order to overide error 206. This error means a
partial file is being sent, which is ok in this case.
Do nothing with this error.
"""
def http_error_206(self, url, fp, errcode, errmsg, headers, data=None):
pass
def reporthook(blocks_read, block_size, total_size):
"""Progress printing, it is an argument to urlretrieve."""
total_size = podsize
if not blocks_read:
return
if total_size < 0:
# Unknown size
print ' Read %d blocks' % blocks_read
else:
amount_read = blocks_read * block_size + podsiz3
print ' Read ',
if amount_read < 1024*1024 :
print '%dkB ' % (amount_read/1024),
elif amount_read > 1024*1024 :
print '%dMB ' % (amount_read/1024/1024),
print '%d%% \r' % (100*amount_read/total_size),
return
def getsize(url):
"""Returns Content-Length value for given URL. Follows redirs."""
o = urlparse(url)
conn = httplib.HTTPConnection(o.netloc)
conn.request("HEAD", o.path)
res = conn.getresponse()
    if res.status == 301 or res.status == 302:  # TODO: use named status constants
# print res.reason, ": ", res.getheader('location')
return getsize(res.getheader('location'))
elif res.status == 200:
        # other interesting headers: etag
# print res.getheader('content-length')
return res.getheader('content-length')
else:
print "getsize() UNKNOWN PROBLEM"
print res.reason, ": ", res.getheader('location')
print res.getheaders()
raise IOError
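# e.g. (hypothetical URL): getsize('http://example.com/episode.mp3') follows
# any 301/302 redirects and returns the Content-Length header as a string,
# e.g. '12345678'.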
def descwrite(i):
"""Writes a description in a file for given podcast."""
podname = i.title.string
f = codecs.open(podftxt, encoding='utf-8', mode='w')
f.write(podname)
f.write("\n\n")
# enclosing in try-exception because of this error
# TypeError: coercing to Unicode: need string or buffer, Tag found
try:
# This is to decode </> before writing it to the file
# BeautifulStoneSoup(items[1].description.string, convertEntities=BeautifulStoneSoup.HTML_ENTITIES).contents[0]
f.write(BeautifulStoneSoup(i.description.string,
convertEntities=
BeautifulStoneSoup.HTML_ENTITIES).contents[0])
except TypeError:
f.write(i.description.string)
f.close
""" MAIN PROGRAM STARTS HERE
"""
#baseurl = 'http://feeds.feedburner.com/zdzis?format=xml/'
baseurl = 'http://feeds.feedburner.com/dailyaudiobible/'
current_page = urllib2.urlopen(baseurl)
#current_page = open('cache-soup.html')
soup = BeautifulSoup(current_page)
# SAVING THE SOUP IN A FILE
fs = open('cache-soup.html', 'w')
fs.write(soup.prettify())
exit()
"""
c = soup.find('div', {'id':'story'})
contpage = c.find('div', {'class':'articlePaged_Next'}).a['href']
soup.find('div', {'id':'story'})
if len(contpage) > 0 :
current_page = urllib2.urlopen(baseurl+contpage)
soupadd = BeautifulSoup(current_page).find('div', {'id':'story'})
items = soup.findAll('item')
print items[1].find('media:content')['url'], "\n\n\n",
items[1].title.string, "\n\n\n", items[1].description.string
"""
os.chdir('DATA')
for i in soup.findAll('item'):
podname = i.title.string
poddate = datetime.datetime(*eut.parsedate(i.pubdate.string)[:6])
podfmp3 = podname + '.mp3'
podtmp3 = podname + '.mp3.part'
podftxt = podname + '.txt'
podurl = i.find('media:content')['url']
podsiz3 = 0
    podsize = 0
    # skip podcasts older than 30 days
if (datetime.datetime.today() - poddate).days > 30 :
continue
    # check whether the file can be downloaded at all
    # if not, move on to the next item
try:
podsize = int(getsize(podurl))
except IOError:
continue
# write description to description file
if not os.path.exists(podftxt) :
descwrite(i)
if os.path.exists(podfmp3) :
podsiz3 = os.stat(podfmp3).st_size
if podsiz3 == podsize :
print "Skipping %s (%s)" % (podfmp3, poddate)
continue
else:
print "{} only {}<{} retrived - resuming".format(podfmp3,
podsiz3, podsize)
try:
# it takes some time for large files
urllib._urlopener = PodcastURLopener()
urllib._urlopener.addheader("Range","bytes=%s-" % (podsiz3))
urllib.urlretrieve(podurl, podtmp3, reporthook=reporthook)
urllib._urlopener = urllib.FancyURLopener()
fsrc = open(podtmp3)
fdst = open(podfmp3, "a")
shutil.copyfileobj(fsrc, fdst)
fsrc.close()
fdst.close()
os.unlink(podtmp3)
except urllib.ContentTooShortError:
print "\tfailed to retrieve ", podurl
if os.path.exists(podtmp3) :
print "\tremoving ", podtmp3
os.unlink(podtmp3)
continue
else :
print "Downloading ", podfmp3
try:
# it takes some time for large files
urllib.urlretrieve(podurl, podfmp3, reporthook=reporthook)
except urllib.ContentTooShortError:
print "\tfailed to retrieve ", podurl
if os.path.exists(podfmp3) :
print "\tremoving ", podfmp3
os.unlink(podfmp3)
continue
print "stored as ", podfmp3
| {
"content_hash": "43253e248449ce581c38047d7689bf2e",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 119,
"avg_line_length": 31.835897435897436,
"alnum_prop": 0.5958440721649485,
"repo_name": "jkedra/PodcastMirror",
"id": "6b748e57345164d5d2eeefd45509bcba413e1c2b",
"size": "6258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Debug.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41600"
},
{
"name": "Shell",
"bytes": "31"
}
],
"symlink_target": ""
} |
import arcpy
import os
import sys
import time
import string
import subprocess
#set executable program location
executablepath = os.path.dirname(os.path.abspath(__file__))
arcpy.AddMessage(executablepath)
executablename = 'CutFillStatistics.exe'
executablestr = '"' + os.path.join(executablepath, executablename) + '"'
arcpy.AddMessage(executablestr)
# Get Original DEM (ASCII)
#
inLyr = arcpy.GetParameterAsText(0)
desc = arcpy.Describe(inLyr)
OrigDEM=str(desc.catalogPath)
arcpy.AddMessage("\nOriginal Elevation file: "+OrigDEM)
OriginalDEMstr = ' -orig "' + OrigDEM + '"'
arcpy.AddMessage(OriginalDEMstr)
# Get Modified DEM (ASCII)
ModDEM = arcpy.GetParameterAsText(1)
arcpy.AddMessage("\Modified Elevation file: "+ModDEM)
ModDEMstr = ' -mod "' + ModDEM + '"'
# Get Output Statistics File (ASCII)
StatFile = arcpy.GetParameterAsText(2)
arcpy.AddMessage("\nOutput Statistics file: "+StatFile)
StatFilestr = ' -stat "' + StatFile + '"'
# Construct the command line. Put quotes around file names in case there are spaces
cmd = executablestr + OriginalDEMstr + ModDEMstr + StatFilestr
arcpy.AddMessage(cmd)
# run the executable once via subprocess so its output can be streamed back
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# print the output lines as they happen
while True:
line = pipe.stdout.readline()
if not line:
break
arcpy.AddMessage(line)
| {
"content_hash": "e3688bc470ee820d1b9ec67d4f776983",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 84,
"avg_line_length": 29.065217391304348,
"alnum_prop": 0.74943904263276,
"repo_name": "crwr/OptimizedPitRemoval",
"id": "bd30b9de11209b04b63c3164e70645432b241a29",
"size": "1480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ArcGIS Code/CutFillStatistics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "35339"
},
{
"name": "Python",
"bytes": "3754"
}
],
"symlink_target": ""
} |
"""
vg composite command
Build composite images from centered images, based on records in composites.csv.
See also vgInitComposites.py, which builds initial pass at composites.csv.
Note: even single channel images get a composite image (bw).
Uses centered image if available, otherwise uses the plain adjusted image.
"""
import os
import csv
import cv2
import config
import lib
import libimg
import vgCenter
import vgInpaint
def printStatus(channelRows,volume,nfile,startId):
"print status message"
nchannels = len(channelRows)
print 'Volume %s compositing %d: %s (%d channels) \r' % \
(volume,nfile,startId,nchannels),
def processChannels(channelRows, optionAlign):
"""
Combine channel images into new file, attempting to align them if optionAlign is True.
channelRows is an array of rows corresponding to rows in the composites.csv file.
should have [compositeId,centerId,volnum,filter,weight,x,y]
eg [
['C434823','C434823','5101','Orange']
['C434823','C434825','5101','Blue','0.8','42','18']
['C434823','C434827','5101','Green','1','-50','83']
]
they are combined and written to a file in the composites folder, step05_composites.
Can have single channel groups.
If optionAlign is True, will attempt to align the channels, and will return updated
x,y values in channelRows.
"""
#. could also have zoom factor, warp info, rotate
# for row in channelRows: print row
centered = False
weightXYFilledOut = False
if len(channelRows) > 0:
volume = ''
compositeId = ''
channels = []
for row in channelRows:
compositeId = row[config.colCompositesCompositeId]
fileId = row[config.colCompositesFileId]
volume = row[config.colCompositesVolume]
filter = row[config.colCompositesFilter]
weight = float(row[config.colCompositesWeight]) \
if len(row)>config.colCompositesWeight else 1.0
x = int(row[config.colCompositesX]) if len(row)>config.colCompositesX else 0
y = int(row[config.colCompositesY]) if len(row)>config.colCompositesY else 0
if len(row)>config.colCompositesWeight: weightXYFilledOut = True
# if don't have an inpaint or centered file, use the adjusted file
channelfilepath = lib.getFilepath('inpaint', volume, fileId)
if os.path.isfile(channelfilepath):
centered = True
else:
channelfilepath = lib.getFilepath('center', volume, fileId, filter)
if os.path.isfile(channelfilepath):
centered = True
else:
channelfilepath = lib.getFilepath('adjust', volume, fileId, filter)
if os.path.isfile(channelfilepath):
channel = [fileId,filter,channelfilepath,weight,x,y]
channels.append(channel)
if len(channels)>0:
outfilepath = lib.getFilepath('composite', volume, compositeId)
if centered: optionAlign = False # don't try to align images if already centered
if weightXYFilledOut: optionAlign = False # don't align if already have values
# combine the channel images
im, channels = libimg.combineChannels(channels, optionAlign)
libimg.imwrite(outfilepath, im)
# if -align: update channels x,y etc
if optionAlign:
# make sure all the rows have all their columns
for row in channelRows:
while len(row)<=config.colCompositesY:
row.append('')
# find each row in channelRows and update weights and x,y translation
for row in channels:
for row2 in channelRows:
if row2[config.colCompositesFileId]==row[config.colChannelFileId]:
row2[config.colCompositesWeight]=row[config.colChannelWeight]
row2[config.colCompositesX]=row[config.colChannelX]
row2[config.colCompositesY]=row[config.colChannelY]
# print [ch[:-1] for ch in channels if ch]
# return channels
# caller needs to know if x,y values were changed
xyChanged = not centered
return xyChanged
def writeUpdates(csvNew, channelRows):
""
for row in channelRows:
# row = [compositeId, fileId, volume, filter, weight, x, y]
csvNew.writerow(row)
# print row
def vgComposite(filterVolume=None, filterCompositeId=None, filterTargetPath=None,
optionOverwrite=False, optionAlign=False, directCall=True):
"""
Build composite images by combining channel images.
Walks over records in composites.csv, merges channel images, writes to composites folder.
eg
composites.csv:
compositeId,centerId,volume,filter,weight,x,y
C1537728,C1537728,5103,Blue
C1537728,C1537730,5103,Orange,0.8
C1537728,C1537732,5103,Green,1,10,3
=>
step05_composites/VGISS_5103/C1537728_composite.jpg
Note: need to run vg init composites first.
Note: weight,x,y are optional - default to 1,0,0
"""
if filterCompositeId: filterCompositeId = filterCompositeId.upper() # always capital C
# note: targetPathParts = [system, craft, target, camera]
targetPathParts = lib.parseTargetPath(filterTargetPath)
# build volume for previous step
if filterVolume:
filterVolume = str(filterVolume)
outputSubfolder = lib.getSubfolder('composite', filterVolume)
# quit if volume folder exists
if os.path.isdir(outputSubfolder) and optionOverwrite==False:
if directCall: print "Folder exists: " + outputSubfolder
return
# build the previous step, if not already there
vgCenter.vgCenter(filterVolume, '', optionOverwrite=False, directCall=False)
# vgInpaint.vgInpaint(filterVolume, '', optionOverwrite=False, directCall=False)
# make folder
lib.mkdir_p(outputSubfolder)
# read small dbs into memory
compositingInfo = lib.readCsv(config.dbCompositing) # when to turn centering on/off
retargetingInfo = lib.readCsv(config.dbRetargeting) # remapping listed targets
# open files.csv so can join to it
csvFiles, fFiles = lib.openCsvReader(config.dbFiles)
# open compositesNew.csv for writing
if optionAlign:
lib.rm(config.dbCompositesNew)
csvNew, fNew = lib.openCsvWriter(config.dbCompositesNew)
# iterate over composites.csv records
csvComposites, fComposites = lib.openCsvReader(config.dbComposites)
startId = ''
startVol = ''
channelRows = []
nfile = 0
for row in csvComposites:
# get composite info
compositeId = row[config.colCompositesCompositeId]
fileId = row[config.colCompositesFileId]
volume = row[config.colCompositesVolume]
# join on files.csv to get more image properties
# (note: since compositeId repeats, we might have already advanced to the next record,
# in which case rowFiles will be None. But the target properties will remain the same.)
rowFiles = lib.getJoinRow(csvFiles, config.colFilesFileId, compositeId)
if rowFiles:
# get file info
filter = rowFiles[config.colFilesFilter]
system = rowFiles[config.colFilesSystem]
craft = rowFiles[config.colFilesCraft]
target = rowFiles[config.colFilesTarget]
camera = rowFiles[config.colFilesCamera]
# relabel target field if necessary - see db/targets.csv for more info
target = lib.retarget(retargetingInfo, compositeId, target)
# filter on volume, composite id and targetpath
volumeOk = (volume==filterVolume if filterVolume else True)
compositeOk = (compositeId==filterCompositeId if filterCompositeId else True)
targetPathOk = (lib.targetMatches(targetPathParts, system, craft, target, camera) \
if filterTargetPath else True)
doComposite = (volumeOk and compositeOk and targetPathOk)
if doComposite:
# gather image filenames into channelRows so can merge them
if compositeId == startId:
channelRows.append(row)
else:
# we're seeing a new compositeId, so process all the gathered channels
printStatus(channelRows,startVol,nfile,startId)
processChannels(channelRows, optionAlign)
# processChannels(channelRows, optionAlign, csvNew)
# xyChanged = processChannels(channelRows, optionAlign)
# if optionAlign and xyChanged:
# writeUpdates(csvNew, channelRows)
startId = compositeId
startVol = volume
channelRows = [row]
nfile += 1
# process the last leftover group
# print channelRows
printStatus(channelRows,startVol,nfile,startId)
processChannels(channelRows, optionAlign)
# processChannels(channelRows, optionAlign, csvNew)
# xyChanged = processChannels(channelRows,optionAlign)
# if optionAlign and xyChanged:
# writeUpdates(csvNew, channelRows)
print
if optionAlign: fNew.close()
fFiles.close()
fComposites.close()
if __name__ == '__main__':
os.chdir('..')
# vgComposite(5117)
# vgComposite(8207)
# vgComposite(None,'c1617245')
# ariel - works
# vgComposite(None,'c2684338',None,optionOverwrite=True)
# automatic - nowork
# vgComposite(None,'c2684338',None,optionOverwrite=True, optionAlign=True)
# filename = lib.getFilepath('composite','7206','c2684338')
# ganymede
# folder = '../../data/step04_adjust/VGISS_5117/'
# file1 = folder + 'C1640236_adjusted_Blue.jpg'
# file2 = folder + 'C1640234_adjusted_Violet.jpg'
# file3 = folder + 'C1640238_adjusted_Orange.jpg'
# vgComposite(None,'C1640232',None,optionOverwrite=True, optionAlign=True)
# filename = lib.getFilepath('composite','5117','C1640232')
# vgComposite(None,'C1640222',None,optionOverwrite=True, optionAlign=True)
# filename = lib.getFilepath('composite','5117','C1640222')
vgComposite(None,'C1642718',None,optionOverwrite=True, optionAlign=True)
filename = lib.getFilepath('composite','5117','C1642718')
im = cv2.imread(filename)
libimg.show(im)
# uranus
# vgComposite(None,'C2656801',True)
# filename = lib.getFilepath('composite','7205','C2656801')
# im = cv2.imread(filename)
# libimg.show(im)
print 'done'
| {
"content_hash": "595fa7a3ea898fa4d0b6116facf76c12",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 95,
"avg_line_length": 38.611510791366904,
"alnum_prop": 0.6520402459474567,
"repo_name": "bburns/PyVoyager",
"id": "21536581f691fc0827baa1ae8416c69cd4b3387a",
"size": "10734",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/vgComposite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1380"
},
{
"name": "Dockerfile",
"bytes": "966"
},
{
"name": "Python",
"bytes": "286683"
}
],
"symlink_target": ""
} |
"""Utilities used throughout for internationalization
"""
from django.utils import formats
import six
import decimal
def _date_format(date, date_format, decode=False):
formatted = formats.date_format(
date,
date_format,
use_l10n=True
)
if decode:
return formatted
return formatted.encode('UTF-8')
def l10n_date_iso(date, decode=False):
return _date_format(date, 'DATE_FORMAT', decode=decode)
def l10n_date_short(date, decode=False):
return _date_format(date, 'SHORT_DATE_FORMAT', decode=decode)
def l10n_date_medium(date, decode=False):
return _date_format(date, 'MEDIUM_DATE_FORMAT', decode=decode)
def l10n_date_long(date, decode=False):
return _date_format(date, 'LONG_DATE_FORMAT', decode=decode)
def l10n_date_year_month(date, decode=False):
return _date_format(date, 'YEAR_MONTH_FORMAT', decode=decode)
def l10n_monthname(date, decode=False):
return _date_format(date, 'N', decode=decode)
def l10n_number(value):
if isinstance(value, (decimal.Decimal, float) + six.integer_types):
return formats.number_format(value, use_l10n=True)
else:
suffix = ''
try:
value = str(value).rstrip()
if len(value) > 1 and value[-1] == '%':
suffix = '%'
value = value[:-1]
if value.isdigit():
value = int(value)
elif value.replace('.', '', 1).isdigit():
value = float(value)
else:
return str(value) + suffix
return formats.number_format(value, use_l10n=True) + suffix
except ValueError:
return value
return value
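# Illustrative usage sketch (not part of the original module): assumes a
# configured Django settings module with localization enabled so that
# formats.date_format / number_format can pick a locale; the values shown in
# comments are what an en-us locale would typically produce.
def _example_usage():
    import datetime
    today = datetime.date(2018, 3, 14)
    short = l10n_date_short(today, decode=True)  # e.g. '03/14/2018'
    as_number = l10n_number(1234.5)              # e.g. '1,234.5'
    as_percent = l10n_number('42%')              # trailing '%' is preserved
    return short, as_number, as_percent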
| {
"content_hash": "1c453a4054f0ae079aa6d0cd7b51ca0f",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 71,
"avg_line_length": 27.80327868852459,
"alnum_prop": 0.6120283018867925,
"repo_name": "mercycorps/TolaActivity",
"id": "d8fdec8f0c9c27396d986dbf2391eb6d4c0e17f6",
"size": "1696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tola/l10n_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "432462"
},
{
"name": "Dockerfile",
"bytes": "109"
},
{
"name": "HTML",
"bytes": "437661"
},
{
"name": "JavaScript",
"bytes": "5654491"
},
{
"name": "Python",
"bytes": "1741812"
},
{
"name": "Shell",
"bytes": "4752"
}
],
"symlink_target": ""
} |
import os
import sys
import tempfile
import pprint
import traceback
# disable python from generating a .pyc file
sys.dont_write_bytecode = True
# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]
# import pytan
import pytan
# create a dictionary of arguments for the pytan handler
handler_args = {}
# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443" # optional
# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1
# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False
# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True
# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)
# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)
# setup the arguments for the handler() class
kwargs = {}
kwargs["export_format"] = u'xml'
kwargs["minimal"] = False
# setup the arguments for handler.get()
get_kwargs = {
'name': [
"Computer Name", "IP Route Details", "IP Address",
'Folder Contents',
],
'objtype': 'sensor',
}
# get the objects that will provide the basetype that we want to export
print "...CALLING: handler.get() with args: {}".format(get_kwargs)
response = handler.get(**get_kwargs)
# store the basetype object as the obj we want to export
kwargs['obj'] = response
# export the object to a string
# (we could just as easily export to a file using export_to_report_file)
print "...CALLING: handler.export_obj() with args {}".format(kwargs)
out = handler.export_obj(**kwargs)
# trim the output if it is more than 15 lines long
if len(out.splitlines()) > 15:
out = out.splitlines()[0:15]
out.append('..trimmed for brevity..')
out = '\n'.join(out)
print "...OUTPUT: print the export_str returned from export_obj():"
print out
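# Illustrative variant (not executed here): the same response object could be
# written straight to disk with handler.export_to_report_file(), mentioned in
# the comment above; the exact keyword arguments accepted by that call (e.g.
# report_dir) are an assumption, so check the pytan documentation first.
# report_kwargs = {}
# report_kwargs['obj'] = response
# report_kwargs['export_format'] = u'xml'
# report_kwargs['minimal'] = False
# report_kwargs['report_dir'] = my_dir  # assumed keyword argument
# print "...CALLING: handler.export_to_report_file() with args {}".format(report_kwargs)
# out_file = handler.export_to_report_file(**report_kwargs)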
| {
"content_hash": "9dc1c6d373f6daebaa0f1d5ef0c25e14",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 90,
"avg_line_length": 32.51111111111111,
"alnum_prop": 0.7194121667805878,
"repo_name": "tanium/pytan",
"id": "c6b52b36baddda05cc513a639b8c407fadb6a123",
"size": "2969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BUILD/doc/source/examples/export_basetype_xml_minimal_false_code.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13251"
},
{
"name": "CSS",
"bytes": "32442"
},
{
"name": "HTML",
"bytes": "1232764"
},
{
"name": "JavaScript",
"bytes": "375167"
},
{
"name": "Makefile",
"bytes": "4287"
},
{
"name": "Python",
"bytes": "2541262"
},
{
"name": "Shell",
"bytes": "3194"
}
],
"symlink_target": ""
} |
import functools
import inspect
import math
import time
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import strutils
import six
import webob
import webob.exc
from cinder.api.openstack import api_version_request as api_version
from cinder.api.openstack import versioned_method
from cinder import exception
from cinder import i18n
from cinder.i18n import _, _LE, _LI
from cinder import utils
from cinder.wsgi import common as wsgi
LOG = logging.getLogger(__name__)
SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.volume+json',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.volume+json': 'json',
'application/json': 'json',
}
# name of attribute to keep version method information
VER_METHOD_ATTR = 'versioned_methods'
# Name of header used by clients to request a specific version
# of the REST API
API_VERSION_REQUEST_HEADER = 'OpenStack-API-Version'
VOLUME_SERVICE = 'volume'
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def __init__(self, *args, **kwargs):
super(Request, self).__init__(*args, **kwargs)
self._resource_cache = {}
if not hasattr(self, 'api_version_request'):
self.api_version_request = api_version.APIVersionRequest()
def cache_resource(self, resource_to_cache, id_attribute='id', name=None):
"""Cache the given resource.
Allow API methods to cache objects, such as results from a DB query,
to be used by API extensions within the same API request.
The resource_to_cache can be a list or an individual resource,
but ultimately resources are cached individually using the given
id_attribute.
Different resources types might need to be cached during the same
request, they can be cached using the name parameter. For example:
Controller 1:
request.cache_resource(db_volumes, 'volumes')
request.cache_resource(db_volume_types, 'types')
Controller 2:
db_volumes = request.cached_resource('volumes')
db_type_1 = request.cached_resource_by_id('1', 'types')
If no name is given, a default name will be used for the resource.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
if not isinstance(resource_to_cache, list):
resource_to_cache = [resource_to_cache]
if not name:
name = self.path
cached_resources = self._resource_cache.setdefault(name, {})
for resource in resource_to_cache:
cached_resources[resource[id_attribute]] = resource
def cached_resource(self, name=None):
"""Get the cached resources cached under the given resource name.
Allow an API extension to get previously stored objects within
the same API request.
Note that the object data will be slightly stale.
:returns: a dict of id_attribute to the resource from the cached
resources, an empty map if an empty collection was cached,
or None if nothing has been cached yet under this name
"""
if not name:
name = self.path
if name not in self._resource_cache:
# Nothing has been cached for this key yet
return None
return self._resource_cache[name]
def cached_resource_by_id(self, resource_id, name=None):
"""Get a resource by ID cached under the given resource name.
Allow an API extension to get a previously stored object
within the same API request. This is basically a convenience method
to lookup by ID on the dictionary of all cached resources.
Note that the object data will be slightly stale.
:returns: the cached resource or None if the item is not in the cache
"""
resources = self.cached_resource(name)
if not resources:
            # Nothing has been cached for this key yet
return None
return resources.get(resource_id)
def cache_db_items(self, key, items, item_key='id'):
"""Get cached database items.
Allow API methods to store objects from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
self.cache_resource(items, item_key, key)
def get_db_items(self, key):
"""Get database items.
Allow an API extension to get previously stored objects within
the same API request.
Note that the object data will be slightly stale.
"""
return self.cached_resource(key)
def get_db_item(self, key, item_key):
"""Get database item.
Allow an API extension to get a previously stored object
within the same API request.
Note that the object data will be slightly stale.
"""
return self.get_db_items(key).get(item_key)
def cache_db_volumes(self, volumes):
# NOTE(mgagne) Cache it twice for backward compatibility reasons
self.cache_db_items('volumes', volumes, 'id')
self.cache_db_items(self.path, volumes, 'id')
def cache_db_volume(self, volume):
# NOTE(mgagne) Cache it twice for backward compatibility reasons
self.cache_db_items('volumes', [volume], 'id')
self.cache_db_items(self.path, [volume], 'id')
def get_db_volumes(self):
return (self.get_db_items('volumes') or
self.get_db_items(self.path))
def get_db_volume(self, volume_id):
return (self.get_db_item('volumes', volume_id) or
self.get_db_item(self.path, volume_id))
def cache_db_volume_types(self, volume_types):
self.cache_db_items('volume_types', volume_types, 'id')
def cache_db_volume_type(self, volume_type):
self.cache_db_items('volume_types', [volume_type], 'id')
def get_db_volume_types(self):
return self.get_db_items('volume_types')
def get_db_volume_type(self, volume_type_id):
return self.get_db_item('volume_types', volume_type_id)
def cache_db_snapshots(self, snapshots):
self.cache_db_items('snapshots', snapshots, 'id')
def cache_db_snapshot(self, snapshot):
self.cache_db_items('snapshots', [snapshot], 'id')
def get_db_snapshots(self):
return self.get_db_items('snapshots')
def get_db_snapshot(self, snapshot_id):
return self.get_db_item('snapshots', snapshot_id)
def cache_db_backups(self, backups):
self.cache_db_items('backups', backups, 'id')
def cache_db_backup(self, backup):
self.cache_db_items('backups', [backup], 'id')
def get_db_backups(self):
return self.get_db_items('backups')
def get_db_backup(self, backup_id):
return self.get_db_item('backups', backup_id)
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'cinder.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in SUPPORTED_CONTENT_TYPES:
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
self.environ['cinder.best_content_type'] = (content_type or
'application/json')
return self.environ['cinder.best_content_type']
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if "Content-Type" not in self.headers:
return None
allowed_types = SUPPORTED_CONTENT_TYPES
content_type = self.content_type
if content_type not in allowed_types:
raise exception.InvalidContentType(content_type=content_type)
return content_type
def best_match_language(self):
"""Determines best available locale from the Accept-Language header.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
all_languages = i18n.get_available_languages()
return self.accept_language.best_match(all_languages)
def set_api_version_request(self, url):
"""Set API version request based on the request header information.
Microversions starts with /v3, so if a client sends a request for
version 1.0 or 2.0 with the /v3 endpoint, throw an exception.
Sending a header with any microversion to a /v1 or /v2 endpoint will
be ignored.
Note that a microversion must be set for the legacy endpoints. This
will appear as 1.0 and 2.0 for /v1 and /v2.
"""
if API_VERSION_REQUEST_HEADER in self.headers and 'v3' in url:
hdr_string = self.headers[API_VERSION_REQUEST_HEADER]
# 'latest' is a special keyword which is equivalent to requesting
# the maximum version of the API supported
hdr_string_list = hdr_string.split(",")
volume_version = None
for hdr in hdr_string_list:
if VOLUME_SERVICE in hdr:
service, volume_version = hdr.split()
break
if not volume_version:
raise exception.VersionNotFoundForAPIMethod(
version=volume_version)
if volume_version == 'latest':
self.api_version_request = api_version.max_api_version()
else:
self.api_version_request = api_version.APIVersionRequest(
volume_version)
# Check that the version requested is within the global
# minimum/maximum of supported API versions
if not self.api_version_request.matches(
api_version.min_api_version(),
api_version.max_api_version()):
raise exception.InvalidGlobalAPIVersion(
req_ver=self.api_version_request.get_string(),
min_ver=api_version.min_api_version().get_string(),
max_ver=api_version.max_api_version().get_string())
else:
if 'v1' in url:
self.api_version_request = api_version.legacy_api_version1()
elif 'v2' in url:
self.api_version_request = api_version.legacy_api_version2()
else:
self.api_version_request = api_version.APIVersionRequest(
api_version._MIN_API_VERSION)
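# Illustrative sketch (not part of the original module): how the header drives
# version negotiation for a /v3 request; the microversion value is an example
# and must lie between the deployment's min_api_version and max_api_version.
def _example_version_negotiation():
    req = Request.blank('/v3/volumes')
    req.headers[API_VERSION_REQUEST_HEADER] = VOLUME_SERVICE + ' 3.0'
    req.set_api_version_request(req.url)
    # For the legacy /v1 and /v2 endpoints the header is ignored and the
    # version is pinned to 1.0 or 2.0 instead.
    return req.api_version_request.get_string()  # e.g. '3.0'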
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, six.text_type(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization."""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class DictSerializer(ActionDispatcher):
"""Default request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization."""
def default(self, data):
return jsonutils.dump_as_bytes(data)
def serializers(**serializers):
"""Attaches serializers to a method.
This decorator associates a dictionary of serializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_serializers'):
func.wsgi_serializers = {}
func.wsgi_serializers.update(serializers)
return func
return decorator
def deserializers(**deserializers):
"""Attaches deserializers to a method.
This decorator associates a dictionary of deserializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_deserializers'):
func.wsgi_deserializers = {}
func.wsgi_deserializers.update(deserializers)
return func
return decorator
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
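# Illustrative usage sketch (hypothetical controller, not part of this module):
# the serializers()/deserializers()/response() decorators above are stacked on
# controller methods so Resource can pick up the attributes they set, e.g.
#
#     class ExampleController(Controller):
#         @response(202)
#         def create(self, req, body):
#             return {'example': body}
#
# Here wsgi_code=202 becomes the default HTTP status of the ResponseObject.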
class ResponseObject(object):
"""Bundles a response object with appropriate serializers.
Object that app methods may return in order to bind alternate
serializers with a response object to be serialized. Its use is
optional.
"""
def __init__(self, obj, code=None, headers=None, **serializers):
"""Binds serializers with an object.
Takes keyword arguments akin to the @serializer() decorator
for specifying serializers. Serializers specified will be
given preference over default serializers or method-specific
serializers on return.
"""
self.obj = obj
self.serializers = serializers
self._default_code = 200
self._code = code
self._headers = headers or {}
self.serializer = None
self.media_type = None
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def _bind_method_serializers(self, meth_serializers):
"""Binds method serializers with the response object.
Binds the method serializers with the response object.
Serializers specified to the constructor will take precedence
over serializers specified to this method.
:param meth_serializers: A dictionary with keys mapping to
response types and values containing
serializer objects.
"""
# We can't use update because that would be the wrong
# precedence
for mtype, serializer in meth_serializers.items():
self.serializers.setdefault(mtype, serializer)
def get_serializer(self, content_type, default_serializers=None):
"""Returns the serializer for the wrapped object.
Returns the serializer for the wrapped object subject to the
indicated content type. If no serializer matching the content
type is attached, an appropriate serializer drawn from the
default serializers will be used. If no appropriate
serializer is available, raises InvalidContentType.
"""
default_serializers = default_serializers or {}
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in self.serializers:
return mtype, self.serializers[mtype]
else:
return mtype, default_serializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
def preserialize(self, content_type, default_serializers=None):
"""Prepares the serializer that will be used to serialize.
Determines the serializer that will be used and prepares an
instance of it for later call. This allows the serializer to
be accessed by extensions for, e.g., template extension.
"""
mtype, serializer = self.get_serializer(content_type,
default_serializers)
self.media_type = mtype
self.serializer = serializer()
def attach(self, **kwargs):
"""Attach slave templates to serializers."""
if self.media_type in kwargs:
self.serializer.attach(kwargs[self.media_type])
def serialize(self, request, content_type, default_serializers=None):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
if self.serializer:
serializer = self.serializer
else:
_mtype, _serializer = self.get_serializer(content_type,
default_serializers)
serializer = _serializer()
response = webob.Response()
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = six.text_type(value)
response.headers['Content-Type'] = six.text_type(content_type)
if self.obj is not None:
body = serializer.serialize(self.obj)
if isinstance(body, six.text_type):
body = body.encode('utf-8')
response.body = body
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
def action_peek_json(body):
"""Determine action to invoke."""
try:
decoded = jsonutils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action and the decoded body...
return list(decoded.keys())[0]
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.NotAuthorized):
msg = six.text_type(ex_value)
raise Fault(webob.exc.HTTPForbidden(explanation=msg))
elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
raise
elif isinstance(ex_value, (exception.Invalid, exception.NotFound)):
raise Fault(exception.ConvertedException(
code=ex_value.code, explanation=six.text_type(ex_value)))
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_LE(
'Exception handling resource: %s'),
ex_value, exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_LI("Fault thrown: %s"), six.text_type(ex_value))
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_LI("HTTP exception thrown: %s"), six.text_type(ex_value))
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
support_api_request_version = True
def __init__(self, controller, action_peek=None, **deserializers):
"""Initialize Resource.
:param controller: object that implement methods created by routes lib
:param action_peek: dictionary of routines for peeking into an action
request body to determine the desired action
"""
self.controller = controller
default_deserializers = dict(json=JSONDeserializer)
default_deserializers.update(deserializers)
self.default_deserializers = default_deserializers
self.default_serializers = dict(json=JSONDictSerializer)
self.action_peek = dict(json=action_peek_json)
self.action_peek.update(action_peek or {})
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def get_body(self, request):
if len(request.body) == 0:
LOG.debug("Empty body provided in request")
return None, ''
try:
content_type = request.get_content_type()
except exception.InvalidContentType:
LOG.debug("Unrecognized Content-Type provided in request")
return None, ''
if not content_type:
LOG.debug("No Content-Type provided in request")
return None, ''
return content_type, request.body
def deserialize(self, meth, content_type, body):
meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in meth_deserializers:
deserializer = meth_deserializers[mtype]
else:
deserializer = self.default_deserializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
return deserializer().deserialize(body)
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
for ext in extensions:
if inspect.isgeneratorfunction(ext):
response = None
# If it's a generator function, the part before the
# yield is the preprocessing stage
try:
with ResourceExceptionHandler():
gen = ext(req=request, **action_args)
response = next(gen)
except Fault as ex:
response = ex
# We had a response...
if response:
return response, []
# No response, queue up generator for post-processing
post.append(gen)
else:
# Regular functions only perform post-processing
post.append(ext)
# Run post-processing in the reverse order
return None, reversed(post)
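    # Illustrative extension sketch (hypothetical, not part of this module):
    # a plain extension method only post-processes, while a generator-based
    # one is split at its yield -- the code before the yield runs here in
    # pre_process_extensions(), and post_process_extensions() later send()s
    # the ResponseObject into the yield expression, e.g.
    #
    #     @extends
    #     def show(self, req, resp_obj, id):
    #         resp_obj.obj['extended'] = True          # post-processing only
    #
    #     @extends
    #     def index(self, req, **kwargs):
    #         check_something(req)                     # pre-processing
    #         resp_obj = yield
    #         resp_obj.obj['extended'] = True          # post-processing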
def post_process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
if inspect.isgenerator(ext):
# If it's a generator, run the second half of
# processing
try:
with ResourceExceptionHandler():
response = ext.send(resp_obj)
except StopIteration:
# Normal exit of generator
continue
except Fault as ex:
response = ex
else:
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except exception.VersionNotFoundForAPIMethod:
# If an attached extension (@wsgi.extends) for the
                    # method has no version match, it's not an error. We
# just don't run the extends code
continue
except Fault as ex:
response = ex
# We had a response...
if response:
return response
return None
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.info(_LI("%(method)s %(url)s"),
{"method": request.method,
"url": request.url})
if self.support_api_request_version:
# Set the version of the API requested based on the header
try:
request.set_api_version_request(request.url)
except exception.InvalidAPIVersionString as e:
return Fault(webob.exc.HTTPBadRequest(
explanation=six.text_type(e)))
except exception.InvalidGlobalAPIVersion as e:
return Fault(webob.exc.HTTPNotAcceptable(
explanation=six.text_type(e)))
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
return self._process_stack(request, action, action_args,
content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
if body:
decoded_body = encodeutils.safe_decode(body, errors='ignore')
msg = ("Action: '%(action)s', calling method: %(meth)s, body: "
"%(body)s") % {'action': action,
'body': six.text_type(decoded_body),
'meth': six.text_type(meth)}
LOG.debug(strutils.mask_password(msg))
else:
LOG.debug("Calling method '%(meth)s'",
{'meth': six.text_type(meth)})
# Now, deserialize the request body...
try:
if content_type:
contents = self.deserialize(meth, content_type, body)
else:
contents = {}
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('cinder.context')
if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request url")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if isinstance(action_result, dict) or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
_set_request_id_header(request, resp_obj)
# Do a preserialize to set up the response object
serializers = getattr(meth, 'wsgi_serializers', {})
resp_obj._bind_method_serializers(serializers)
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
resp_obj.preserialize(accept, self.default_serializers)
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept,
self.default_serializers)
try:
msg_dict = dict(url=request.url, status=response.status_int)
msg = _LI("%(url)s returned with HTTP %(status)d")
except AttributeError as e:
msg_dict = dict(url=request.url, e=e)
msg = _LI("%(url)s returned a fault: %(e)s")
LOG.info(msg, msg_dict)
if hasattr(response, 'headers'):
for hdr, val in response.headers.items():
# Headers must be utf-8 strings
val = utils.convert_str(val)
response.headers[hdr] = val
if (not request.api_version_request.is_null() and
not _is_legacy_endpoint(request)):
response.headers[API_VERSION_REQUEST_HEADER] = (
VOLUME_SERVICE + ' ' +
request.api_version_request.get_string())
response.headers['Vary'] = API_VERSION_REQUEST_HEADER
return response
def get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError as e:
with excutils.save_and_reraise_exception(e) as ctxt:
if (not self.wsgi_actions or action not in ['action',
'create',
'delete',
'update']):
LOG.exception(_LE('Get method error.'))
else:
ctxt.reraise = False
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
# OK, it's an action; figure out which action...
mtype = _MEDIA_TYPE_MAP.get(content_type)
action_name = self.action_peek[mtype](body)
LOG.debug("Action body: %s", body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
try:
return method(req=request, **action_args)
except exception.VersionNotFoundForAPIMethod:
# We deliberately don't return any message information
# about the exception to the user so it looks as if
# the method is simply not implemented.
return Fault(webob.exc.HTTPNotFound())
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
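# Illustrative usage sketch (hypothetical controller, not part of this module):
#
#     class VolumeActionsController(Controller):
#         @action('os-reserve')
#         def _reserve(self, req, id, body):
#             ...
#
# ControllerMetaclass records the method under wsgi_actions['os-reserve'], and
# Resource dispatches to it when an 'action' request body has that single key.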
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
# NOTE(geguileo): We'll keep a list of versioned methods that have been
# added by the new metaclass (dictionary in attribute VER_METHOD_ATTR
# on Controller class) and all the versioned methods from the different
# base classes so we can consolidate them.
versioned_methods = []
# NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute
# between API controller class creations. This allows us
# to use a class decorator on the API methods that doesn't
# require naming explicitly what method is being versioned as
# it can be implicit based on the method decorated. It is a bit
# ugly.
if bases != (object,) and VER_METHOD_ATTR in vars(Controller):
# Get the versioned methods that this metaclass creation has added
# to the Controller class
versioned_methods.append(getattr(Controller, VER_METHOD_ATTR))
# Remove them so next metaclass has a clean start
delattr(Controller, VER_METHOD_ATTR)
# start with wsgi actions from base classes
for base in bases:
actions.update(getattr(base, 'wsgi_actions', {}))
# Get the versioned methods that this base has
if VER_METHOD_ATTR in vars(base):
versioned_methods.append(getattr(base, VER_METHOD_ATTR))
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
if versioned_methods:
cls_dict[VER_METHOD_ATTR] = mcs.consolidate_vers(versioned_methods)
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
@staticmethod
def consolidate_vers(versioned_methods):
"""Consolidates a list of versioned methods dictionaries."""
if not versioned_methods:
return {}
result = versioned_methods.pop(0)
for base_methods in versioned_methods:
for name, methods in base_methods.items():
method_list = result.setdefault(name, [])
method_list.extend(methods)
method_list.sort(reverse=True)
return result
@six.add_metaclass(ControllerMetaclass)
class Controller(object):
"""Default controller."""
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
def __getattribute__(self, key):
def version_select(*args, **kwargs):
"""Select and call the matching version of the specified method.
Look for the method which matches the name supplied and version
constraints and calls it with the supplied arguments.
:returns: Returns the result of the method called
:raises: VersionNotFoundForAPIMethod if there is no method which
matches the name and version constraints
"""
# The first arg to all versioned methods is always the request
# object. The version for the request is attached to the
# request object
if len(args) == 0:
version_request = kwargs['req'].api_version_request
else:
version_request = args[0].api_version_request
func_list = self.versioned_methods[key]
for func in func_list:
if version_request.matches_versioned_method(func):
# Update the version_select wrapper function so
# other decorator attributes like wsgi.response
# are still respected.
functools.update_wrapper(version_select, func.func)
return func.func(self, *args, **kwargs)
# No version match
raise exception.VersionNotFoundForAPIMethod(
version=version_request)
try:
version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR)
except AttributeError:
# No versioning on this class
return object.__getattribute__(self, key)
if (version_meth_dict and key in
object.__getattribute__(self, VER_METHOD_ATTR)):
return version_select
return object.__getattribute__(self, key)
# NOTE(cyeoh): This decorator MUST appear first (the outermost
# decorator) on an API method for it to work correctly
@classmethod
def api_version(cls, min_ver, max_ver=None, experimental=False):
"""Decorator for versioning API methods.
Add the decorator to any method which takes a request object
as the first parameter and belongs to a class which inherits from
wsgi.Controller.
:param min_ver: string representing minimum version
:param max_ver: optional string representing maximum version
"""
def decorator(f):
obj_min_ver = api_version.APIVersionRequest(min_ver)
if max_ver:
obj_max_ver = api_version.APIVersionRequest(max_ver)
else:
obj_max_ver = api_version.APIVersionRequest()
# Add to list of versioned methods registered
func_name = f.__name__
new_func = versioned_method.VersionedMethod(
func_name, obj_min_ver, obj_max_ver, experimental, f)
func_dict = getattr(cls, VER_METHOD_ATTR, {})
if not func_dict:
setattr(cls, VER_METHOD_ATTR, func_dict)
func_list = func_dict.get(func_name, [])
if not func_list:
func_dict[func_name] = func_list
func_list.append(new_func)
# Ensure the list is sorted by minimum version (reversed)
# so later when we work through the list in order we find
# the method which has the latest version which supports
# the version requested.
# TODO(cyeoh): Add check to ensure that there are no overlapping
# ranges of valid versions as that is ambiguous
func_list.sort(reverse=True)
return f
return decorator
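    # Illustrative usage sketch (hypothetical version numbers, not part of
    # this module): each decorated definition registers a VersionedMethod, and
    # __getattribute__ above picks the newest range matching the request, e.g.
    #
    #     class ExampleController(wsgi.Controller):
    #         @wsgi.Controller.api_version('3.0', '3.6')
    #         def summary(self, req, id):
    #             ...
    #
    #         @wsgi.Controller.api_version('3.7')   # noqa
    #         def summary(self, req, id):
    #             ...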
@staticmethod
def is_valid_body(body, entity_name):
if not (body and entity_name in body):
return False
def is_dict(d):
try:
d.get(None)
return True
except AttributeError:
return False
if not is_dict(body[entity_name]):
return False
return True
@staticmethod
def assert_valid_body(body, entity_name):
# NOTE: After v1 api is deprecated need to merge 'is_valid_body' and
        #       'assert_valid_body' into one method. Right now it is not
# possible to modify 'is_valid_body' to raise exception because
# in case of V1 api when 'is_valid_body' return False,
# 'HTTPUnprocessableEntity' exception is getting raised and in
# V2 api 'HTTPBadRequest' exception is getting raised.
if not Controller.is_valid_body(body, entity_name):
raise webob.exc.HTTPBadRequest(
explanation=_("Missing required element '%s' in "
"request body.") % entity_name)
@staticmethod
def validate_name_and_description(body):
name = body.get('name')
if name is not None:
if isinstance(name, six.string_types):
body['name'] = name.strip()
try:
utils.check_string_length(body['name'], 'Name',
min_length=0, max_length=255)
except exception.InvalidInput as error:
raise webob.exc.HTTPBadRequest(explanation=error.msg)
description = body.get('description')
if description is not None:
try:
utils.check_string_length(description, 'Description',
min_length=0, max_length=255)
except exception.InvalidInput as error:
raise webob.exc.HTTPBadRequest(explanation=error.msg)
@staticmethod
def validate_string_length(value, entity_name, min_length=0,
max_length=None, remove_whitespaces=False):
"""Check the length of specified string.
:param value: the value of the string
:param entity_name: the name of the string
:param min_length: the min_length of the string
:param max_length: the max_length of the string
:param remove_whitespaces: True if trimming whitespaces is needed
else False
"""
if isinstance(value, six.string_types) and remove_whitespaces:
value = value.strip()
try:
utils.check_string_length(value, entity_name,
min_length=min_length,
max_length=max_length)
except exception.InvalidInput as error:
raise webob.exc.HTTPBadRequest(explanation=error.msg)
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
locale = req.best_match_language()
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
explanation = self.wrapped_exc.explanation
fault_data = {
fault_name: {
'code': code,
'message': i18n.translate(explanation, locale)}}
if code == 413:
retry = self.wrapped_exc.headers.get('Retry-After', None)
if retry:
fault_data[fault_name]['retryAfter'] = retry
if (not req.api_version_request.is_null() and not
_is_legacy_endpoint(req)):
self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = (
VOLUME_SERVICE + ' ' + req.api_version_request.get_string())
self.wrapped_exc.headers['Vary'] = API_VERSION_REQUEST_HEADER
content_type = req.best_match_content_type()
serializer = {
'application/json': JSONDictSerializer(),
}[content_type]
body = serializer.serialize(fault_data)
if isinstance(body, six.text_type):
body = body.encode('utf-8')
self.wrapped_exc.body = body
self.wrapped_exc.content_type = content_type
_set_request_id_header(req, self.wrapped_exc.headers)
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
def _set_request_id_header(req, headers):
context = req.environ.get('cinder.context')
if context:
headers['x-compute-request-id'] = context.request_id
def _is_legacy_endpoint(request):
version_str = request.api_version_request.get_string()
return '1.0' in version_str or '2.0' in version_str
class OverLimitFault(webob.exc.HTTPException):
"""Rate-limited request response."""
def __init__(self, message, details, retry_time):
"""Initialize new `OverLimitFault` with relevant information."""
hdrs = OverLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs)
self.content = {
"overLimitFault": {
"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""Serializes the wrapped exception conforming to our error format."""
content_type = request.best_match_content_type()
def translate(msg):
locale = request.best_match_language()
return i18n.translate(msg, locale)
self.content['overLimitFault']['message'] = \
translate(self.content['overLimitFault']['message'])
self.content['overLimitFault']['details'] = \
translate(self.content['overLimitFault']['details'])
serializer = {
'application/json': JSONDictSerializer(),
}[content_type]
content = serializer.serialize(self.content)
self.wrapped_exc.body = content
return self.wrapped_exc
| {
"content_hash": "7c9ab927738697d269044fc314651f2c",
"timestamp": "",
"source": "github",
"line_count": 1397,
"max_line_length": 79,
"avg_line_length": 36.87831066571224,
"alnum_prop": 0.5949455540674314,
"repo_name": "bswartz/cinder",
"id": "cc4718e55fdac01cb9e03dc5138dfe5b83cf85aa",
"size": "52182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/api/openstack/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16345375"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from codecs import open
from os import path
setup(
name='multilabel-metrics',
version='0.0.1',
description='Multilabel classification metrics for Python',
long_description=open('README.txt').read(),
url='https://github.com/llabhishekll/multi-label-metrics',
author=u'Abhishek Verma',
author_email='[email protected]',
license='GNU_GPL licence, see LICENCE.txt',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='classification metrics machine-learning ',
py_modules=["mlmetrics"],
)
| {
"content_hash": "11302423ee2e31244d77e1500551f9c2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 63,
"avg_line_length": 37.25,
"alnum_prop": 0.6577181208053692,
"repo_name": "hell-sing/multi-label-metrics",
"id": "d5e756be0b723718ef8093fc205db9dd68641b54",
"size": "918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11486"
}
],
"symlink_target": ""
} |
from pyxley.charts import Chart
from flask import jsonify, request
class PlotlyAPI(Chart):
def __init__(self, options, route_func):
super(PlotlyAPI, self).__init__("PlotlyAPI", options, route_func)
class PlotlyLines(PlotlyAPI):
def __init__(self, xypairs, data_source,
mode="lines+markers", layout={},
init_params={},
chart_id="plotlyid", url="/plotlyurl/",
route_func=None):
self.options = {
"chartid": chart_id,
"url": url,
"params": init_params
}
def get_data():
args = {}
for c in init_params:
if request.args.get(c):
args[c] = request.args[c]
else:
args[c] = init_params[c]
return jsonify(PlotlyLines.to_json(
self.apply_filters(data_source, args),
xypairs,
mode,
layout
))
if not route_func:
route_func = get_data
super(PlotlyLines, self).__init__(self.options, route_func)
@staticmethod
def to_json(df, xypairs, mode, layout):
if df.empty:
return {
"x": [],
"y": [],
"mode": mode
}
_data = []
for x, y in xypairs:
if (x in df.columns) and (y in df.columns):
_data.append(
{
"x": df[x].values.tolist(),
"y": df[y].values.tolist(),
"mode": mode
}
)
return {
"data": _data,
"layout": layout
}
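# Illustrative usage sketch (not part of the original helper): wiring the chart
# to a pandas DataFrame; registering route_func on a Flask app via pyxley's
# UILayout is omitted, and the column names are example values.
def _example_chart():
    import pandas as pd
    df = pd.DataFrame({"time": [1, 2, 3], "value": [10.0, 12.5, 9.8]})
    return PlotlyLines(
        xypairs=[("time", "value")],
        data_source=df,
        chart_id="example_chart",
        url="/api/example_chart/")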
| {
"content_hash": "49d79874e532e4e39e3572cad297bb09",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 73,
"avg_line_length": 28.65573770491803,
"alnum_prop": 0.425629290617849,
"repo_name": "subodhchhabra/pyxley",
"id": "2d443b10082d908b7c581ce49b638a1aab1a87d5",
"size": "1749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/plotly/demo/helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3839"
},
{
"name": "HTML",
"bytes": "2640"
},
{
"name": "JavaScript",
"bytes": "87379"
},
{
"name": "Python",
"bytes": "41725"
}
],
"symlink_target": ""
} |
import os
import getopt
import pickle
import sys
import time
import simpleubjson
try:
import json
except ImportError:
json = None
try:
import simplejson
except ImportError:
simplejson = None
try:
import ujson
except ImportError:
ujson = None
try:
import erlport
except ImportError:
erlport = None
def timeit(func):
def wrapper(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
return time.time() - start
return wrapper
def load_case(name):
fname = os.path.join(os.path.dirname(__file__), '../tests/data', name)
data = open(fname).read()
return json.loads(data)
def format_results(lib, version, msg, total, count):
return ' * [%s @ %s] %s in %f (%f / call)' % (lib, version, msg, total,
(total / float(count)))
def run_test(func, times, *args, **kwargs):
tfunc = timeit(lambda: func(*args, **kwargs))
return sum(tfunc() for i in range(times))
def make_benchmark(name, count):
data = load_case(name)
src = simpleubjson.encode(data, spec='draft-8')
total = run_test(simpleubjson.decode, count, src, spec='draft-8')
print(format_results('simpleubjson', simpleubjson.__version__,
'Decoded Draft-8', total, count))
total = run_test(simpleubjson.encode, count, data, spec='draft-8')
print(format_results('simpleubjson', simpleubjson.__version__,
'Encoded Draft-8', total, count))
print
src = simpleubjson.encode(data, spec='draft-9')
func = lambda *a, **k: list(simpleubjson.decode(*a, **k))
total = run_test(func, count, src, spec='draft-9')
print(format_results('simpleubjson', simpleubjson.__version__,
'Decoded Draft-9', total, count))
total = run_test(simpleubjson.encode, count, data, spec='draft-9')
print(format_results('simpleubjson', simpleubjson.__version__,
'Encoded Draft-9', total, count))
if json:
print
total = run_test(json.loads, count, json.dumps(data))
print(format_results('json_stdlib', json.__version__,
'Decoded', total, count))
total = run_test(json.dumps, count, data)
print(format_results('json_stdlib', json.__version__,
'Encoded', total, count))
if simplejson:
print
simplejson._toggle_speedups(True)
total = run_test(simplejson.loads, count, simplejson.dumps(data))
print(format_results('simplejson_c', simplejson.__version__,
'Decoded', total, count))
simplejson._toggle_speedups(True)
total = run_test(simplejson.dumps, count, data)
print(format_results('simplejson_c', simplejson.__version__,
'Encoded', total, count))
print
simplejson._toggle_speedups(False)
total = run_test(simplejson.loads, count, simplejson.dumps(data))
print(format_results('simplejson_py', simplejson.__version__,
'Decoded', total, count))
simplejson._toggle_speedups(False)
total = run_test(simplejson.dumps, count, data)
print(format_results('simplejson_py', simplejson.__version__,
'Encoded', total, count))
if ujson:
print
total = run_test(ujson.decode, count, ujson.encode(data))
print(format_results('ujson', ujson.__version__,
'Decoded', total, count))
total = run_test(ujson.encode, count, data)
print(format_results('ujson', ujson.__version__,
'Encoded', total, count))
if erlport:
print
total = run_test(erlport.decode, count, erlport.encode(data))
print(format_results('erlport', erlport.__version__,
'Decoded', total, count))
total = run_test(erlport.encode, count, data)
print(format_results('erlport', erlport.__version__,
'Encoded', total, count))
print
total = run_test(pickle.loads, count, pickle.dumps(data))
print(format_results('pickle', pickle.__version__,
'Decoded', total, count))
total = run_test(pickle.dumps, count, data)
print(format_results('pickle', pickle.__version__,
'Encoded', total, count))
def test_1(count):
print('* [test_1] CouchDB4k.compact.json %d times' % count)
make_benchmark('CouchDB4k.compact.json', count)
print
print
def test_2(count):
print('* [test_2] MediaContent.compact.json %d times' % count)
make_benchmark('MediaContent.compact.json', count)
print
print
def test_3(count):
print('* [test_3] TwitterTimeline.compact.json %d times' % count)
make_benchmark('TwitterTimeline.compact.json', count)
print
print
def run(count):
print('sys.version : %r' % (sys.version,))
print('sys.platform : %r' % (sys.platform,))
test_1(count)
test_2(count)
test_3(count)
def main():
"""benchmark.py - UBJSON vs JSON performance test script.
Usage:
-h, --help Prints this help
-c, --count= Rounds per test.
                    Default: 100000.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'hc:', ['help', 'count='])
except getopt.GetoptError:
print(main.__doc__)
sys.exit(2)
count = 100000
for key, value in opts:
print(key, value)
if key in ('-h', '--help'):
print(main.__doc__)
sys.exit()
elif key in ('-c', '--count'):
count = int(value)
else:
assert False, 'unhandled option %s' % key
run(count)
if __name__ == '__main__':
main()
| {
"content_hash": "dc4df59b4783ff357bdb17a4b777ede1",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 78,
"avg_line_length": 29.223880597014926,
"alnum_prop": 0.5677562138236295,
"repo_name": "samipshah/simpleubjson",
"id": "d05c7f7767ad0c4a6c8c6f84455492d6f6d016f3",
"size": "6096",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "simpleubjson/tools/benchmark.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "90353"
}
],
"symlink_target": ""
} |
from amaranth import *
from amaranth_cfu import InstructionBase, InstructionTestBase, simple_cfu, CfuTestBase
import unittest
# See proj_example for further example instructions
class TemplateInstruction(InstructionBase):
"""Template instruction
"""
def elab(self, m):
with m.If(self.start):
m.d.sync += self.output.eq(self.in0 + self.in1)
m.d.sync += self.done.eq(1)
with m.Else():
m.d.sync += self.done.eq(0)
class TemplateInstructionTest(InstructionTestBase):
def create_dut(self):
return TemplateInstruction()
def test(self):
self.verify([
(0, 0, 0),
(4, 5, 9),
(0xffffffff, 0xffffffff, 0xfffffffe),
])
def make_cfu():
return simple_cfu({
# Add instructions here...
0: TemplateInstruction(),
})
class CfuTest(CfuTestBase):
def create_dut(self):
return make_cfu()
def test(self):
DATA = [
# Test CFU calls here...
((0, 22, 22), 44),
]
return self.run_ops(DATA)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "001c4e4d897f36aae6f793862a402e66",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 86,
"avg_line_length": 22.01923076923077,
"alnum_prop": 0.5694323144104804,
"repo_name": "google/CFU-Playground",
"id": "3761c1231e17abc25f98e9edf17bf6e42f1f62ee",
"size": "1739",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "proj/proj_template/cfu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3800"
},
{
"name": "C",
"bytes": "449862"
},
{
"name": "C++",
"bytes": "4931362"
},
{
"name": "CMake",
"bytes": "976"
},
{
"name": "Dockerfile",
"bytes": "1026"
},
{
"name": "Jupyter Notebook",
"bytes": "35820"
},
{
"name": "Makefile",
"bytes": "40046"
},
{
"name": "Python",
"bytes": "1764584"
},
{
"name": "RobotFramework",
"bytes": "6125"
},
{
"name": "Scala",
"bytes": "18649"
},
{
"name": "Shell",
"bytes": "25687"
},
{
"name": "SystemVerilog",
"bytes": "6923"
},
{
"name": "Verilog",
"bytes": "6884686"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.index, name="index"),
url(r'^about', views.about, name="about"),
url(r'^settings', views.settings, name="settings")
]
| {
"content_hash": "2d26ef7b4f10eaed11b427c2c06b8308",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 54,
"avg_line_length": 28.25,
"alnum_prop": 0.6548672566371682,
"repo_name": "signalw/charliechat",
"id": "0a7ac96f08e86fcc7b4bad353b5de6025053de11",
"size": "226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12395"
},
{
"name": "HTML",
"bytes": "9118"
},
{
"name": "JavaScript",
"bytes": "2510"
},
{
"name": "Python",
"bytes": "97132"
}
],
"symlink_target": ""
} |
import sys
from keystone.common import cms
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import config
from keystone.models import token_model
from keystone import exception
from keystone.i18n import _, _LI, _LW
from keystone.resource import controllers as resource_controllers
LOG = log.getLogger(__name__)
CONF = cfg.CONF
# registry of authentication methods
AUTH_METHODS = {}
AUTH_PLUGINS_LOADED = False
def load_auth_methods():
global AUTH_PLUGINS_LOADED
if AUTH_PLUGINS_LOADED:
# Only try and load methods a single time.
return
# config.setup_authentication should be idempotent, call it to ensure we
# have setup all the appropriate configuration options we may need.
config.setup_authentication()
for plugin in CONF.auth.methods:
if '.' in plugin:
# NOTE(morganfainberg): if '.' is in the plugin name, it should be
# imported rather than used as a plugin identifier.
plugin_class = plugin
driver = importutils.import_object(plugin)
if not hasattr(driver, 'method'):
raise ValueError(_('Cannot load an auth-plugin by class-name '
'without a "method" attribute defined: %s'),
plugin_class)
LOG.info(_LI('Loading auth-plugins by class-name is deprecated.'))
plugin_name = driver.method
else:
plugin_name = plugin
plugin_class = CONF.auth.get(plugin)
driver = importutils.import_object(plugin_class)
if plugin_name in AUTH_METHODS:
raise ValueError(_('Auth plugin %(plugin)s is requesting '
'previously registered method %(method)s') %
{'plugin': plugin_class, 'method': driver.method})
AUTH_METHODS[plugin_name] = driver
AUTH_PLUGINS_LOADED = True
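# Illustrative configuration sketch (editor addition, not from this module):
# load_auth_methods() expects [auth]/methods to list the method names plus a
# per-method option naming the plugin class (see CONF.auth.get(plugin) above).
# Something along these lines is assumed; exact class paths depend on the
# deployment.
#
#   [auth]
#   methods = external,password,token
#   password = keystone.auth.plugins.password.Password
#   token = keystone.auth.plugins.token.Token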
def get_auth_method(method_name):
global AUTH_METHODS
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
return AUTH_METHODS[method_name]
class AuthContext(dict):
"""Retrofitting auth_context to reconcile identity attributes.
The identity attributes must not have conflicting values among the
auth plug-ins. The only exception is `expires_at`, which is set to its
earliest value.
"""
# identity attributes need to be reconciled among the auth plugins
IDENTITY_ATTRIBUTES = frozenset(['user_id', 'project_id',
'access_token_id', 'domain_id',
'expires_at'])
def __setitem__(self, key, val):
if key in self.IDENTITY_ATTRIBUTES and key in self:
existing_val = self[key]
if key == 'expires_at':
# special treatment for 'expires_at', we are going to take
# the earliest expiration instead.
if existing_val != val:
LOG.info(_LI('"expires_at" has conflicting values '
'%(existing)s and %(new)s. Will use the '
'earliest value.'),
{'existing': existing_val, 'new': val})
if existing_val is None or val is None:
val = existing_val or val
else:
val = min(existing_val, val)
elif existing_val != val:
msg = _('Unable to reconcile identity attribute %(attribute)s '
'as it has conflicting values %(new)s and %(old)s') % (
{'attribute': key,
'new': val,
'old': existing_val})
raise exception.Unauthorized(msg)
return super(AuthContext, self).__setitem__(key, val)
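# Illustrative sketch (editor addition): how AuthContext reconciles values.
# Repeating the same value for an identity attribute is accepted, conflicting
# values raise Unauthorized, and 'expires_at' keeps the earliest of the two
# (later_dt/earlier_dt are placeholder datetimes).
#
#   ctx = AuthContext()
#   ctx['user_id'] = 'u1'
#   ctx['user_id'] = 'u1'           # same value: accepted
#   # ctx['user_id'] = 'u2'         # conflicting value: raises Unauthorized
#   ctx['expires_at'] = later_dt
#   ctx['expires_at'] = earlier_dt  # earliest expiration wins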
# TODO(blk-u): this class doesn't use identity_api directly, but makes it
# available for consumers. Consumers should probably not be getting
# identity_api from this since it's available in global registry, then
# identity_api should be removed from this list.
@dependency.requires('identity_api', 'resource_api')
class AuthInfo(object):
"""Encapsulation of "auth" request."""
@staticmethod
def create(context, auth=None):
auth_info = AuthInfo(context, auth=auth)
auth_info._validate_and_normalize_auth_data()
return auth_info
def __init__(self, context, auth=None):
self.context = context
self.auth = auth
        self._scope_data = (None, None, None)
        # self._scope_data is (domain_id, project_id, unscoped)
        # project scope: (None, project_id, None)
        # domain scope: (domain_id, None, None)
        # unscoped: (None, None, 'unscoped')
def _assert_project_is_enabled(self, project_ref):
# ensure the project is enabled
try:
self.resource_api.assert_project_enabled(
project_id=project_ref['id'],
project=project_ref)
except AssertionError as e:
LOG.warning(six.text_type(e))
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _assert_domain_is_enabled(self, domain_ref):
try:
self.resource_api.assert_domain_enabled(
domain_id=domain_ref['id'],
domain=domain_ref)
except AssertionError as e:
LOG.warning(six.text_type(e))
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _lookup_domain(self, domain_info):
domain_id = domain_info.get('id')
domain_name = domain_info.get('name')
domain_ref = None
if not domain_id and not domain_name:
raise exception.ValidationError(attribute='id or name',
target='domain')
try:
if domain_name:
domain_ref = self.resource_api.get_domain_by_name(
domain_name)
else:
domain_ref = self.resource_api.get_domain(domain_id)
except exception.DomainNotFound as e:
LOG.exception(six.text_type(e))
raise exception.Unauthorized(e)
self._assert_domain_is_enabled(domain_ref)
return domain_ref
def _lookup_project(self, project_info):
project_id = project_info.get('id')
project_name = project_info.get('name')
project_ref = None
if not project_id and not project_name:
raise exception.ValidationError(attribute='id or name',
target='project')
try:
if project_name:
if 'domain' not in project_info:
raise exception.ValidationError(attribute='domain',
target='project')
domain_ref = self._lookup_domain(project_info['domain'])
project_ref = self.resource_api.get_project_by_name(
project_name, domain_ref['id'])
else:
project_ref = self.resource_api.get_project(project_id)
# NOTE(morganfainberg): The _lookup_domain method will raise
# exception.Unauthorized if the domain isn't found or is
# disabled.
self._lookup_domain({'id': project_ref['domain_id']})
except exception.ProjectNotFound as e:
raise exception.Unauthorized(e)
self._assert_project_is_enabled(project_ref)
return project_ref
def _validate_and_normalize_scope_data(self):
"""Validate and normalize scope data."""
if 'scope' not in self.auth:
return
if sum(['project' in self.auth['scope'],
'domain' in self.auth['scope'],
'unscoped' in self.auth['scope']]) != 1:
raise exception.ValidationError(
attribute='project, domain or unscoped',
target='scope')
if 'unscoped' in self.auth['scope']:
self._scope_data = (None, None, 'unscoped')
return
if 'project' in self.auth['scope']:
project_ref = self._lookup_project(self.auth['scope']['project'])
self._scope_data = (None, project_ref['id'], None)
elif 'domain' in self.auth['scope']:
domain_ref = self._lookup_domain(self.auth['scope']['domain'])
self._scope_data = (domain_ref['id'], None, None)
def _validate_auth_methods(self):
if 'identity' not in self.auth:
raise exception.ValidationError(attribute='identity',
target='auth')
# make sure auth methods are provided
if 'methods' not in self.auth['identity']:
raise exception.ValidationError(attribute='methods',
target='identity')
# make sure all the method data/payload are provided
for method_name in self.get_method_names():
if method_name not in self.auth['identity']:
raise exception.ValidationError(attribute=method_name,
target='identity')
# make sure auth method is supported
for method_name in self.get_method_names():
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
def _validate_and_normalize_auth_data(self):
"""Make sure "auth" is valid."""
# make sure "auth" exist
if not self.auth:
raise exception.ValidationError(attribute='auth',
target='request body')
self._validate_auth_methods()
self._validate_and_normalize_scope_data()
def get_method_names(self):
"""Returns the identity method names.
:returns: list of auth method names
"""
# Sanitizes methods received in request's body
# Filters out duplicates, while keeping elements' order.
method_names = []
for method in self.auth['identity']['methods']:
if method not in method_names:
method_names.append(method)
return method_names
def get_method_data(self, method):
"""Get the auth method payload.
:returns: auth method payload
"""
if method not in self.auth['identity']['methods']:
raise exception.ValidationError(attribute=method,
target='identity')
return self.auth['identity'][method]
def get_scope(self):
"""Get scope information.
Verify and return the scoping information.
:returns: (domain_id, project_id, unscoped).
If scope to a project, (None, project_id, None)
will be returned.
If scoped to a domain, (domain_id, None, None)
will be returned.
If unscoped, (None, None, 'unscoped') will be
returned.
"""
return self._scope_data
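    # Illustrative sketch (editor addition): callers unpack the scope tuple,
    # much as authenticate_for_token() does below.
    #
    #   domain_id, project_id, unscoped = auth_info.get_scope()
    #   if project_id:
    #       pass  # project-scoped request
    #   elif domain_id:
    #       pass  # domain-scoped request
    #   elif unscoped == 'unscoped':
    #       pass  # explicitly unscoped request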
def set_scope(self, domain_id=None, project_id=None, unscoped=None):
"""Set scope information."""
if domain_id and project_id:
msg = _('Scoping to both domain and project is not allowed')
raise ValueError(msg)
self._scope_data = (domain_id, project_id, unscoped)
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
'resource_api', 'token_provider_api')
class Auth(controller.Controller):
# Note(atiwari): From V3 auth controller code we are
# calling protection() wrappers, so we need to setup
# the member_name and collection_name attributes of
# auth controller code.
# In the absence of these attributes, default 'entity'
# string will be used to represent the target which is
# generic. Policy can be defined using 'entity' but it
# would not reflect the exact entity that is in context.
# We are defining collection_name = 'tokens' and
# member_name = 'token' to facilitate policy decisions.
collection_name = 'tokens'
member_name = 'token'
def __init__(self, *args, **kw):
super(Auth, self).__init__(*args, **kw)
config.setup_authentication()
def authenticate_for_token(self, context, auth=None):
"""Authenticate user and issue a token."""
include_catalog = 'nocatalog' not in context['query_string']
auth_info = AuthInfo.create(context, auth=auth)
auth_context = AuthContext(extras={},
method_names=[],
bind={})
self.authenticate(context, auth_info, auth_context)
self._check_and_set_default_scoping(auth_info, auth_context)
(domain_id, project_id, unscoped) = auth_info.get_scope()
method_names = auth_info.get_method_names()
method_names += auth_context.get('method_names', [])
# make sure the list is unique
method_names = list(set(method_names))
expires_at = auth_context.get('expires_at')
# NOTE(morganfainberg): define this here so it is clear what the
# argument is during the issue_v3_token provider call.
metadata_ref = None
token_audit_id = auth_context.get('audit_id')
(token_id, token_data) = self.token_provider_api.issue_v3_token(
auth_context['user_id'], method_names, expires_at, project_id,
domain_id, auth_context, metadata_ref, include_catalog,
parent_audit_id=token_audit_id)
return render_token_data_response(token_id, token_data,
created=True)
def _check_and_set_default_scoping(self, auth_info, auth_context):
(domain_id, project_id, unscoped) = auth_info.get_scope()
if domain_id or project_id:
# scope is specified
return
# Do not scope if request is for explicitly unscoped token
if unscoped is not None:
return
def authenticate(self, context, auth_info, auth_context):
"""Authenticate user."""
# The 'external' method allows any 'REMOTE_USER' based authentication
# In some cases the server can set REMOTE_USER as '' instead of
# dropping it, so this must be filtered out
if context['environment'].get('REMOTE_USER'):
try:
external = get_auth_method('external')
external.authenticate(context, auth_info, auth_context)
except exception.AuthMethodNotSupported:
# This will happen there is no 'external' plugin registered
# and the container is performing authentication.
# The 'kerberos' and 'saml' methods will be used this way.
# In those cases, it is correct to not register an
# 'external' plugin; if there is both an 'external' and a
# 'kerberos' plugin, it would run the check on identity twice.
LOG.debug("No 'external' plugin is registered.")
except exception.Unauthorized:
# If external fails then continue and attempt to determine
# user identity using remaining auth methods
LOG.debug("Authorization failed for 'external' auth method.")
# need to aggregate the results in case two or more methods
# are specified
auth_response = {'methods': []}
for method_name in auth_info.get_method_names():
method = get_auth_method(method_name)
resp = method.authenticate(context,
auth_info.get_method_data(method_name),
auth_context)
if resp:
auth_response['methods'].append(method_name)
auth_response[method_name] = resp
if auth_response["methods"]:
# authentication continuation required
raise exception.AdditionalAuthRequired(auth_response)
if 'user_id' not in auth_context:
msg = _('User not found')
raise exception.Unauthorized(msg)
def _check_subject_token(self, context, protection, *args, **kwargs):
target = {}
if context.get('subject_token_id') is not None:
ks_token = token_model.KeystoneToken(
token_id=context['subject_token_id'],
token_data=self.token_provider_api.validate_token(
context['subject_token_id']))
target.setdefault('token', {})
target['token']['user_id'] = ks_token.user_id
target['token']['id'] = ks_token.token_id
target['token']['domain_id'] = ks_token.user_domain_id
if ks_token.project_scoped:
target['token']['scope'] = 'project'
target['token']['scope_project_id'] = ks_token.project_id
target['token']['scope_domain_id'] = (ks_token.
project_domain_id)
elif ks_token.domain_scoped:
target['token']['scope'] = 'domain'
target['token']['scope_domain_id'] = ks_token.domain_id
else:
raise exception.UnsupportedTokenScope()
return self.check_protection(context, protection, target)
@controller.protected(callback=_check_subject_token)
def check_token(self, context):
token_id = context.get('subject_token_id')
token_data = self.token_provider_api.validate_v3_token(
token_id)
# NOTE(morganfainberg): The code in
# ``keystone.common.wsgi.render_response`` will remove the content
# body.
return render_token_data_response(token_id, token_data)
@controller.protected(callback=_check_subject_token)
def revoke_token(self, context):
token_id = context.get('subject_token_id')
return self.token_provider_api.revoke_token(token_id)
@controller.protected(callback=_check_subject_token)
def validate_token(self, context):
token_id = context.get('subject_token_id')
include_catalog = 'nocatalog' not in context['query_string']
token_data = self.token_provider_api.validate_v3_token(
token_id)
if not include_catalog and 'catalog' in token_data['token']:
del token_data['token']['catalog']
return render_token_data_response(token_id, token_data)
@controller.protected()
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
tokens = self.token_provider_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if not (expires and isinstance(expires, six.text_type)):
t['expires'] = timeutils.isotime(expires)
data = {'revoked': tokens}
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
def _combine_lists_uniquely(self, a, b):
# it's most likely that only one of these will be filled so avoid
# the combination if possible.
if a and b:
return {x['id']: x for x in a + b}.values()
else:
return a or b
@controller.protected()
def get_auth_projects(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
user_refs = []
if user_id:
try:
user_refs = self.assignment_api.list_projects_for_user(user_id)
except exception.UserNotFound:
# federated users have an id but they don't link to anything
pass
group_ids = auth_context.get('group_ids')
grp_refs = []
if group_ids:
grp_refs = self.assignment_api.list_projects_for_groups(group_ids)
refs = self._combine_lists_uniquely(user_refs, grp_refs)
return resource_controllers.Project.wrap_collection(context, refs)
@controller.protected()
def get_auth_domains(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
user_refs = []
if user_id:
try:
user_refs = self.assignment_api.list_domains_for_user(user_id)
except exception.UserNotFound:
# federated users have an id but they don't link to anything
pass
group_ids = auth_context.get('group_ids')
grp_refs = []
if group_ids:
grp_refs = self.assignment_api.list_domains_for_groups(group_ids)
refs = self._combine_lists_uniquely(user_refs, grp_refs)
return resource_controllers.Domain.wrap_collection(context, refs)
@controller.protected()
def get_auth_catalog(self, context):
auth_context = self.get_auth_context(context)
user_id = auth_context.get('user_id')
project_id = auth_context.get('scope_project_id')
if not project_id:
raise exception.Forbidden(
_('A project-scoped token is required to produce a service '
'catalog.'))
# The Controller base methods mostly assume that you're returning
# either a collection or a single element from a collection, neither of
# which apply to the catalog. Because this is a special case, this
# re-implements a tiny bit of work done by the base controller (such as
# self-referential link building) to avoid overriding or refactoring
# several private methods.
return {
'catalog': self.catalog_api.get_v3_catalog(user_id, project_id),
'links': {'self': self.base_url(context, path='auth/catalog')}
}
# FIXME(gyee): not sure if it belongs here or keystone.common. Park it here
# for now.
def render_token_data_response(token_id, token_data, created=False):
"""Render token data HTTP response.
Stash token ID into the X-Subject-Token header.
"""
headers = [('X-Subject-Token', token_id)]
if created:
status = (201, 'Created')
else:
status = (200, 'OK')
return wsgi.render_response(body=token_data,
status=status, headers=headers)
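# Illustrative sketch (editor addition): for a freshly issued token the helper
# above returns a 201 response carrying the token ID in a header, roughly:
#
#   resp = render_token_data_response('tok-123', token_data, created=True)
#   # -> wsgi.render_response(body=token_data, status=(201, 'Created'),
#   #                         headers=[('X-Subject-Token', 'tok-123')])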
| {
"content_hash": "75fe02aa5ed7414e19300cb64fcdc7ba",
"timestamp": "",
"source": "github",
"line_count": 561,
"max_line_length": 79,
"avg_line_length": 41.03030303030303,
"alnum_prop": 0.5816752107046659,
"repo_name": "darren-wang/ks3",
"id": "47c36ada0ce1c8bb7e861d685e7ed49a532661ff",
"size": "23604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/auth/controllers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "864167"
},
{
"name": "Shell",
"bytes": "4861"
}
],
"symlink_target": ""
} |
from django.conf import settings as django_settings
from django.contrib.sites.models import Site
from django.template.defaultfilters import slugify
from django.test import RequestFactory, TestCase
from django.utils import translation
from feincms.extensions.translations import (
translation_set_language,
user_has_language_set,
)
from feincms.module.page.models import Page
class TranslationTestCase(TestCase):
def setUp(self):
Page.register_templates(
{
"key": "base",
"title": "Standard template",
"path": "feincms_base.html",
"regions": (
("main", "Main content area"),
("sidebar", "Sidebar", "inherited"),
),
}
)
self.site_1 = Site.objects.all()[0]
# create a bunch of pages
en = self.create_default_page_set(language="en")
de = self.create_default_page_set(language="de", title="Testseite")
de.translation_of = en
de.save()
de.parent.translation_of = en.parent
de.parent.save()
self.page_de = de.parent
self.page_en = en.parent
if hasattr(translation, "LANGUAGE_SESSION_KEY"):
self.language_session_key = translation.LANGUAGE_SESSION_KEY
else:
# Django 1.6
self.language_session_key = django_settings.LANGUAGE_COOKIE_NAME
def create_page(self, title="Test page", parent=None, **kwargs):
defaults = {
"template_key": "base",
"site": self.site_1,
"in_navigation": False,
"active": False,
}
defaults.update(kwargs)
return Page.objects.create(
title=title,
slug=kwargs.get("slug", slugify(title)),
parent=parent,
**defaults
)
def create_default_page_set(self, **kwargs):
return self.create_page("Test child page", parent=self.create_page(**kwargs))
def testPage(self):
page = Page()
self.assertTrue(hasattr(page, "language"))
self.assertTrue(hasattr(page, "translation_of"))
self.assertEqual(self.page_de.translation_of, self.page_en)
self.assertEqual(self.page_de.original_translation, self.page_en)
# TODO: add request tests
# with translation.override('de'):
def test_user_has_language_set_with_session(self):
factory = RequestFactory()
request = factory.get(self.page_en.get_navigation_url())
setattr(request, "session", dict())
request.session[self.language_session_key] = "en"
self.assertEqual(user_has_language_set(request), True)
def test_user_has_language_set_with_cookie(self):
factory = RequestFactory()
request = factory.get(self.page_en.get_navigation_url())
request.COOKIES[django_settings.LANGUAGE_COOKIE_NAME] = "en"
self.assertEqual(user_has_language_set(request), True)
def test_translation_set_language_to_session(self):
factory = RequestFactory()
request = factory.get(self.page_de.get_navigation_url())
setattr(request, "session", dict())
translation_set_language(request, "de")
self.assertEqual(request.LANGUAGE_CODE, "de")
self.assertEqual(request.session[self.language_session_key], "de")
def test_translation_set_language_to_session_primary(self):
factory = RequestFactory()
request = factory.get(self.page_en.get_navigation_url())
setattr(request, "session", dict())
translation_set_language(request, "en")
self.assertEqual(request.LANGUAGE_CODE, "en")
        # We avoid setting the translation language to the primary language, so it should not be set
self.assertEqual(
request.session.get(self.language_session_key, "unset"), "unset"
)
def test_translation_set_language_to_cookie(self):
factory = RequestFactory()
request = factory.get(self.page_en.get_navigation_url())
response = translation_set_language(request, "en")
self.assertEqual(request.LANGUAGE_CODE, "en")
c_key = django_settings.LANGUAGE_COOKIE_NAME
self.assertEqual(response.cookies[c_key].value, "en")
| {
"content_hash": "ca6da1a000e5d5ebc751478eaff25bf9",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 97,
"avg_line_length": 36.956896551724135,
"alnum_prop": 0.6207137858642408,
"repo_name": "feincms/feincms",
"id": "803c18221e357c231141de2e2ccdb8c884eed0de",
"size": "4287",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/testapp/tests/test_extensions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6217"
},
{
"name": "HTML",
"bytes": "30915"
},
{
"name": "JavaScript",
"bytes": "35874"
},
{
"name": "Python",
"bytes": "427172"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
name='djangocms-tonicdev',
version='0.1.7',
description='DjangoCMS Tonic Notebook',
author='Aleksandr Zykov',
author_email='[email protected]',
url='https://github.com/TigerND/djangocms-tonicdev',
packages=[
'djangocms_tonicdev',
'djangocms_tonicdev.migrations',
],
data_files=[
],
install_requires = [
'django-cms>=3.2.5',
],
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
include_package_data=True,
zip_safe=False,
)
| {
"content_hash": "048a316c9e7ff6779334881656869160",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 56,
"avg_line_length": 25.85185185185185,
"alnum_prop": 0.6045845272206304,
"repo_name": "TigerND/djangocms-tonicdev",
"id": "307dd1ea8a3638f3ddf6cedc11e8799b78f5d2fc",
"size": "721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "709"
},
{
"name": "JavaScript",
"bytes": "588"
},
{
"name": "Python",
"bytes": "9715"
}
],
"symlink_target": ""
} |
import requests
# Create your views here.
from django.views.generic import TemplateView
from intro.models import Staff
from intro.utils import toPersianDigit
class HomeView(TemplateView):
template_name = 'intro/home.html'
def get_context_data(self, *args, **kwargs):
registration_url = "http://ce.sharif.edu/~golezardi/ssc-count/webelopers.php"
content = requests.get(registration_url).text
        return {"numberOfTeams": toPersianDigit(content)}
class StaffView(TemplateView):
template_name = 'intro/staff_list.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['staffs'] = Staff.objects.all().order_by('?')
return context
| {
"content_hash": "1e39a370092f41603a2e3b2e8ea7b750",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 85,
"avg_line_length": 29.4,
"alnum_prop": 0.6993197278911565,
"repo_name": "Kianoosh76/webelopers-scoreboard",
"id": "88846b4106c3e03e46d6e111c9e6921f0e0a7591",
"size": "735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intro/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24707"
},
{
"name": "HTML",
"bytes": "41939"
},
{
"name": "JavaScript",
"bytes": "688151"
},
{
"name": "Python",
"bytes": "33281"
}
],
"symlink_target": ""
} |
"""Unit tests for the "list_collectors" function."""
import unittest
from unittest import mock
from google.auth.transport import requests
from . import list_collectors
class ListCollectorsTest(unittest.TestCase):
@mock.patch.object(requests, "AuthorizedSession", autospec=True)
@mock.patch.object(requests.requests, "Response", autospec=True)
def test_http_error(self, mock_response, mock_session):
mock_session.request.return_value = mock_response
type(mock_response).status_code = mock.PropertyMock(return_value=400)
mock_response.raise_for_status.side_effect = (
requests.requests.exceptions.HTTPError())
with self.assertRaises(requests.requests.exceptions.HTTPError):
list_collectors.list_collectors(mock_session, "forwarder name")
@mock.patch.object(requests, "AuthorizedSession", autospec=True)
@mock.patch.object(requests.requests, "Response", autospec=True)
def test_happy_path(self, mock_response, mock_session):
mock_session.request.return_value = mock_response
type(mock_response).status_code = mock.PropertyMock(return_value=200)
expected_collectors = {
"collectors": [{
"name": "forwarders/uuid/collectors/uuid",
"displayName": "SplunkCollector",
"config": {
"logType": "WINDOWS_DNS",
"maxSecondsPerBatch": 10,
"maxBytesPerBatch": "1048576",
"splunkSettings": {
"host": "127.0.0.1",
"minimumWindowSize": 10,
"maximumWindowSize": 30,
"queryString": "search index=* sourcetype=dns",
"queryMode": "realtime",
"port": 8089
}
},
"state": "ACTIVE"
}, {
"name": "forwarders/uuid/collectors/uuid",
"displayName": "SyslogCollector",
"config": {
"logType": "PAN_FIREWALL",
"maxSecondsPerBatch": 10,
"maxBytesPerBatch": "1048576",
"syslogSettings": {
"protocol": "TCP",
"address": "0.0.0.0",
"port": 10514,
"bufferSize": "65536",
"connectionTimeout": 60
}
},
"state": "ACTIVE"
}]
}
mock_response.json.return_value = expected_collectors
actual_collectors = list_collectors.list_collectors(mock_session,
"forwarder name")
self.assertEqual(actual_collectors, expected_collectors)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "6d23d21dd92c69859b0a7098041f0e66",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 73,
"avg_line_length": 36.2972972972973,
"alnum_prop": 0.5662695457930007,
"repo_name": "chronicle/api-samples-python",
"id": "d2d07a7999da9c0f666e1b52eb7a453ed31fb6a2",
"size": "3262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forwarders/list_collectors_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "556471"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import units
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LW
from cinder import objects
from cinder import policy
from cinder import quota
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
CONF = cfg.CONF
GB = units.Gi
QUOTAS = quota.QUOTAS
# Only in these 'sources' status can we attempt to create a volume from a
# source volume or a source snapshot, other status states we can not create
# from, 'error' being the common example.
SNAPSHOT_PROCEED_STATUS = ('available',)
SRC_VOL_PROCEED_STATUS = ('available', 'in-use',)
REPLICA_PROCEED_STATUS = ('active', 'active-stopped',)
CG_PROCEED_STATUS = ('available', 'creating',)
CGSNAPSHOT_PROCEED_STATUS = ('available',)
class ExtractVolumeRequestTask(flow_utils.CinderTask):
    """Processes api request values into a validated set of values.
    This task's responsibility is to take in a set of inputs that will form
    a potential volume request, validate those values against a set of
    conditions and/or translate those values into a valid set, and then return
    the validated/translated values for use by other tasks.
Reversion strategy: N/A
"""
# This task will produce the following outputs (said outputs can be
# saved to durable storage in the future so that the flow can be
# reconstructed elsewhere and continued).
default_provides = set(['availability_zone', 'size', 'snapshot_id',
'source_volid', 'volume_type', 'volume_type_id',
'encryption_key_id', 'source_replicaid',
'consistencygroup_id', 'cgsnapshot_id'])
def __init__(self, image_service, availability_zones, **kwargs):
super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION],
**kwargs)
self.image_service = image_service
self.availability_zones = availability_zones
@staticmethod
def _extract_resource(resource, allowed_vals, exc, resource_name,
props=('status',)):
"""Extracts the resource id from the provided resource.
        This method validates the input resource dict and checks that the
        properties whose names are passed in the `props` argument match the
        corresponding allowed-value lists in the `allowed_vals` argument. In
        case of a mismatch an exception of type exc is raised.
:param resource: Resource dict.
:param allowed_vals: Tuple of allowed values lists.
:param exc: Exception type to raise.
:param resource_name: Name of resource - used to construct log message.
:param props: Tuple of resource properties names to validate.
:return: Id of a resource.
"""
resource_id = None
if resource:
for prop, allowed_states in zip(props, allowed_vals):
if resource[prop] not in allowed_states:
                    msg = _("Originating %(res)s %(prop)s must be one of "
"'%(vals)s' values")
msg = msg % {'res': resource_name,
'prop': prop,
'vals': ', '.join(allowed_states)}
# TODO(harlowja): what happens if the status changes after
# this initial resource status check occurs??? Seems like
# someone could delete the resource after this check passes
# but before the volume is officially created?
raise exc(reason=msg)
resource_id = resource['id']
return resource_id
def _extract_consistencygroup(self, consistencygroup):
return self._extract_resource(consistencygroup, (CG_PROCEED_STATUS,),
exception.InvalidConsistencyGroup,
'consistencygroup')
def _extract_cgsnapshot(self, cgsnapshot):
return self._extract_resource(cgsnapshot, (CGSNAPSHOT_PROCEED_STATUS,),
exception.InvalidCgSnapshot,
'CGSNAPSHOT')
def _extract_snapshot(self, snapshot):
return self._extract_resource(snapshot, (SNAPSHOT_PROCEED_STATUS,),
exception.InvalidSnapshot, 'snapshot')
def _extract_source_volume(self, source_volume):
return self._extract_resource(source_volume, (SRC_VOL_PROCEED_STATUS,),
exception.InvalidVolume, 'source volume')
def _extract_source_replica(self, source_replica):
return self._extract_resource(source_replica, (SRC_VOL_PROCEED_STATUS,
REPLICA_PROCEED_STATUS),
exception.InvalidVolume,
'replica', ('status',
'replication_status'))
@staticmethod
def _extract_size(size, source_volume, snapshot):
"""Extracts and validates the volume size.
This function will validate or when not provided fill in the provided
size variable from the source_volume or snapshot and then does
validation on the size that is found and returns said validated size.
"""
def validate_snap_size(size):
if snapshot and size < snapshot['volume_size']:
msg = _("Volume size '%(size)s'GB cannot be smaller than"
" the snapshot size %(snap_size)sGB. "
"They must be >= original snapshot size.")
msg = msg % {'size': size,
'snap_size': snapshot['volume_size']}
raise exception.InvalidInput(reason=msg)
def validate_source_size(size):
if source_volume and size < source_volume['size']:
msg = _("Volume size '%(size)s'GB cannot be smaller than "
"original volume size %(source_size)sGB. "
"They must be >= original volume size.")
msg = msg % {'size': size,
'source_size': source_volume['size']}
raise exception.InvalidInput(reason=msg)
def validate_int(size):
if not isinstance(size, int) or size <= 0:
msg = _("Volume size '%(size)s' must be an integer and"
" greater than 0") % {'size': size}
raise exception.InvalidInput(reason=msg)
# Figure out which validation functions we should be applying
# on the size value that we extract.
validator_functors = [validate_int]
if source_volume:
validator_functors.append(validate_source_size)
elif snapshot:
validator_functors.append(validate_snap_size)
# If the size is not provided then try to provide it.
if not size and source_volume:
size = source_volume['size']
elif not size and snapshot:
size = snapshot['volume_size']
size = utils.as_int(size)
LOG.debug("Validating volume '%(size)s' using %(functors)s" %
{'size': size,
'functors': ", ".join([common.make_pretty_name(func)
for func in validator_functors])})
for func in validator_functors:
func(size)
return size
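    # Illustrative sketch (editor addition): expected behaviour of
    # _extract_size() given the validators above (the values are assumptions).
    #
    #   _extract_size(None, None, {'volume_size': 5})   # -> 5 (from snapshot)
    #   _extract_size(None, {'size': 10}, None)         # -> 10 (from source)
    #   _extract_size(3, {'size': 10}, None)            # raises InvalidInput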
def _check_image_metadata(self, context, image_id, size):
"""Checks image existence and validates that the image metadata."""
# Check image existence
if image_id is None:
return
# NOTE(harlowja): this should raise an error if the image does not
# exist, this is expected as it signals that the image_id is missing.
image_meta = self.image_service.show(context, image_id)
# check whether image is active
if image_meta['status'] != 'active':
msg = _('Image %(image_id)s is not active.')\
% {'image_id': image_id}
raise exception.InvalidInput(reason=msg)
# Check image size is not larger than volume size.
image_size = utils.as_int(image_meta['size'], quiet=False)
image_size_in_gb = (image_size + GB - 1) / GB
if image_size_in_gb > size:
msg = _('Size of specified image %(image_size)sGB'
' is larger than volume size %(volume_size)sGB.')
msg = msg % {'image_size': image_size_in_gb, 'volume_size': size}
raise exception.InvalidInput(reason=msg)
# Check image min_disk requirement is met for the particular volume
min_disk = image_meta.get('min_disk', 0)
if size < min_disk:
msg = _('Volume size %(volume_size)sGB cannot be smaller'
' than the image minDisk size %(min_disk)sGB.')
msg = msg % {'volume_size': size, 'min_disk': min_disk}
raise exception.InvalidInput(reason=msg)
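    # Illustrative sketch (editor addition): a 3GiB image
    # (image_meta['size'] == 3 * GB) fails the size check for a 2GB volume,
    # and an image with min_disk of 10 rejects any requested size below 10.
    #
    #   self._check_image_metadata(context, image_id, size=2)
    #   # -> exception.InvalidInput if the image is 3GiB or has min_disk > 2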
@staticmethod
def _check_metadata_properties(metadata=None):
"""Checks that the volume metadata properties are valid."""
if not metadata:
metadata = {}
for (k, v) in metadata.items():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warning(msg)
raise exception.InvalidVolumeMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key %s greater than 255 "
"characters") % k
LOG.warning(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property key %s value greater than"
" 255 characters") % k
LOG.warning(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
def _extract_availability_zone(self, availability_zone, snapshot,
source_volume):
"""Extracts and returns a validated availability zone.
        This function will extract the availability zone (if not provided) from
        the snapshot or source_volume, then perform a set of validation
        checks on the provided or extracted availability zone, and finally
        return the validated availability zone.
"""
# Try to extract the availability zone from the corresponding snapshot
# or source volume if either is valid so that we can be in the same
# availability zone as the source.
if availability_zone is None:
if snapshot:
try:
availability_zone = snapshot['volume']['availability_zone']
except (TypeError, KeyError):
pass
if source_volume and availability_zone is None:
try:
availability_zone = source_volume['availability_zone']
except (TypeError, KeyError):
pass
if availability_zone is None:
if CONF.default_availability_zone:
availability_zone = CONF.default_availability_zone
else:
# For backwards compatibility use the storage_availability_zone
availability_zone = CONF.storage_availability_zone
if availability_zone not in self.availability_zones:
msg = _("Availability zone '%s' is invalid") % (availability_zone)
LOG.warning(msg)
raise exception.InvalidInput(reason=msg)
# If the configuration only allows cloning to the same availability
# zone then we need to enforce that.
if CONF.cloned_volume_same_az:
snap_az = None
try:
snap_az = snapshot['volume']['availability_zone']
except (TypeError, KeyError):
pass
if snap_az and snap_az != availability_zone:
msg = _("Volume must be in the same "
"availability zone as the snapshot")
raise exception.InvalidInput(reason=msg)
source_vol_az = None
try:
source_vol_az = source_volume['availability_zone']
except (TypeError, KeyError):
pass
if source_vol_az and source_vol_az != availability_zone:
msg = _("Volume must be in the same "
"availability zone as the source volume")
raise exception.InvalidInput(reason=msg)
return availability_zone
def _get_encryption_key_id(self, key_manager, context, volume_type_id,
snapshot, source_volume):
encryption_key_id = None
if volume_types.is_encrypted(context, volume_type_id):
if snapshot is not None: # creating from snapshot
encryption_key_id = snapshot['encryption_key_id']
elif source_volume is not None: # cloning volume
encryption_key_id = source_volume['encryption_key_id']
# NOTE(joel-coffman): References to the encryption key should *not*
# be copied because the key is deleted when the volume is deleted.
# Clone the existing key and associate a separate -- but
# identical -- key with each volume.
if encryption_key_id is not None:
encryption_key_id = key_manager.copy_key(context,
encryption_key_id)
else:
encryption_key_id = key_manager.create_key(context)
return encryption_key_id
def _get_volume_type_id(self, volume_type, source_volume, snapshot):
if not volume_type and source_volume:
return source_volume['volume_type_id']
elif snapshot is not None:
if volume_type:
current_volume_type_id = volume_type.get('id')
if current_volume_type_id != snapshot['volume_type_id']:
msg = _LW("Volume type will be changed to "
"be the same as the source volume.")
LOG.warning(msg)
return snapshot['volume_type_id']
else:
return volume_type.get('id')
def execute(self, context, size, snapshot, image_id, source_volume,
availability_zone, volume_type, metadata, key_manager,
source_replica, consistencygroup, cgsnapshot):
utils.check_exclusive_options(snapshot=snapshot,
imageRef=image_id,
source_volume=source_volume)
policy.enforce_action(context, ACTION)
# TODO(harlowja): what guarantee is there that the snapshot or source
# volume will remain available after we do this initial verification??
snapshot_id = self._extract_snapshot(snapshot)
source_volid = self._extract_source_volume(source_volume)
source_replicaid = self._extract_source_replica(source_replica)
size = self._extract_size(size, source_volume, snapshot)
consistencygroup_id = self._extract_consistencygroup(consistencygroup)
cgsnapshot_id = self._extract_cgsnapshot(cgsnapshot)
self._check_image_metadata(context, image_id, size)
availability_zone = self._extract_availability_zone(availability_zone,
snapshot,
source_volume)
# TODO(joel-coffman): This special handling of snapshots to ensure that
# their volume type matches the source volume is too convoluted. We
# should copy encryption metadata from the encrypted volume type to the
# volume upon creation and propagate that information to each snapshot.
# This strategy avoids any dependency upon the encrypted volume type.
def_vol_type = volume_types.get_default_volume_type()
if not volume_type and not source_volume and not snapshot:
volume_type = def_vol_type
# When creating a clone of a replica (replication test), we can't
# use the volume type of the replica, therefore, we use the default.
# NOTE(ronenkat): this assumes the default type is not replicated.
if source_replicaid:
volume_type = def_vol_type
volume_type_id = self._get_volume_type_id(volume_type,
source_volume, snapshot)
encryption_key_id = self._get_encryption_key_id(key_manager,
context,
volume_type_id,
snapshot,
source_volume)
specs = {}
if volume_type_id:
qos_specs = volume_types.get_volume_type_qos_specs(volume_type_id)
specs = qos_specs['qos_specs']
if not specs:
# to make sure we don't pass empty dict
specs = None
self._check_metadata_properties(metadata)
return {
'size': size,
'snapshot_id': snapshot_id,
'source_volid': source_volid,
'availability_zone': availability_zone,
'volume_type': volume_type,
'volume_type_id': volume_type_id,
'encryption_key_id': encryption_key_id,
'qos_specs': specs,
'source_replicaid': source_replicaid,
'consistencygroup_id': consistencygroup_id,
'cgsnapshot_id': cgsnapshot_id,
}
class EntryCreateTask(flow_utils.CinderTask):
"""Creates an entry for the given volume creation in the database.
Reversion strategy: remove the volume_id created from the database.
"""
default_provides = set(['volume_properties', 'volume_id', 'volume'])
def __init__(self, db):
requires = ['availability_zone', 'description', 'metadata',
'name', 'reservations', 'size', 'snapshot_id',
'source_volid', 'volume_type_id', 'encryption_key_id',
'source_replicaid', 'consistencygroup_id',
'cgsnapshot_id', 'multiattach']
super(EntryCreateTask, self).__init__(addons=[ACTION],
requires=requires)
self.db = db
def execute(self, context, optional_args, **kwargs):
"""Creates a database entry for the given inputs and returns details.
Accesses the database and creates a new entry for the to be created
volume using the given volume properties which are extracted from the
input kwargs (and associated requirements this task needs). These
requirements should be previously satisfied and validated by a
pre-cursor task.
"""
volume_properties = {
'size': kwargs.pop('size'),
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'attach_status': 'detached',
'encryption_key_id': kwargs.pop('encryption_key_id'),
# Rename these to the internal name.
'display_description': kwargs.pop('description'),
'display_name': kwargs.pop('name'),
'replication_status': 'disabled',
'multiattach': kwargs.pop('multiattach'),
}
# Merge in the other required arguments which should provide the rest
# of the volume property fields (if applicable).
volume_properties.update(kwargs)
volume = self.db.volume_create(context, volume_properties)
return {
'volume_id': volume['id'],
'volume_properties': volume_properties,
# NOTE(harlowja): it appears like further usage of this volume
# result actually depend on it being a sqlalchemy object and not
# just a plain dictionary so that's why we are storing this here.
#
# In the future where this task results can be serialized and
# restored automatically for continued running we will need to
# resolve the serialization & recreation of this object since raw
# sqlalchemy objects can't be serialized.
'volume': volume,
}
def revert(self, context, result, optional_args, **kwargs):
if isinstance(result, ft.Failure):
# We never produced a result and therefore can't destroy anything.
return
if optional_args['is_quota_committed']:
# If quota got commited we shouldn't rollback as the volume has
# already been created and the quota has already been absorbed.
return
vol_id = result['volume_id']
try:
self.db.volume_destroy(context.elevated(), vol_id)
except exception.CinderException:
# We are already reverting, therefore we should silence this
# exception since a second exception being active will be bad.
#
# NOTE(harlowja): Being unable to destroy a volume is pretty
# bad though!!
LOG.exception(_LE("Failed destroying volume entry %s"), vol_id)
class QuotaReserveTask(flow_utils.CinderTask):
"""Reserves a single volume with the given size & the given volume type.
Reversion strategy: rollback the quota reservation.
    Warning: if the process that is running this reserve and commit sequence
    fails (or is killed) before the quota is rolled back or committed, it
    does appear like the quota will never be rolled back. This makes
software upgrades hard (inflight operations will need to be stopped or
allowed to complete before the upgrade can occur). *In the future* when
taskflow has persistence built-in this should be easier to correct via
an automated or manual process.
"""
default_provides = set(['reservations'])
def __init__(self):
super(QuotaReserveTask, self).__init__(addons=[ACTION])
def execute(self, context, size, volume_type_id, optional_args):
try:
values = {'per_volume_gigabytes': size}
QUOTAS.limit_check(context, project_id=context.project_id,
**values)
except exception.OverQuota as e:
quotas = e.kwargs['quotas']
raise exception.VolumeSizeExceedsLimit(
size=size, limit=quotas['per_volume_gigabytes'])
try:
reserve_opts = {'volumes': 1, 'gigabytes': size}
QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(context, **reserve_opts)
return {
'reservations': reservations,
}
except exception.OverQuota as e:
overs = e.kwargs['overs']
quotas = e.kwargs['quotas']
usages = e.kwargs['usages']
def _consumed(name):
return usages[name]['reserved'] + usages[name]['in_use']
def _get_over(name):
for over in overs:
if name in over:
return over
return None
over_name = _get_over('gigabytes')
if over_name:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG volume (%(d_consumed)dG "
"of %(d_quota)dG already consumed)")
LOG.warning(msg, {'s_pid': context.project_id,
's_size': size,
'd_consumed': _consumed(over_name),
'd_quota': quotas[over_name]})
raise exception.VolumeSizeExceedsAvailableQuota(
name=over_name,
requested=size,
consumed=_consumed(over_name),
quota=quotas[over_name])
elif _get_over('volumes'):
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"volume (%(d_consumed)d volumes "
"already consumed)")
LOG.warning(msg, {'s_pid': context.project_id,
'd_consumed': _consumed('volumes')})
raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
else:
# If nothing was reraised, ensure we reraise the initial error
raise
def revert(self, context, result, optional_args, **kwargs):
# We never produced a result and therefore can't destroy anything.
if isinstance(result, ft.Failure):
return
if optional_args['is_quota_committed']:
# The reservations have already been committed and can not be
# rolled back at this point.
return
# We actually produced an output that we can revert so lets attempt
# to use said output to rollback the reservation.
reservations = result['reservations']
try:
QUOTAS.rollback(context, reservations)
except exception.CinderException:
# We are already reverting, therefore we should silence this
# exception since a second exception being active will be bad.
LOG.exception(_LE("Failed rolling back quota for"
" %s reservations"), reservations)
class QuotaCommitTask(flow_utils.CinderTask):
"""Commits the reservation.
    Reversion strategy: N/A (the rollback will be handled by the task that did
    the initial reservation; see QuotaReserveTask).
    Warning: if the process that is running this reserve and commit sequence
    fails (or is killed) before the quota is rolled back or committed, it
    does appear like the quota will never be rolled back. This makes
software upgrades hard (inflight operations will need to be stopped or
allowed to complete before the upgrade can occur). *In the future* when
taskflow has persistence built-in this should be easier to correct via
an automated or manual process.
"""
def __init__(self):
super(QuotaCommitTask, self).__init__(addons=[ACTION])
def execute(self, context, reservations, volume_properties,
optional_args):
QUOTAS.commit(context, reservations)
# updating is_quota_committed attribute of optional_args dictionary
optional_args['is_quota_committed'] = True
return {'volume_properties': volume_properties}
def revert(self, context, result, **kwargs):
# We never produced a result and therefore can't destroy anything.
if isinstance(result, ft.Failure):
return
volume = result['volume_properties']
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume['volume_type_id'])
reservations = QUOTAS.reserve(context,
project_id=context.project_id,
**reserve_opts)
if reservations:
QUOTAS.commit(context, reservations,
project_id=context.project_id)
except Exception:
LOG.exception(_LE("Failed to update quota for deleting "
"volume: %s"), volume['id'])
class VolumeCastTask(flow_utils.CinderTask):
"""Performs a volume create cast to the scheduler or to the volume manager.
This will signal a transition of the api workflow to another child and/or
related workflow on another component.
Reversion strategy: rollback source volume status and error out newly
created volume.
"""
def __init__(self, scheduler_rpcapi, volume_rpcapi, db):
requires = ['image_id', 'scheduler_hints', 'snapshot_id',
'source_volid', 'volume_id', 'volume_type',
'volume_properties', 'source_replicaid',
'consistencygroup_id', 'cgsnapshot_id', ]
super(VolumeCastTask, self).__init__(addons=[ACTION],
requires=requires)
self.volume_rpcapi = volume_rpcapi
self.scheduler_rpcapi = scheduler_rpcapi
self.db = db
def _cast_create_volume(self, context, request_spec, filter_properties):
source_volid = request_spec['source_volid']
source_replicaid = request_spec['source_replicaid']
volume_id = request_spec['volume_id']
snapshot_id = request_spec['snapshot_id']
image_id = request_spec['image_id']
cgroup_id = request_spec['consistencygroup_id']
host = None
cgsnapshot_id = request_spec['cgsnapshot_id']
if cgroup_id:
cgroup = self.db.consistencygroup_get(context, cgroup_id)
if cgroup:
host = cgroup.get('host', None)
elif snapshot_id and CONF.snapshot_same_host:
# NOTE(Rongze Zhu): A simple solution for bug 1008866.
#
# If snapshot_id is set and CONF.snapshot_same_host is True, make
# the call create volume directly to the volume host where the
# snapshot resides instead of passing it through the scheduler, so
# snapshot can be copied to the new volume.
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
source_volume_ref = self.db.volume_get(context, snapshot.volume_id)
host = source_volume_ref['host']
elif source_volid:
source_volume_ref = self.db.volume_get(context, source_volid)
host = source_volume_ref['host']
elif source_replicaid:
source_volume_ref = self.db.volume_get(context, source_replicaid)
host = source_volume_ref['host']
if not host:
# Cast to the scheduler and let it handle whatever is needed
# to select the target host for this volume.
self.scheduler_rpcapi.create_volume(
context,
CONF.volume_topic,
volume_id,
snapshot_id=snapshot_id,
image_id=image_id,
request_spec=request_spec,
filter_properties=filter_properties)
else:
# Bypass the scheduler and send the request directly to the volume
# manager.
now = timeutils.utcnow()
values = {'host': host, 'scheduled_at': now}
volume_ref = self.db.volume_update(context, volume_id, values)
if not cgsnapshot_id:
self.volume_rpcapi.create_volume(
context,
volume_ref,
volume_ref['host'],
request_spec,
filter_properties,
allow_reschedule=False)
def execute(self, context, **kwargs):
scheduler_hints = kwargs.pop('scheduler_hints', None)
request_spec = kwargs.copy()
filter_properties = {}
if scheduler_hints:
filter_properties['scheduler_hints'] = scheduler_hints
self._cast_create_volume(context, request_spec, filter_properties)
def revert(self, context, result, flow_failures, **kwargs):
if isinstance(result, ft.Failure):
return
# Restore the source volume status and set the volume to error status.
volume_id = kwargs['volume_id']
common.restore_source_status(context, self.db, kwargs)
common.error_out_volume(context, self.db, volume_id)
LOG.error(_LE("Volume %s: create failed"), volume_id)
exc_info = False
if all(flow_failures[-1].exc_info):
exc_info = flow_failures[-1].exc_info
LOG.error(_LE('Unexpected build error:'), exc_info=exc_info)
def get_flow(db_api, image_service_api, availability_zones, create_what,
scheduler_rpcapi=None, volume_rpcapi=None):
"""Constructs and returns the api entrypoint flow.
This flow will do the following:
1. Inject keys & values for dependent tasks.
2. Extracts and validates the input keys & values.
3. Reserves the quota (reverts quota on any failures).
4. Creates the database entry.
5. Commits the quota.
6. Casts to volume manager or scheduler for further processing.
"""
flow_name = ACTION.replace(":", "_") + "_api"
api_flow = linear_flow.Flow(flow_name)
api_flow.add(ExtractVolumeRequestTask(
image_service_api,
availability_zones,
rebind={'size': 'raw_size',
'availability_zone': 'raw_availability_zone',
'volume_type': 'raw_volume_type'}))
api_flow.add(QuotaReserveTask(),
EntryCreateTask(db_api),
QuotaCommitTask())
if scheduler_rpcapi and volume_rpcapi:
# This will cast it out to either the scheduler or volume manager via
# the rpc apis provided.
api_flow.add(VolumeCastTask(scheduler_rpcapi, volume_rpcapi, db_api))
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(api_flow, store=create_what)
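# Illustrative sketch (editor addition): presumably how a caller drives the
# flow returned above; the actual call site lives in cinder.volume.api and may
# differ in detail.
#
#   flow_engine = get_flow(db, image_service, availability_zones, create_what,
#                          scheduler_rpcapi, volume_rpcapi)
#   flow_engine.run()
#   volume = flow_engine.storage.fetch('volume')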
| {
"content_hash": "2adaeb0e477945e0bd2cc6408adf3517",
"timestamp": "",
"source": "github",
"line_count": 763,
"max_line_length": 79,
"avg_line_length": 44.51376146788991,
"alnum_prop": 0.5818808149805677,
"repo_name": "JioCloud/cinder",
"id": "5f1a4adfcbb5b22d6f107ba73e1194d430a90a39",
"size": "34538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/flows/api/create_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11977630"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
class NetworkDevice(object):
def __init__(self, ip, username, password):
self.ip = ip
self.username = username
self.password = password
def connect(self):
pass
def enable(self):
pass
def sh_ver(self):
pass
class NetworkDevice2(object):
def __init__(self, ip, username, password):
self.x = ip
self.y = username
self.z = password
class SomeClass(object):
def __init__(self, x, y):
self.x = x
self.y = y
def a_sum(self):
return self.x + self.y
def a_product(self):
return self.x * self.y
class NewClass(SomeClass):
def __init__(self, x, y, z):
SomeClass.__init__(self, x, y)
self.z = z
def a_sum(self):
        return self.x + self.y + self.z
| {
"content_hash": "db39ef048c8fe3d6939928de8d85e52e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 47,
"avg_line_length": 22.363636363636363,
"alnum_prop": 0.5894308943089431,
"repo_name": "bdlamprecht/python_class",
"id": "5286ba15a622b2d39cad96c1722463d0daf61f89",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neteng/class_ex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32103"
}
],
"symlink_target": ""
} |
from wires import *
from models.post import Post
from models.user import User
from models.comment import Comment
from pdb import set_trace as debug
def index(parameters):
template = open('./templates/posts/index.html').read()
posts = Post.all(Post.cxn, "posts")
post_template = open('./templates/posts/show.html').read()
rendered_posts = "<br><br>".join([TemplateEngine(post_template, definitions(post, {"id": post.id, "comments_link": '<p><a href="/posts/{0}#comments">{1} comments</a></p>'.format(post.id, len(post.comments(globals())))})).render_partial() for post in posts])
index_definitions = {"number_of_pages": str(parameters["number_of_pages"]), "rendered_posts": rendered_posts}
index_definitions["login_status_message"] = login_status_message(parameters)
return TemplateEngine(template, index_definitions).render()
def show(parameters):
post = Post.find(Post.cxn, "posts", parameters["id"])
template = open('./templates/posts/show.html').read()
comment_template = open('./templates/comments/show.html').read()
show_post_script_tag = '<script src="/show_post.js"></script>'
comments = post.comments(globals())
if comments:
rendered_comments = "<h3>Comments</h3>" + "".join([TemplateEngine(comment_template, comment.attributes).render_partial() for comment in comments])
else:
rendered_comments = '<p id="no_comments">No comments yet.</p>'
new_comment_link_html = '<a id="new_comment_link" href="#">Make a new comment!</a>'
parameters.update({"rendered_comments": rendered_comments, "new_comment_link": new_comment_link_html, "show_post_script_tag": show_post_script_tag})
return TemplateEngine(template, definitions(post, parameters)).render()
def new(parameters):
template = open('./templates/posts/new.html').read()
return TemplateEngine(template, parameters).render()
def create(parameters):
parameters["body"] = parameters["body"].replace("\n", "<br>")
user = current_user(parameters)
if user:
parameters.update({"author_id": user.id})
new_post = Post(Post.cxn, "posts", parameters)
new_post.save()
parameters.update({"id": str(new_post.id)})
return show(parameters)
else:
page = "<html><head></head><body><h2>{0}</h2>{1}</body></html>".format("You must be logged in to submit a new post", "<a href='/'><em>(home)</em></a>")
return page
# helper method for constructing substitution definitions from the supplied
# object and request parameters
def definitions(post, parameters):
defns_dict = dict(list(parameters.items()) + list(post.attributes.items()))
defns_dict["author_display_name"] = post.author(globals()).display_name
defns_dict["author_id"] = post.author(globals()).id
return {key: str(defns_dict[key]) for key in defns_dict}
def current_user(parameters):
if "session_token" in parameters:
user = User.find_where(User.cxn, "users", {"session_token": parameters["session_token"]})
return user
else:
return None
def login_status_message(parameters):
user = current_user(parameters)
if user:
return "logged in as {0} ({1})".format(user.display_name, user.username)
return "not logged in"
| {
"content_hash": "0cf7837dd7205daccc950dd69cad7bb5",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 261,
"avg_line_length": 47.80882352941177,
"alnum_prop": 0.6748692709935404,
"repo_name": "zackmdavis/Wires",
"id": "d6510d865568a586caac0046dd29735e2680f74f",
"size": "3251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog_engine/controllers/posts_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "105"
},
{
"name": "JavaScript",
"bytes": "1940"
},
{
"name": "Python",
"bytes": "31048"
},
{
"name": "Shell",
"bytes": "291"
}
],
"symlink_target": ""
} |
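# A small sketch of the dictionary merge performed by the definitions() helper in
# posts_controller.py above; the post object and values are hypothetical stand-ins,
# and the author_display_name/author_id lookups are omitted because they need a
# database-backed Post.
class _StubPost(object):
    def __init__(self, attributes):
        self.attributes = attributes

stub_post = _StubPost({"id": 7, "title": "Hello", "body": "First post"})
request_parameters = {"session_token": "abc123"}
merged = dict(list(request_parameters.items()) + list(stub_post.attributes.items()))
merged = {key: str(merged[key]) for key in merged}
# merged == {'session_token': 'abc123', 'id': '7', 'title': 'Hello', 'body': 'First post'}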
from django.db.backends import BaseDatabaseIntrospection
import pyodbc as Database
SQL_AUTOFIELD = -777555
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Map type codes to Django Field types.
data_types_reverse = {
SQL_AUTOFIELD: 'AutoField',
Database.SQL_BIGINT: 'IntegerField',
#Database.SQL_BINARY: ,
Database.SQL_BIT: 'BooleanField',
Database.SQL_CHAR: 'CharField',
Database.SQL_DECIMAL: 'DecimalField',
Database.SQL_DOUBLE: 'FloatField',
Database.SQL_FLOAT: 'FloatField',
Database.SQL_GUID: 'TextField',
Database.SQL_INTEGER: 'IntegerField',
#Database.SQL_LONGVARBINARY: ,
#Database.SQL_LONGVARCHAR: ,
Database.SQL_NUMERIC: 'DecimalField',
Database.SQL_REAL: 'FloatField',
Database.SQL_SMALLINT: 'SmallIntegerField',
Database.SQL_TINYINT: 'SmallIntegerField',
Database.SQL_TYPE_DATE: 'DateField',
Database.SQL_TYPE_TIME: 'TimeField',
Database.SQL_TYPE_TIMESTAMP: 'DateTimeField',
#Database.SQL_VARBINARY: ,
Database.SQL_VARCHAR: 'TextField',
Database.SQL_WCHAR: 'CharField',
Database.SQL_WLONGVARCHAR: 'TextField',
Database.SQL_WVARCHAR: 'TextField',
}
def get_table_list(self, cursor):
"""
Returns a list of table names in the current database.
"""
# TABLES: http://msdn2.microsoft.com/en-us/library/ms186224.aspx
cursor.execute("SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE'")
return [row[0] for row in cursor.fetchall()]
# Or pyodbc specific:
#return [row[2] for row in cursor.tables(tableType='TABLE')]
def _is_auto_field(self, cursor, table_name, column_name):
"""
Checks whether column is Identity
"""
# COLUMNPROPERTY: http://msdn2.microsoft.com/en-us/library/ms174968.aspx
#from django.db import connection
#cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
# (connection.ops.quote_name(table_name), column_name))
cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
(self.connection.ops.quote_name(table_name), column_name))
return cursor.fetchall()[0][0]
def get_table_description(self, cursor, table_name, identity_check=True):
"""Returns a description of the table, with DB-API cursor.description interface.
        The 'identity_check' parameter has been added to the function argspec.
If set to True, the function will check each of the table's fields for the
IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
When a field is found with an IDENTITY property, it is given a custom field number
of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
"""
# map pyodbc's cursor.columns to db-api cursor description
columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table_name)]
items = []
for column in columns:
if identity_check and self._is_auto_field(cursor, table_name, column[0]):
column[1] = SQL_AUTOFIELD
if column[1] == Database.SQL_WVARCHAR and column[3] < 4000:
column[1] = Database.SQL_WCHAR
items.append(column)
return items
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, identity_check=False))])
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
# CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
# CONSTRAINT_TABLE_USAGE: http://msdn2.microsoft.com/en-us/library/ms179883.aspx
# REFERENTIAL_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms179987.aspx
# TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
table_index = self._name_to_index(cursor, table_name)
sql = """
SELECT e.COLUMN_NAME AS column_name,
c.TABLE_NAME AS referenced_table_name,
d.COLUMN_NAME AS referenced_column_name
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS b
ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_TABLE_USAGE AS c
ON b.UNIQUE_CONSTRAINT_NAME = c.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS d
ON c.CONSTRAINT_NAME = d.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS e
ON a.CONSTRAINT_NAME = e.CONSTRAINT_NAME
WHERE a.TABLE_NAME = %s AND a.CONSTRAINT_TYPE = 'FOREIGN KEY'"""
cursor.execute(sql, (table_name,))
return dict([(table_index[item[0]], (self._name_to_index(cursor, item[1])[item[2]], item[1]))
for item in cursor.fetchall()])
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index,
'db_index': boolean representing whether it's a non-unique index}
"""
# CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
# TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
pk_uk_sql = """
SELECT b.COLUMN_NAME, a.CONSTRAINT_TYPE
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS b
ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME AND a.TABLE_NAME = b.TABLE_NAME
WHERE a.TABLE_NAME = %s AND (CONSTRAINT_TYPE = 'PRIMARY KEY' OR CONSTRAINT_TYPE = 'UNIQUE')"""
field_names = [item[0] for item in self.get_table_description(cursor, table_name, identity_check=False)]
indexes, results = {}, {}
cursor.execute(pk_uk_sql, (table_name,))
data = cursor.fetchall()
if data:
results.update(data)
# non-unique, non-compound indexes, only in SS2005?
ix_sql = """
SELECT DISTINCT c.name
FROM sys.columns c
INNER JOIN sys.index_columns ic
ON ic.object_id = c.object_id AND ic.column_id = c.column_id
INNER JOIN sys.indexes ix
ON ix.object_id = ic.object_id AND ix.index_id = ic.index_id
INNER JOIN sys.tables t
ON t.object_id = ix.object_id
WHERE ix.object_id IN (
SELECT ix.object_id
FROM sys.indexes ix
GROUP BY ix.object_id, ix.index_id
HAVING count(1) = 1)
AND ix.is_primary_key = 0
AND ix.is_unique_constraint = 0
AND t.name = %s"""
if self.connection.ops.sql_server_ver >= 2005:
cursor.execute(ix_sql, (table_name,))
for column in [r[0] for r in cursor.fetchall()]:
if column not in results:
results[column] = 'IX'
for field in field_names:
val = results.get(field, None)
indexes[field] = dict(primary_key=(val=='PRIMARY KEY'), unique=(val=='UNIQUE'), db_index=(val=='IX'))
return indexes
#def get_collations_list(self, cursor):
# """
    #    Returns a list of available collations and their descriptions.
# """
# # http://msdn2.microsoft.com/en-us/library/ms184391.aspx
# # http://msdn2.microsoft.com/en-us/library/ms179886.aspx
#
# cursor.execute("SELECT name, description FROM ::fn_helpcollations()")
# return [tuple(row) for row in cursor.fetchall()]
| {
"content_hash": "b2622595ce33b48d92147e0efe70ffee",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 124,
"avg_line_length": 45.26923076923077,
"alnum_prop": 0.6302949387061536,
"repo_name": "chuck211991/django-pyodbc",
"id": "128a77cd44b2983e50b4ce0ab16790b9b73abfae",
"size": "8239",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "sql_server/pyodbc/introspection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "220331"
},
{
"name": "Shell",
"bytes": "244"
}
],
"symlink_target": ""
} |
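# A rough sketch of the return shapes documented by the introspection methods above;
# the table and column names are made up and only illustrate the dictionary formats
# described in the get_relations()/get_indexes() docstrings.
example_relations = {
    2: (0, "customer"),  # column index 2 is a foreign key to column index 0 of "customer"
}
example_indexes = {
    "id": {"primary_key": True, "unique": False, "db_index": False},
    "email": {"primary_key": False, "unique": True, "db_index": False},
    "name": {"primary_key": False, "unique": False, "db_index": True},
}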
"""Test Home Assistant package util methods."""
import asyncio
import logging
import os
from subprocess import PIPE
import sys
from unittest.mock import MagicMock, call, patch
import pkg_resources
import pytest
import homeassistant.util.package as package
RESOURCE_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "resources")
)
TEST_NEW_REQ = "pyhelloworld3==1.0.0"
TEST_ZIP_REQ = "file://{}#{}".format(
os.path.join(RESOURCE_DIR, "pyhelloworld3.zip"), TEST_NEW_REQ
)
@pytest.fixture
def mock_sys():
"""Mock sys."""
with patch("homeassistant.util.package.sys", spec=object) as sys_mock:
sys_mock.executable = "python3"
yield sys_mock
@pytest.fixture
def deps_dir():
"""Return path to deps directory."""
return os.path.abspath("/deps_dir")
@pytest.fixture
def lib_dir(deps_dir):
"""Return path to lib directory."""
return os.path.join(deps_dir, "lib_dir")
@pytest.fixture
def mock_popen(lib_dir):
"""Return a Popen mock."""
with patch("homeassistant.util.package.Popen") as popen_mock:
popen_mock.return_value.communicate.return_value = (
bytes(lib_dir, "utf-8"),
b"error",
)
popen_mock.return_value.returncode = 0
yield popen_mock
@pytest.fixture
def mock_env_copy():
"""Mock os.environ.copy."""
with patch("homeassistant.util.package.os.environ.copy") as env_copy:
env_copy.return_value = {}
yield env_copy
@pytest.fixture
def mock_venv():
"""Mock homeassistant.util.package.is_virtual_env."""
with patch("homeassistant.util.package.is_virtual_env") as mock:
mock.return_value = True
yield mock
def mock_async_subprocess():
"""Return an async Popen mock."""
async_popen = MagicMock()
async def communicate(input=None):
"""Communicate mock."""
stdout = bytes("/deps_dir/lib_dir", "utf-8")
return (stdout, None)
async_popen.communicate = communicate
return async_popen
def test_install(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an install attempt on a package that doesn't exist."""
env = mock_env_copy()
assert package.install_package(TEST_NEW_REQ, False)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
[mock_sys.executable, "-m", "pip", "install", "--quiet", TEST_NEW_REQ],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_upgrade(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an upgrade attempt on a package."""
env = mock_env_copy()
assert package.install_package(TEST_NEW_REQ)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
[
mock_sys.executable,
"-m",
"pip",
"install",
"--quiet",
TEST_NEW_REQ,
"--upgrade",
],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_target(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an install with a target."""
target = "target_folder"
env = mock_env_copy()
env["PYTHONUSERBASE"] = os.path.abspath(target)
mock_venv.return_value = False
mock_sys.platform = "linux"
args = [
mock_sys.executable,
"-m",
"pip",
"install",
"--quiet",
TEST_NEW_REQ,
"--user",
"--prefix=",
]
assert package.install_package(TEST_NEW_REQ, False, target=target)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
args, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_target_venv(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an install with a target in a virtual environment."""
target = "target_folder"
with pytest.raises(AssertionError):
package.install_package(TEST_NEW_REQ, False, target=target)
def test_install_error(caplog, mock_sys, mock_popen, mock_venv):
"""Test an install with a target."""
caplog.set_level(logging.WARNING)
mock_popen.return_value.returncode = 1
assert not package.install_package(TEST_NEW_REQ)
assert len(caplog.records) == 1
for record in caplog.records:
assert record.levelname == "ERROR"
def test_install_constraint(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test install with constraint file on not installed package."""
env = mock_env_copy()
constraints = "constraints_file.txt"
assert package.install_package(TEST_NEW_REQ, False, constraints=constraints)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
[
mock_sys.executable,
"-m",
"pip",
"install",
"--quiet",
TEST_NEW_REQ,
"--constraint",
constraints,
],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_find_links(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test install with find-links on not installed package."""
env = mock_env_copy()
link = "https://wheels-repository"
assert package.install_package(TEST_NEW_REQ, False, find_links=link)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
[
mock_sys.executable,
"-m",
"pip",
"install",
"--quiet",
TEST_NEW_REQ,
"--find-links",
link,
"--prefer-binary",
],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
assert mock_popen.return_value.communicate.call_count == 1
async def test_async_get_user_site(mock_env_copy):
"""Test async get user site directory."""
deps_dir = "/deps_dir"
env = mock_env_copy()
env["PYTHONUSERBASE"] = os.path.abspath(deps_dir)
args = [sys.executable, "-m", "site", "--user-site"]
with patch(
"homeassistant.util.package.asyncio.create_subprocess_exec",
return_value=mock_async_subprocess(),
) as popen_mock:
ret = await package.async_get_user_site(deps_dir)
assert popen_mock.call_count == 1
assert popen_mock.call_args == call(
*args,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL,
env=env,
)
assert ret == os.path.join(deps_dir, "lib_dir")
def test_check_package_global():
"""Test for an installed package."""
installed_package = list(pkg_resources.working_set)[0].project_name
assert package.is_installed(installed_package)
def test_check_package_zip():
"""Test for an installed zip package."""
assert not package.is_installed(TEST_ZIP_REQ)
| {
"content_hash": "4a21a0e1de3b6b4096b2dc7d0c18ca27",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 80,
"avg_line_length": 28.68951612903226,
"alnum_prop": 0.6143359100491919,
"repo_name": "partofthething/home-assistant",
"id": "0c25166244440670fefe784071ab1971906c64ba",
"size": "7115",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/util/test_package.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
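# A minimal sketch of calling the API exercised by the tests above, assuming a Home
# Assistant checkout where homeassistant.util.package is importable; the requirement
# string is a placeholder.
from homeassistant.util import package

requirement = "pyhelloworld3==1.0.0"
if not package.is_installed(requirement):
    # Second positional argument False skips the --upgrade flag, as in test_install above.
    package.install_package(requirement, False)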
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
class PropertyValue_ReferenceValuePathElement(ProtocolBuffer.ProtocolMessage):
has_type_ = 0
type_ = ""
has_id_ = 0
id_ = 0
has_name_ = 0
name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def type(self): return self.type_
def set_type(self, x):
self.has_type_ = 1
self.type_ = x
def clear_type(self):
if self.has_type_:
self.has_type_ = 0
self.type_ = ""
def has_type(self): return self.has_type_
def id(self): return self.id_
def set_id(self, x):
self.has_id_ = 1
self.id_ = x
def clear_id(self):
if self.has_id_:
self.has_id_ = 0
self.id_ = 0
def has_id(self): return self.has_id_
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_type()): self.set_type(x.type())
if (x.has_id()): self.set_id(x.id())
if (x.has_name()): self.set_name(x.name())
def Equals(self, x):
if x is self: return 1
if self.has_type_ != x.has_type_: return 0
if self.has_type_ and self.type_ != x.type_: return 0
if self.has_id_ != x.has_id_: return 0
if self.has_id_ and self.id_ != x.id_: return 0
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_type_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: type not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.type_))
if (self.has_id_): n += 2 + self.lengthVarInt64(self.id_)
if (self.has_name_): n += 2 + self.lengthString(len(self.name_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_type_):
n += 1
n += self.lengthString(len(self.type_))
if (self.has_id_): n += 2 + self.lengthVarInt64(self.id_)
if (self.has_name_): n += 2 + self.lengthString(len(self.name_))
return n
def Clear(self):
self.clear_type()
self.clear_id()
self.clear_name()
def OutputUnchecked(self, out):
out.putVarInt32(122)
out.putPrefixedString(self.type_)
if (self.has_id_):
out.putVarInt32(128)
out.putVarInt64(self.id_)
if (self.has_name_):
out.putVarInt32(138)
out.putPrefixedString(self.name_)
def OutputPartial(self, out):
if (self.has_type_):
out.putVarInt32(122)
out.putPrefixedString(self.type_)
if (self.has_id_):
out.putVarInt32(128)
out.putVarInt64(self.id_)
if (self.has_name_):
out.putVarInt32(138)
out.putPrefixedString(self.name_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 116: break
if tt == 122:
self.set_type(d.getPrefixedString())
continue
if tt == 128:
self.set_id(d.getVarInt64())
continue
if tt == 138:
self.set_name(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_type_: res+=prefix+("type: %s\n" % self.DebugFormatString(self.type_))
if self.has_id_: res+=prefix+("id: %s\n" % self.DebugFormatInt64(self.id_))
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
return res
class PropertyValue_PointValue(ProtocolBuffer.ProtocolMessage):
has_x_ = 0
x_ = 0.0
has_y_ = 0
y_ = 0.0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def x(self): return self.x_
def set_x(self, x):
self.has_x_ = 1
self.x_ = x
def clear_x(self):
if self.has_x_:
self.has_x_ = 0
self.x_ = 0.0
def has_x(self): return self.has_x_
def y(self): return self.y_
def set_y(self, x):
self.has_y_ = 1
self.y_ = x
def clear_y(self):
if self.has_y_:
self.has_y_ = 0
self.y_ = 0.0
def has_y(self): return self.has_y_
def MergeFrom(self, x):
assert x is not self
if (x.has_x()): self.set_x(x.x())
if (x.has_y()): self.set_y(x.y())
def Equals(self, x):
if x is self: return 1
if self.has_x_ != x.has_x_: return 0
if self.has_x_ and self.x_ != x.x_: return 0
if self.has_y_ != x.has_y_: return 0
if self.has_y_ and self.y_ != x.y_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_x_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: x not set.')
if (not self.has_y_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: y not set.')
return initialized
def ByteSize(self):
n = 0
return n + 18
def ByteSizePartial(self):
n = 0
if (self.has_x_):
n += 9
if (self.has_y_):
n += 9
return n
def Clear(self):
self.clear_x()
self.clear_y()
def OutputUnchecked(self, out):
out.putVarInt32(49)
out.putDouble(self.x_)
out.putVarInt32(57)
out.putDouble(self.y_)
def OutputPartial(self, out):
if (self.has_x_):
out.putVarInt32(49)
out.putDouble(self.x_)
if (self.has_y_):
out.putVarInt32(57)
out.putDouble(self.y_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 44: break
if tt == 49:
self.set_x(d.getDouble())
continue
if tt == 57:
self.set_y(d.getDouble())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_x_: res+=prefix+("x: %s\n" % self.DebugFormat(self.x_))
if self.has_y_: res+=prefix+("y: %s\n" % self.DebugFormat(self.y_))
return res
class PropertyValue_UserValue(ProtocolBuffer.ProtocolMessage):
has_email_ = 0
email_ = ""
has_auth_domain_ = 0
auth_domain_ = ""
has_nickname_ = 0
nickname_ = ""
has_gaiaid_ = 0
gaiaid_ = 0
has_obfuscated_gaiaid_ = 0
obfuscated_gaiaid_ = ""
has_federated_identity_ = 0
federated_identity_ = ""
has_federated_provider_ = 0
federated_provider_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def email(self): return self.email_
def set_email(self, x):
self.has_email_ = 1
self.email_ = x
def clear_email(self):
if self.has_email_:
self.has_email_ = 0
self.email_ = ""
def has_email(self): return self.has_email_
def auth_domain(self): return self.auth_domain_
def set_auth_domain(self, x):
self.has_auth_domain_ = 1
self.auth_domain_ = x
def clear_auth_domain(self):
if self.has_auth_domain_:
self.has_auth_domain_ = 0
self.auth_domain_ = ""
def has_auth_domain(self): return self.has_auth_domain_
def nickname(self): return self.nickname_
def set_nickname(self, x):
self.has_nickname_ = 1
self.nickname_ = x
def clear_nickname(self):
if self.has_nickname_:
self.has_nickname_ = 0
self.nickname_ = ""
def has_nickname(self): return self.has_nickname_
def gaiaid(self): return self.gaiaid_
def set_gaiaid(self, x):
self.has_gaiaid_ = 1
self.gaiaid_ = x
def clear_gaiaid(self):
if self.has_gaiaid_:
self.has_gaiaid_ = 0
self.gaiaid_ = 0
def has_gaiaid(self): return self.has_gaiaid_
def obfuscated_gaiaid(self): return self.obfuscated_gaiaid_
def set_obfuscated_gaiaid(self, x):
self.has_obfuscated_gaiaid_ = 1
self.obfuscated_gaiaid_ = x
def clear_obfuscated_gaiaid(self):
if self.has_obfuscated_gaiaid_:
self.has_obfuscated_gaiaid_ = 0
self.obfuscated_gaiaid_ = ""
def has_obfuscated_gaiaid(self): return self.has_obfuscated_gaiaid_
def federated_identity(self): return self.federated_identity_
def set_federated_identity(self, x):
self.has_federated_identity_ = 1
self.federated_identity_ = x
def clear_federated_identity(self):
if self.has_federated_identity_:
self.has_federated_identity_ = 0
self.federated_identity_ = ""
def has_federated_identity(self): return self.has_federated_identity_
def federated_provider(self): return self.federated_provider_
def set_federated_provider(self, x):
self.has_federated_provider_ = 1
self.federated_provider_ = x
def clear_federated_provider(self):
if self.has_federated_provider_:
self.has_federated_provider_ = 0
self.federated_provider_ = ""
def has_federated_provider(self): return self.has_federated_provider_
def MergeFrom(self, x):
assert x is not self
if (x.has_email()): self.set_email(x.email())
if (x.has_auth_domain()): self.set_auth_domain(x.auth_domain())
if (x.has_nickname()): self.set_nickname(x.nickname())
if (x.has_gaiaid()): self.set_gaiaid(x.gaiaid())
if (x.has_obfuscated_gaiaid()): self.set_obfuscated_gaiaid(x.obfuscated_gaiaid())
if (x.has_federated_identity()): self.set_federated_identity(x.federated_identity())
if (x.has_federated_provider()): self.set_federated_provider(x.federated_provider())
def Equals(self, x):
if x is self: return 1
if self.has_email_ != x.has_email_: return 0
if self.has_email_ and self.email_ != x.email_: return 0
if self.has_auth_domain_ != x.has_auth_domain_: return 0
if self.has_auth_domain_ and self.auth_domain_ != x.auth_domain_: return 0
if self.has_nickname_ != x.has_nickname_: return 0
if self.has_nickname_ and self.nickname_ != x.nickname_: return 0
if self.has_gaiaid_ != x.has_gaiaid_: return 0
if self.has_gaiaid_ and self.gaiaid_ != x.gaiaid_: return 0
if self.has_obfuscated_gaiaid_ != x.has_obfuscated_gaiaid_: return 0
if self.has_obfuscated_gaiaid_ and self.obfuscated_gaiaid_ != x.obfuscated_gaiaid_: return 0
if self.has_federated_identity_ != x.has_federated_identity_: return 0
if self.has_federated_identity_ and self.federated_identity_ != x.federated_identity_: return 0
if self.has_federated_provider_ != x.has_federated_provider_: return 0
if self.has_federated_provider_ and self.federated_provider_ != x.federated_provider_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_email_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: email not set.')
if (not self.has_auth_domain_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: auth_domain not set.')
if (not self.has_gaiaid_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: gaiaid not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.email_))
n += self.lengthString(len(self.auth_domain_))
if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
n += self.lengthVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_): n += 2 + self.lengthString(len(self.obfuscated_gaiaid_))
if (self.has_federated_identity_): n += 2 + self.lengthString(len(self.federated_identity_))
if (self.has_federated_provider_): n += 2 + self.lengthString(len(self.federated_provider_))
return n + 4
def ByteSizePartial(self):
n = 0
if (self.has_email_):
n += 1
n += self.lengthString(len(self.email_))
if (self.has_auth_domain_):
n += 1
n += self.lengthString(len(self.auth_domain_))
if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
if (self.has_gaiaid_):
n += 2
n += self.lengthVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_): n += 2 + self.lengthString(len(self.obfuscated_gaiaid_))
if (self.has_federated_identity_): n += 2 + self.lengthString(len(self.federated_identity_))
if (self.has_federated_provider_): n += 2 + self.lengthString(len(self.federated_provider_))
return n
def Clear(self):
self.clear_email()
self.clear_auth_domain()
self.clear_nickname()
self.clear_gaiaid()
self.clear_obfuscated_gaiaid()
self.clear_federated_identity()
self.clear_federated_provider()
def OutputUnchecked(self, out):
out.putVarInt32(74)
out.putPrefixedString(self.email_)
out.putVarInt32(82)
out.putPrefixedString(self.auth_domain_)
if (self.has_nickname_):
out.putVarInt32(90)
out.putPrefixedString(self.nickname_)
out.putVarInt32(144)
out.putVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_):
out.putVarInt32(154)
out.putPrefixedString(self.obfuscated_gaiaid_)
if (self.has_federated_identity_):
out.putVarInt32(170)
out.putPrefixedString(self.federated_identity_)
if (self.has_federated_provider_):
out.putVarInt32(178)
out.putPrefixedString(self.federated_provider_)
def OutputPartial(self, out):
if (self.has_email_):
out.putVarInt32(74)
out.putPrefixedString(self.email_)
if (self.has_auth_domain_):
out.putVarInt32(82)
out.putPrefixedString(self.auth_domain_)
if (self.has_nickname_):
out.putVarInt32(90)
out.putPrefixedString(self.nickname_)
if (self.has_gaiaid_):
out.putVarInt32(144)
out.putVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_):
out.putVarInt32(154)
out.putPrefixedString(self.obfuscated_gaiaid_)
if (self.has_federated_identity_):
out.putVarInt32(170)
out.putPrefixedString(self.federated_identity_)
if (self.has_federated_provider_):
out.putVarInt32(178)
out.putPrefixedString(self.federated_provider_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 68: break
if tt == 74:
self.set_email(d.getPrefixedString())
continue
if tt == 82:
self.set_auth_domain(d.getPrefixedString())
continue
if tt == 90:
self.set_nickname(d.getPrefixedString())
continue
if tt == 144:
self.set_gaiaid(d.getVarInt64())
continue
if tt == 154:
self.set_obfuscated_gaiaid(d.getPrefixedString())
continue
if tt == 170:
self.set_federated_identity(d.getPrefixedString())
continue
if tt == 178:
self.set_federated_provider(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_email_: res+=prefix+("email: %s\n" % self.DebugFormatString(self.email_))
if self.has_auth_domain_: res+=prefix+("auth_domain: %s\n" % self.DebugFormatString(self.auth_domain_))
if self.has_nickname_: res+=prefix+("nickname: %s\n" % self.DebugFormatString(self.nickname_))
if self.has_gaiaid_: res+=prefix+("gaiaid: %s\n" % self.DebugFormatInt64(self.gaiaid_))
if self.has_obfuscated_gaiaid_: res+=prefix+("obfuscated_gaiaid: %s\n" % self.DebugFormatString(self.obfuscated_gaiaid_))
if self.has_federated_identity_: res+=prefix+("federated_identity: %s\n" % self.DebugFormatString(self.federated_identity_))
if self.has_federated_provider_: res+=prefix+("federated_provider: %s\n" % self.DebugFormatString(self.federated_provider_))
return res
class PropertyValue_ReferenceValue(ProtocolBuffer.ProtocolMessage):
has_app_ = 0
app_ = ""
has_name_space_ = 0
name_space_ = ""
def __init__(self, contents=None):
self.pathelement_ = []
if contents is not None: self.MergeFromString(contents)
def app(self): return self.app_
def set_app(self, x):
self.has_app_ = 1
self.app_ = x
def clear_app(self):
if self.has_app_:
self.has_app_ = 0
self.app_ = ""
def has_app(self): return self.has_app_
def name_space(self): return self.name_space_
def set_name_space(self, x):
self.has_name_space_ = 1
self.name_space_ = x
def clear_name_space(self):
if self.has_name_space_:
self.has_name_space_ = 0
self.name_space_ = ""
def has_name_space(self): return self.has_name_space_
def pathelement_size(self): return len(self.pathelement_)
def pathelement_list(self): return self.pathelement_
def pathelement(self, i):
return self.pathelement_[i]
def mutable_pathelement(self, i):
return self.pathelement_[i]
def add_pathelement(self):
x = PropertyValue_ReferenceValuePathElement()
self.pathelement_.append(x)
return x
def clear_pathelement(self):
self.pathelement_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_app()): self.set_app(x.app())
if (x.has_name_space()): self.set_name_space(x.name_space())
for i in xrange(x.pathelement_size()): self.add_pathelement().CopyFrom(x.pathelement(i))
def Equals(self, x):
if x is self: return 1
if self.has_app_ != x.has_app_: return 0
if self.has_app_ and self.app_ != x.app_: return 0
if self.has_name_space_ != x.has_name_space_: return 0
if self.has_name_space_ and self.name_space_ != x.name_space_: return 0
if len(self.pathelement_) != len(x.pathelement_): return 0
for e1, e2 in zip(self.pathelement_, x.pathelement_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app not set.')
for p in self.pathelement_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_))
if (self.has_name_space_): n += 2 + self.lengthString(len(self.name_space_))
n += 2 * len(self.pathelement_)
for i in xrange(len(self.pathelement_)): n += self.pathelement_[i].ByteSize()
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_app_):
n += 1
n += self.lengthString(len(self.app_))
if (self.has_name_space_): n += 2 + self.lengthString(len(self.name_space_))
n += 2 * len(self.pathelement_)
for i in xrange(len(self.pathelement_)): n += self.pathelement_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_app()
self.clear_name_space()
self.clear_pathelement()
def OutputUnchecked(self, out):
out.putVarInt32(106)
out.putPrefixedString(self.app_)
for i in xrange(len(self.pathelement_)):
out.putVarInt32(115)
self.pathelement_[i].OutputUnchecked(out)
out.putVarInt32(116)
if (self.has_name_space_):
out.putVarInt32(162)
out.putPrefixedString(self.name_space_)
def OutputPartial(self, out):
if (self.has_app_):
out.putVarInt32(106)
out.putPrefixedString(self.app_)
for i in xrange(len(self.pathelement_)):
out.putVarInt32(115)
self.pathelement_[i].OutputPartial(out)
out.putVarInt32(116)
if (self.has_name_space_):
out.putVarInt32(162)
out.putPrefixedString(self.name_space_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 100: break
if tt == 106:
self.set_app(d.getPrefixedString())
continue
if tt == 115:
self.add_pathelement().TryMerge(d)
continue
if tt == 162:
self.set_name_space(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
if self.has_name_space_: res+=prefix+("name_space: %s\n" % self.DebugFormatString(self.name_space_))
cnt=0
for e in self.pathelement_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("PathElement%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
class PropertyValue(ProtocolBuffer.ProtocolMessage):
has_int64value_ = 0
int64value_ = 0
has_booleanvalue_ = 0
booleanvalue_ = 0
has_stringvalue_ = 0
stringvalue_ = ""
has_doublevalue_ = 0
doublevalue_ = 0.0
has_pointvalue_ = 0
pointvalue_ = None
has_uservalue_ = 0
uservalue_ = None
has_referencevalue_ = 0
referencevalue_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def int64value(self): return self.int64value_
def set_int64value(self, x):
self.has_int64value_ = 1
self.int64value_ = x
def clear_int64value(self):
if self.has_int64value_:
self.has_int64value_ = 0
self.int64value_ = 0
def has_int64value(self): return self.has_int64value_
def booleanvalue(self): return self.booleanvalue_
def set_booleanvalue(self, x):
self.has_booleanvalue_ = 1
self.booleanvalue_ = x
def clear_booleanvalue(self):
if self.has_booleanvalue_:
self.has_booleanvalue_ = 0
self.booleanvalue_ = 0
def has_booleanvalue(self): return self.has_booleanvalue_
def stringvalue(self): return self.stringvalue_
def set_stringvalue(self, x):
self.has_stringvalue_ = 1
self.stringvalue_ = x
def clear_stringvalue(self):
if self.has_stringvalue_:
self.has_stringvalue_ = 0
self.stringvalue_ = ""
def has_stringvalue(self): return self.has_stringvalue_
def doublevalue(self): return self.doublevalue_
def set_doublevalue(self, x):
self.has_doublevalue_ = 1
self.doublevalue_ = x
def clear_doublevalue(self):
if self.has_doublevalue_:
self.has_doublevalue_ = 0
self.doublevalue_ = 0.0
def has_doublevalue(self): return self.has_doublevalue_
def pointvalue(self):
if self.pointvalue_ is None:
self.lazy_init_lock_.acquire()
try:
if self.pointvalue_ is None: self.pointvalue_ = PropertyValue_PointValue()
finally:
self.lazy_init_lock_.release()
return self.pointvalue_
def mutable_pointvalue(self): self.has_pointvalue_ = 1; return self.pointvalue()
def clear_pointvalue(self):
if self.has_pointvalue_:
self.has_pointvalue_ = 0;
if self.pointvalue_ is not None: self.pointvalue_.Clear()
def has_pointvalue(self): return self.has_pointvalue_
def uservalue(self):
if self.uservalue_ is None:
self.lazy_init_lock_.acquire()
try:
if self.uservalue_ is None: self.uservalue_ = PropertyValue_UserValue()
finally:
self.lazy_init_lock_.release()
return self.uservalue_
def mutable_uservalue(self): self.has_uservalue_ = 1; return self.uservalue()
def clear_uservalue(self):
if self.has_uservalue_:
self.has_uservalue_ = 0;
if self.uservalue_ is not None: self.uservalue_.Clear()
def has_uservalue(self): return self.has_uservalue_
def referencevalue(self):
if self.referencevalue_ is None:
self.lazy_init_lock_.acquire()
try:
if self.referencevalue_ is None: self.referencevalue_ = PropertyValue_ReferenceValue()
finally:
self.lazy_init_lock_.release()
return self.referencevalue_
def mutable_referencevalue(self): self.has_referencevalue_ = 1; return self.referencevalue()
def clear_referencevalue(self):
if self.has_referencevalue_:
self.has_referencevalue_ = 0;
if self.referencevalue_ is not None: self.referencevalue_.Clear()
def has_referencevalue(self): return self.has_referencevalue_
def MergeFrom(self, x):
assert x is not self
if (x.has_int64value()): self.set_int64value(x.int64value())
if (x.has_booleanvalue()): self.set_booleanvalue(x.booleanvalue())
if (x.has_stringvalue()): self.set_stringvalue(x.stringvalue())
if (x.has_doublevalue()): self.set_doublevalue(x.doublevalue())
if (x.has_pointvalue()): self.mutable_pointvalue().MergeFrom(x.pointvalue())
if (x.has_uservalue()): self.mutable_uservalue().MergeFrom(x.uservalue())
if (x.has_referencevalue()): self.mutable_referencevalue().MergeFrom(x.referencevalue())
def Equals(self, x):
if x is self: return 1
if self.has_int64value_ != x.has_int64value_: return 0
if self.has_int64value_ and self.int64value_ != x.int64value_: return 0
if self.has_booleanvalue_ != x.has_booleanvalue_: return 0
if self.has_booleanvalue_ and self.booleanvalue_ != x.booleanvalue_: return 0
if self.has_stringvalue_ != x.has_stringvalue_: return 0
if self.has_stringvalue_ and self.stringvalue_ != x.stringvalue_: return 0
if self.has_doublevalue_ != x.has_doublevalue_: return 0
if self.has_doublevalue_ and self.doublevalue_ != x.doublevalue_: return 0
if self.has_pointvalue_ != x.has_pointvalue_: return 0
if self.has_pointvalue_ and self.pointvalue_ != x.pointvalue_: return 0
if self.has_uservalue_ != x.has_uservalue_: return 0
if self.has_uservalue_ and self.uservalue_ != x.uservalue_: return 0
if self.has_referencevalue_ != x.has_referencevalue_: return 0
if self.has_referencevalue_ and self.referencevalue_ != x.referencevalue_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_pointvalue_ and not self.pointvalue_.IsInitialized(debug_strs)): initialized = 0
if (self.has_uservalue_ and not self.uservalue_.IsInitialized(debug_strs)): initialized = 0
if (self.has_referencevalue_ and not self.referencevalue_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_int64value_): n += 1 + self.lengthVarInt64(self.int64value_)
if (self.has_booleanvalue_): n += 2
if (self.has_stringvalue_): n += 1 + self.lengthString(len(self.stringvalue_))
if (self.has_doublevalue_): n += 9
if (self.has_pointvalue_): n += 2 + self.pointvalue_.ByteSize()
if (self.has_uservalue_): n += 2 + self.uservalue_.ByteSize()
if (self.has_referencevalue_): n += 2 + self.referencevalue_.ByteSize()
return n
def ByteSizePartial(self):
n = 0
if (self.has_int64value_): n += 1 + self.lengthVarInt64(self.int64value_)
if (self.has_booleanvalue_): n += 2
if (self.has_stringvalue_): n += 1 + self.lengthString(len(self.stringvalue_))
if (self.has_doublevalue_): n += 9
if (self.has_pointvalue_): n += 2 + self.pointvalue_.ByteSizePartial()
if (self.has_uservalue_): n += 2 + self.uservalue_.ByteSizePartial()
if (self.has_referencevalue_): n += 2 + self.referencevalue_.ByteSizePartial()
return n
def Clear(self):
self.clear_int64value()
self.clear_booleanvalue()
self.clear_stringvalue()
self.clear_doublevalue()
self.clear_pointvalue()
self.clear_uservalue()
self.clear_referencevalue()
def OutputUnchecked(self, out):
if (self.has_int64value_):
out.putVarInt32(8)
out.putVarInt64(self.int64value_)
if (self.has_booleanvalue_):
out.putVarInt32(16)
out.putBoolean(self.booleanvalue_)
if (self.has_stringvalue_):
out.putVarInt32(26)
out.putPrefixedString(self.stringvalue_)
if (self.has_doublevalue_):
out.putVarInt32(33)
out.putDouble(self.doublevalue_)
if (self.has_pointvalue_):
out.putVarInt32(43)
self.pointvalue_.OutputUnchecked(out)
out.putVarInt32(44)
if (self.has_uservalue_):
out.putVarInt32(67)
self.uservalue_.OutputUnchecked(out)
out.putVarInt32(68)
if (self.has_referencevalue_):
out.putVarInt32(99)
self.referencevalue_.OutputUnchecked(out)
out.putVarInt32(100)
def OutputPartial(self, out):
if (self.has_int64value_):
out.putVarInt32(8)
out.putVarInt64(self.int64value_)
if (self.has_booleanvalue_):
out.putVarInt32(16)
out.putBoolean(self.booleanvalue_)
if (self.has_stringvalue_):
out.putVarInt32(26)
out.putPrefixedString(self.stringvalue_)
if (self.has_doublevalue_):
out.putVarInt32(33)
out.putDouble(self.doublevalue_)
if (self.has_pointvalue_):
out.putVarInt32(43)
self.pointvalue_.OutputPartial(out)
out.putVarInt32(44)
if (self.has_uservalue_):
out.putVarInt32(67)
self.uservalue_.OutputPartial(out)
out.putVarInt32(68)
if (self.has_referencevalue_):
out.putVarInt32(99)
self.referencevalue_.OutputPartial(out)
out.putVarInt32(100)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_int64value(d.getVarInt64())
continue
if tt == 16:
self.set_booleanvalue(d.getBoolean())
continue
if tt == 26:
self.set_stringvalue(d.getPrefixedString())
continue
if tt == 33:
self.set_doublevalue(d.getDouble())
continue
if tt == 43:
self.mutable_pointvalue().TryMerge(d)
continue
if tt == 67:
self.mutable_uservalue().TryMerge(d)
continue
if tt == 99:
self.mutable_referencevalue().TryMerge(d)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_int64value_: res+=prefix+("int64Value: %s\n" % self.DebugFormatInt64(self.int64value_))
if self.has_booleanvalue_: res+=prefix+("booleanValue: %s\n" % self.DebugFormatBool(self.booleanvalue_))
if self.has_stringvalue_: res+=prefix+("stringValue: %s\n" % self.DebugFormatString(self.stringvalue_))
if self.has_doublevalue_: res+=prefix+("doubleValue: %s\n" % self.DebugFormat(self.doublevalue_))
if self.has_pointvalue_:
res+=prefix+"PointValue {\n"
res+=self.pointvalue_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
if self.has_uservalue_:
res+=prefix+"UserValue {\n"
res+=self.uservalue_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
if self.has_referencevalue_:
res+=prefix+"ReferenceValue {\n"
res+=self.referencevalue_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kint64Value = 1
kbooleanValue = 2
kstringValue = 3
kdoubleValue = 4
kPointValueGroup = 5
kPointValuex = 6
kPointValuey = 7
kUserValueGroup = 8
kUserValueemail = 9
kUserValueauth_domain = 10
kUserValuenickname = 11
kUserValuegaiaid = 18
kUserValueobfuscated_gaiaid = 19
kUserValuefederated_identity = 21
kUserValuefederated_provider = 22
kReferenceValueGroup = 12
kReferenceValueapp = 13
kReferenceValuename_space = 20
kReferenceValuePathElementGroup = 14
kReferenceValuePathElementtype = 15
kReferenceValuePathElementid = 16
kReferenceValuePathElementname = 17
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "int64Value",
2: "booleanValue",
3: "stringValue",
4: "doubleValue",
5: "PointValue",
6: "x",
7: "y",
8: "UserValue",
9: "email",
10: "auth_domain",
11: "nickname",
12: "ReferenceValue",
13: "app",
14: "PathElement",
15: "type",
16: "id",
17: "name",
18: "gaiaid",
19: "obfuscated_gaiaid",
20: "name_space",
21: "federated_identity",
22: "federated_provider",
}, 22)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.DOUBLE,
5: ProtocolBuffer.Encoder.STARTGROUP,
6: ProtocolBuffer.Encoder.DOUBLE,
7: ProtocolBuffer.Encoder.DOUBLE,
8: ProtocolBuffer.Encoder.STARTGROUP,
9: ProtocolBuffer.Encoder.STRING,
10: ProtocolBuffer.Encoder.STRING,
11: ProtocolBuffer.Encoder.STRING,
12: ProtocolBuffer.Encoder.STARTGROUP,
13: ProtocolBuffer.Encoder.STRING,
14: ProtocolBuffer.Encoder.STARTGROUP,
15: ProtocolBuffer.Encoder.STRING,
16: ProtocolBuffer.Encoder.NUMERIC,
17: ProtocolBuffer.Encoder.STRING,
18: ProtocolBuffer.Encoder.NUMERIC,
19: ProtocolBuffer.Encoder.STRING,
20: ProtocolBuffer.Encoder.STRING,
21: ProtocolBuffer.Encoder.STRING,
22: ProtocolBuffer.Encoder.STRING,
}, 22, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.PropertyValue'
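# A small usage sketch (not part of the generated module) for the accessor API of the
# PropertyValue message defined above; it only calls methods defined in this file and
# has no import-time side effects.
def _property_value_usage_sketch():
  pv = PropertyValue()
  pv.set_stringvalue("hello")
  assert pv.has_stringvalue() and pv.IsInitialized()
  point = pv.mutable_pointvalue()
  point.set_x(1.0)
  point.set_y(2.0)
  assert pv.pointvalue().x() == 1.0 and pv.IsInitialized()
  return pv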
class Property(ProtocolBuffer.ProtocolMessage):
BLOB = 14
TEXT = 15
BYTESTRING = 16
ATOM_CATEGORY = 1
ATOM_LINK = 2
ATOM_TITLE = 3
ATOM_CONTENT = 4
ATOM_SUMMARY = 5
ATOM_AUTHOR = 6
GD_WHEN = 7
GD_EMAIL = 8
GEORSS_POINT = 9
GD_IM = 10
GD_PHONENUMBER = 11
GD_POSTALADDRESS = 12
GD_RATING = 13
BLOBKEY = 17
_Meaning_NAMES = {
14: "BLOB",
15: "TEXT",
16: "BYTESTRING",
1: "ATOM_CATEGORY",
2: "ATOM_LINK",
3: "ATOM_TITLE",
4: "ATOM_CONTENT",
5: "ATOM_SUMMARY",
6: "ATOM_AUTHOR",
7: "GD_WHEN",
8: "GD_EMAIL",
9: "GEORSS_POINT",
10: "GD_IM",
11: "GD_PHONENUMBER",
12: "GD_POSTALADDRESS",
13: "GD_RATING",
17: "BLOBKEY",
}
def Meaning_Name(cls, x): return cls._Meaning_NAMES.get(x, "")
Meaning_Name = classmethod(Meaning_Name)
has_meaning_ = 0
meaning_ = 0
has_meaning_uri_ = 0
meaning_uri_ = ""
has_name_ = 0
name_ = ""
has_value_ = 0
has_multiple_ = 0
multiple_ = 0
def __init__(self, contents=None):
self.value_ = PropertyValue()
if contents is not None: self.MergeFromString(contents)
def meaning(self): return self.meaning_
def set_meaning(self, x):
self.has_meaning_ = 1
self.meaning_ = x
def clear_meaning(self):
if self.has_meaning_:
self.has_meaning_ = 0
self.meaning_ = 0
def has_meaning(self): return self.has_meaning_
def meaning_uri(self): return self.meaning_uri_
def set_meaning_uri(self, x):
self.has_meaning_uri_ = 1
self.meaning_uri_ = x
def clear_meaning_uri(self):
if self.has_meaning_uri_:
self.has_meaning_uri_ = 0
self.meaning_uri_ = ""
def has_meaning_uri(self): return self.has_meaning_uri_
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def value(self): return self.value_
def mutable_value(self): self.has_value_ = 1; return self.value_
def clear_value(self):self.has_value_ = 0; self.value_.Clear()
def has_value(self): return self.has_value_
def multiple(self): return self.multiple_
def set_multiple(self, x):
self.has_multiple_ = 1
self.multiple_ = x
def clear_multiple(self):
if self.has_multiple_:
self.has_multiple_ = 0
self.multiple_ = 0
def has_multiple(self): return self.has_multiple_
def MergeFrom(self, x):
assert x is not self
if (x.has_meaning()): self.set_meaning(x.meaning())
if (x.has_meaning_uri()): self.set_meaning_uri(x.meaning_uri())
if (x.has_name()): self.set_name(x.name())
if (x.has_value()): self.mutable_value().MergeFrom(x.value())
if (x.has_multiple()): self.set_multiple(x.multiple())
def Equals(self, x):
if x is self: return 1
if self.has_meaning_ != x.has_meaning_: return 0
if self.has_meaning_ and self.meaning_ != x.meaning_: return 0
if self.has_meaning_uri_ != x.has_meaning_uri_: return 0
if self.has_meaning_uri_ and self.meaning_uri_ != x.meaning_uri_: return 0
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
if self.has_multiple_ != x.has_multiple_: return 0
if self.has_multiple_ and self.multiple_ != x.multiple_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
elif not self.value_.IsInitialized(debug_strs): initialized = 0
if (not self.has_multiple_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: multiple not set.')
return initialized
def ByteSize(self):
n = 0
if (self.has_meaning_): n += 1 + self.lengthVarInt64(self.meaning_)
if (self.has_meaning_uri_): n += 1 + self.lengthString(len(self.meaning_uri_))
n += self.lengthString(len(self.name_))
n += self.lengthString(self.value_.ByteSize())
return n + 4
def ByteSizePartial(self):
n = 0
if (self.has_meaning_): n += 1 + self.lengthVarInt64(self.meaning_)
if (self.has_meaning_uri_): n += 1 + self.lengthString(len(self.meaning_uri_))
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
if (self.has_value_):
n += 1
n += self.lengthString(self.value_.ByteSizePartial())
if (self.has_multiple_):
n += 2
return n
def Clear(self):
self.clear_meaning()
self.clear_meaning_uri()
self.clear_name()
self.clear_value()
self.clear_multiple()
def OutputUnchecked(self, out):
if (self.has_meaning_):
out.putVarInt32(8)
out.putVarInt32(self.meaning_)
if (self.has_meaning_uri_):
out.putVarInt32(18)
out.putPrefixedString(self.meaning_uri_)
out.putVarInt32(26)
out.putPrefixedString(self.name_)
out.putVarInt32(32)
out.putBoolean(self.multiple_)
out.putVarInt32(42)
out.putVarInt32(self.value_.ByteSize())
self.value_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_meaning_):
out.putVarInt32(8)
out.putVarInt32(self.meaning_)
if (self.has_meaning_uri_):
out.putVarInt32(18)
out.putPrefixedString(self.meaning_uri_)
if (self.has_name_):
out.putVarInt32(26)
out.putPrefixedString(self.name_)
if (self.has_multiple_):
out.putVarInt32(32)
out.putBoolean(self.multiple_)
if (self.has_value_):
out.putVarInt32(42)
out.putVarInt32(self.value_.ByteSizePartial())
self.value_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_meaning(d.getVarInt32())
continue
if tt == 18:
self.set_meaning_uri(d.getPrefixedString())
continue
if tt == 26:
self.set_name(d.getPrefixedString())
continue
if tt == 32:
self.set_multiple(d.getBoolean())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_value().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_meaning_: res+=prefix+("meaning: %s\n" % self.DebugFormatInt32(self.meaning_))
if self.has_meaning_uri_: res+=prefix+("meaning_uri: %s\n" % self.DebugFormatString(self.meaning_uri_))
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_value_:
res+=prefix+"value <\n"
res+=self.value_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_multiple_: res+=prefix+("multiple: %s\n" % self.DebugFormatBool(self.multiple_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kmeaning = 1
kmeaning_uri = 2
kname = 3
kvalue = 5
kmultiple = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "meaning",
2: "meaning_uri",
3: "name",
4: "multiple",
5: "value",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.Property'
class Path_Element(ProtocolBuffer.ProtocolMessage):
has_type_ = 0
type_ = ""
has_id_ = 0
id_ = 0
has_name_ = 0
name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def type(self): return self.type_
def set_type(self, x):
self.has_type_ = 1
self.type_ = x
def clear_type(self):
if self.has_type_:
self.has_type_ = 0
self.type_ = ""
def has_type(self): return self.has_type_
def id(self): return self.id_
def set_id(self, x):
self.has_id_ = 1
self.id_ = x
def clear_id(self):
if self.has_id_:
self.has_id_ = 0
self.id_ = 0
def has_id(self): return self.has_id_
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_type()): self.set_type(x.type())
if (x.has_id()): self.set_id(x.id())
if (x.has_name()): self.set_name(x.name())
def Equals(self, x):
if x is self: return 1
if self.has_type_ != x.has_type_: return 0
if self.has_type_ and self.type_ != x.type_: return 0
if self.has_id_ != x.has_id_: return 0
if self.has_id_ and self.id_ != x.id_: return 0
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_type_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: type not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.type_))
if (self.has_id_): n += 1 + self.lengthVarInt64(self.id_)
if (self.has_name_): n += 1 + self.lengthString(len(self.name_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_type_):
n += 1
n += self.lengthString(len(self.type_))
if (self.has_id_): n += 1 + self.lengthVarInt64(self.id_)
if (self.has_name_): n += 1 + self.lengthString(len(self.name_))
return n
def Clear(self):
self.clear_type()
self.clear_id()
self.clear_name()
def OutputUnchecked(self, out):
out.putVarInt32(18)
out.putPrefixedString(self.type_)
if (self.has_id_):
out.putVarInt32(24)
out.putVarInt64(self.id_)
if (self.has_name_):
out.putVarInt32(34)
out.putPrefixedString(self.name_)
def OutputPartial(self, out):
if (self.has_type_):
out.putVarInt32(18)
out.putPrefixedString(self.type_)
if (self.has_id_):
out.putVarInt32(24)
out.putVarInt64(self.id_)
if (self.has_name_):
out.putVarInt32(34)
out.putPrefixedString(self.name_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 12: break
if tt == 18:
self.set_type(d.getPrefixedString())
continue
if tt == 24:
self.set_id(d.getVarInt64())
continue
if tt == 34:
self.set_name(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_type_: res+=prefix+("type: %s\n" % self.DebugFormatString(self.type_))
if self.has_id_: res+=prefix+("id: %s\n" % self.DebugFormatInt64(self.id_))
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
return res
class Path(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.element_ = []
if contents is not None: self.MergeFromString(contents)
def element_size(self): return len(self.element_)
def element_list(self): return self.element_
def element(self, i):
return self.element_[i]
def mutable_element(self, i):
return self.element_[i]
def add_element(self):
x = Path_Element()
self.element_.append(x)
return x
def clear_element(self):
self.element_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.element_size()): self.add_element().CopyFrom(x.element(i))
def Equals(self, x):
if x is self: return 1
if len(self.element_) != len(x.element_): return 0
for e1, e2 in zip(self.element_, x.element_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.element_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 2 * len(self.element_)
for i in xrange(len(self.element_)): n += self.element_[i].ByteSize()
return n
def ByteSizePartial(self):
n = 0
n += 2 * len(self.element_)
for i in xrange(len(self.element_)): n += self.element_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_element()
def OutputUnchecked(self, out):
for i in xrange(len(self.element_)):
out.putVarInt32(11)
self.element_[i].OutputUnchecked(out)
out.putVarInt32(12)
def OutputPartial(self, out):
for i in xrange(len(self.element_)):
out.putVarInt32(11)
self.element_[i].OutputPartial(out)
out.putVarInt32(12)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 11:
self.add_element().TryMerge(d)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.element_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Element%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kElementGroup = 1
kElementtype = 2
kElementid = 3
kElementname = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "Element",
2: "type",
3: "id",
4: "name",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STARTGROUP,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.Path'
class Reference(ProtocolBuffer.ProtocolMessage):
has_app_ = 0
app_ = ""
has_name_space_ = 0
name_space_ = ""
has_path_ = 0
def __init__(self, contents=None):
self.path_ = Path()
if contents is not None: self.MergeFromString(contents)
def app(self): return self.app_
def set_app(self, x):
self.has_app_ = 1
self.app_ = x
def clear_app(self):
if self.has_app_:
self.has_app_ = 0
self.app_ = ""
def has_app(self): return self.has_app_
def name_space(self): return self.name_space_
def set_name_space(self, x):
self.has_name_space_ = 1
self.name_space_ = x
def clear_name_space(self):
if self.has_name_space_:
self.has_name_space_ = 0
self.name_space_ = ""
def has_name_space(self): return self.has_name_space_
def path(self): return self.path_
def mutable_path(self): self.has_path_ = 1; return self.path_
def clear_path(self):self.has_path_ = 0; self.path_.Clear()
def has_path(self): return self.has_path_
def MergeFrom(self, x):
assert x is not self
if (x.has_app()): self.set_app(x.app())
if (x.has_name_space()): self.set_name_space(x.name_space())
if (x.has_path()): self.mutable_path().MergeFrom(x.path())
def Equals(self, x):
if x is self: return 1
if self.has_app_ != x.has_app_: return 0
if self.has_app_ and self.app_ != x.app_: return 0
if self.has_name_space_ != x.has_name_space_: return 0
if self.has_name_space_ and self.name_space_ != x.name_space_: return 0
if self.has_path_ != x.has_path_: return 0
if self.has_path_ and self.path_ != x.path_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app not set.')
if (not self.has_path_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: path not set.')
elif not self.path_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_))
if (self.has_name_space_): n += 2 + self.lengthString(len(self.name_space_))
n += self.lengthString(self.path_.ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_app_):
n += 1
n += self.lengthString(len(self.app_))
if (self.has_name_space_): n += 2 + self.lengthString(len(self.name_space_))
if (self.has_path_):
n += 1
n += self.lengthString(self.path_.ByteSizePartial())
return n
def Clear(self):
self.clear_app()
self.clear_name_space()
self.clear_path()
def OutputUnchecked(self, out):
out.putVarInt32(106)
out.putPrefixedString(self.app_)
out.putVarInt32(114)
out.putVarInt32(self.path_.ByteSize())
self.path_.OutputUnchecked(out)
if (self.has_name_space_):
out.putVarInt32(162)
out.putPrefixedString(self.name_space_)
def OutputPartial(self, out):
if (self.has_app_):
out.putVarInt32(106)
out.putPrefixedString(self.app_)
if (self.has_path_):
out.putVarInt32(114)
out.putVarInt32(self.path_.ByteSizePartial())
self.path_.OutputPartial(out)
if (self.has_name_space_):
out.putVarInt32(162)
out.putPrefixedString(self.name_space_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 106:
self.set_app(d.getPrefixedString())
continue
if tt == 114:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_path().TryMerge(tmp)
continue
if tt == 162:
self.set_name_space(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
if self.has_name_space_: res+=prefix+("name_space: %s\n" % self.DebugFormatString(self.name_space_))
if self.has_path_:
res+=prefix+"path <\n"
res+=self.path_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp = 13
kname_space = 20
kpath = 14
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
13: "app",
14: "path",
20: "name_space",
}, 20)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
13: ProtocolBuffer.Encoder.STRING,
14: ProtocolBuffer.Encoder.STRING,
20: ProtocolBuffer.Encoder.STRING,
}, 20, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.Reference'
class User(ProtocolBuffer.ProtocolMessage):
has_email_ = 0
email_ = ""
has_auth_domain_ = 0
auth_domain_ = ""
has_nickname_ = 0
nickname_ = ""
has_gaiaid_ = 0
gaiaid_ = 0
has_obfuscated_gaiaid_ = 0
obfuscated_gaiaid_ = ""
has_federated_identity_ = 0
federated_identity_ = ""
has_federated_provider_ = 0
federated_provider_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def email(self): return self.email_
def set_email(self, x):
self.has_email_ = 1
self.email_ = x
def clear_email(self):
if self.has_email_:
self.has_email_ = 0
self.email_ = ""
def has_email(self): return self.has_email_
def auth_domain(self): return self.auth_domain_
def set_auth_domain(self, x):
self.has_auth_domain_ = 1
self.auth_domain_ = x
def clear_auth_domain(self):
if self.has_auth_domain_:
self.has_auth_domain_ = 0
self.auth_domain_ = ""
def has_auth_domain(self): return self.has_auth_domain_
def nickname(self): return self.nickname_
def set_nickname(self, x):
self.has_nickname_ = 1
self.nickname_ = x
def clear_nickname(self):
if self.has_nickname_:
self.has_nickname_ = 0
self.nickname_ = ""
def has_nickname(self): return self.has_nickname_
def gaiaid(self): return self.gaiaid_
def set_gaiaid(self, x):
self.has_gaiaid_ = 1
self.gaiaid_ = x
def clear_gaiaid(self):
if self.has_gaiaid_:
self.has_gaiaid_ = 0
self.gaiaid_ = 0
def has_gaiaid(self): return self.has_gaiaid_
def obfuscated_gaiaid(self): return self.obfuscated_gaiaid_
def set_obfuscated_gaiaid(self, x):
self.has_obfuscated_gaiaid_ = 1
self.obfuscated_gaiaid_ = x
def clear_obfuscated_gaiaid(self):
if self.has_obfuscated_gaiaid_:
self.has_obfuscated_gaiaid_ = 0
self.obfuscated_gaiaid_ = ""
def has_obfuscated_gaiaid(self): return self.has_obfuscated_gaiaid_
def federated_identity(self): return self.federated_identity_
def set_federated_identity(self, x):
self.has_federated_identity_ = 1
self.federated_identity_ = x
def clear_federated_identity(self):
if self.has_federated_identity_:
self.has_federated_identity_ = 0
self.federated_identity_ = ""
def has_federated_identity(self): return self.has_federated_identity_
def federated_provider(self): return self.federated_provider_
def set_federated_provider(self, x):
self.has_federated_provider_ = 1
self.federated_provider_ = x
def clear_federated_provider(self):
if self.has_federated_provider_:
self.has_federated_provider_ = 0
self.federated_provider_ = ""
def has_federated_provider(self): return self.has_federated_provider_
def MergeFrom(self, x):
assert x is not self
if (x.has_email()): self.set_email(x.email())
if (x.has_auth_domain()): self.set_auth_domain(x.auth_domain())
if (x.has_nickname()): self.set_nickname(x.nickname())
if (x.has_gaiaid()): self.set_gaiaid(x.gaiaid())
if (x.has_obfuscated_gaiaid()): self.set_obfuscated_gaiaid(x.obfuscated_gaiaid())
if (x.has_federated_identity()): self.set_federated_identity(x.federated_identity())
if (x.has_federated_provider()): self.set_federated_provider(x.federated_provider())
def Equals(self, x):
if x is self: return 1
if self.has_email_ != x.has_email_: return 0
if self.has_email_ and self.email_ != x.email_: return 0
if self.has_auth_domain_ != x.has_auth_domain_: return 0
if self.has_auth_domain_ and self.auth_domain_ != x.auth_domain_: return 0
if self.has_nickname_ != x.has_nickname_: return 0
if self.has_nickname_ and self.nickname_ != x.nickname_: return 0
if self.has_gaiaid_ != x.has_gaiaid_: return 0
if self.has_gaiaid_ and self.gaiaid_ != x.gaiaid_: return 0
if self.has_obfuscated_gaiaid_ != x.has_obfuscated_gaiaid_: return 0
if self.has_obfuscated_gaiaid_ and self.obfuscated_gaiaid_ != x.obfuscated_gaiaid_: return 0
if self.has_federated_identity_ != x.has_federated_identity_: return 0
if self.has_federated_identity_ and self.federated_identity_ != x.federated_identity_: return 0
if self.has_federated_provider_ != x.has_federated_provider_: return 0
if self.has_federated_provider_ and self.federated_provider_ != x.federated_provider_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_email_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: email not set.')
if (not self.has_auth_domain_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: auth_domain not set.')
if (not self.has_gaiaid_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: gaiaid not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.email_))
n += self.lengthString(len(self.auth_domain_))
if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
n += self.lengthVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_): n += 1 + self.lengthString(len(self.obfuscated_gaiaid_))
if (self.has_federated_identity_): n += 1 + self.lengthString(len(self.federated_identity_))
if (self.has_federated_provider_): n += 1 + self.lengthString(len(self.federated_provider_))
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_email_):
n += 1
n += self.lengthString(len(self.email_))
if (self.has_auth_domain_):
n += 1
n += self.lengthString(len(self.auth_domain_))
if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
if (self.has_gaiaid_):
n += 1
n += self.lengthVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_): n += 1 + self.lengthString(len(self.obfuscated_gaiaid_))
if (self.has_federated_identity_): n += 1 + self.lengthString(len(self.federated_identity_))
if (self.has_federated_provider_): n += 1 + self.lengthString(len(self.federated_provider_))
return n
def Clear(self):
self.clear_email()
self.clear_auth_domain()
self.clear_nickname()
self.clear_gaiaid()
self.clear_obfuscated_gaiaid()
self.clear_federated_identity()
self.clear_federated_provider()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.email_)
out.putVarInt32(18)
out.putPrefixedString(self.auth_domain_)
if (self.has_nickname_):
out.putVarInt32(26)
out.putPrefixedString(self.nickname_)
out.putVarInt32(32)
out.putVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_):
out.putVarInt32(42)
out.putPrefixedString(self.obfuscated_gaiaid_)
if (self.has_federated_identity_):
out.putVarInt32(50)
out.putPrefixedString(self.federated_identity_)
if (self.has_federated_provider_):
out.putVarInt32(58)
out.putPrefixedString(self.federated_provider_)
def OutputPartial(self, out):
if (self.has_email_):
out.putVarInt32(10)
out.putPrefixedString(self.email_)
if (self.has_auth_domain_):
out.putVarInt32(18)
out.putPrefixedString(self.auth_domain_)
if (self.has_nickname_):
out.putVarInt32(26)
out.putPrefixedString(self.nickname_)
if (self.has_gaiaid_):
out.putVarInt32(32)
out.putVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_):
out.putVarInt32(42)
out.putPrefixedString(self.obfuscated_gaiaid_)
if (self.has_federated_identity_):
out.putVarInt32(50)
out.putPrefixedString(self.federated_identity_)
if (self.has_federated_provider_):
out.putVarInt32(58)
out.putPrefixedString(self.federated_provider_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_email(d.getPrefixedString())
continue
if tt == 18:
self.set_auth_domain(d.getPrefixedString())
continue
if tt == 26:
self.set_nickname(d.getPrefixedString())
continue
if tt == 32:
self.set_gaiaid(d.getVarInt64())
continue
if tt == 42:
self.set_obfuscated_gaiaid(d.getPrefixedString())
continue
if tt == 50:
self.set_federated_identity(d.getPrefixedString())
continue
if tt == 58:
self.set_federated_provider(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_email_: res+=prefix+("email: %s\n" % self.DebugFormatString(self.email_))
if self.has_auth_domain_: res+=prefix+("auth_domain: %s\n" % self.DebugFormatString(self.auth_domain_))
if self.has_nickname_: res+=prefix+("nickname: %s\n" % self.DebugFormatString(self.nickname_))
if self.has_gaiaid_: res+=prefix+("gaiaid: %s\n" % self.DebugFormatInt64(self.gaiaid_))
if self.has_obfuscated_gaiaid_: res+=prefix+("obfuscated_gaiaid: %s\n" % self.DebugFormatString(self.obfuscated_gaiaid_))
if self.has_federated_identity_: res+=prefix+("federated_identity: %s\n" % self.DebugFormatString(self.federated_identity_))
if self.has_federated_provider_: res+=prefix+("federated_provider: %s\n" % self.DebugFormatString(self.federated_provider_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kemail = 1
kauth_domain = 2
knickname = 3
kgaiaid = 4
kobfuscated_gaiaid = 5
kfederated_identity = 6
kfederated_provider = 7
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "email",
2: "auth_domain",
3: "nickname",
4: "gaiaid",
5: "obfuscated_gaiaid",
6: "federated_identity",
7: "federated_provider",
}, 7)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.STRING,
}, 7, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.User'
class EntityProto(ProtocolBuffer.ProtocolMessage):
GD_CONTACT = 1
GD_EVENT = 2
GD_MESSAGE = 3
_Kind_NAMES = {
1: "GD_CONTACT",
2: "GD_EVENT",
3: "GD_MESSAGE",
}
def Kind_Name(cls, x): return cls._Kind_NAMES.get(x, "")
Kind_Name = classmethod(Kind_Name)
has_key_ = 0
has_entity_group_ = 0
has_owner_ = 0
owner_ = None
has_kind_ = 0
kind_ = 0
has_kind_uri_ = 0
kind_uri_ = ""
def __init__(self, contents=None):
self.key_ = Reference()
self.entity_group_ = Path()
self.property_ = []
self.raw_property_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def key(self): return self.key_
def mutable_key(self): self.has_key_ = 1; return self.key_
def clear_key(self):self.has_key_ = 0; self.key_.Clear()
def has_key(self): return self.has_key_
def entity_group(self): return self.entity_group_
def mutable_entity_group(self): self.has_entity_group_ = 1; return self.entity_group_
def clear_entity_group(self):self.has_entity_group_ = 0; self.entity_group_.Clear()
def has_entity_group(self): return self.has_entity_group_
def owner(self):
if self.owner_ is None:
self.lazy_init_lock_.acquire()
try:
if self.owner_ is None: self.owner_ = User()
finally:
self.lazy_init_lock_.release()
return self.owner_
def mutable_owner(self): self.has_owner_ = 1; return self.owner()
def clear_owner(self):
if self.has_owner_:
self.has_owner_ = 0;
if self.owner_ is not None: self.owner_.Clear()
def has_owner(self): return self.has_owner_
def kind(self): return self.kind_
def set_kind(self, x):
self.has_kind_ = 1
self.kind_ = x
def clear_kind(self):
if self.has_kind_:
self.has_kind_ = 0
self.kind_ = 0
def has_kind(self): return self.has_kind_
def kind_uri(self): return self.kind_uri_
def set_kind_uri(self, x):
self.has_kind_uri_ = 1
self.kind_uri_ = x
def clear_kind_uri(self):
if self.has_kind_uri_:
self.has_kind_uri_ = 0
self.kind_uri_ = ""
def has_kind_uri(self): return self.has_kind_uri_
def property_size(self): return len(self.property_)
def property_list(self): return self.property_
def property(self, i):
return self.property_[i]
def mutable_property(self, i):
return self.property_[i]
def add_property(self):
x = Property()
self.property_.append(x)
return x
def clear_property(self):
self.property_ = []
def raw_property_size(self): return len(self.raw_property_)
def raw_property_list(self): return self.raw_property_
def raw_property(self, i):
return self.raw_property_[i]
def mutable_raw_property(self, i):
return self.raw_property_[i]
def add_raw_property(self):
x = Property()
self.raw_property_.append(x)
return x
def clear_raw_property(self):
self.raw_property_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_key()): self.mutable_key().MergeFrom(x.key())
if (x.has_entity_group()): self.mutable_entity_group().MergeFrom(x.entity_group())
if (x.has_owner()): self.mutable_owner().MergeFrom(x.owner())
if (x.has_kind()): self.set_kind(x.kind())
if (x.has_kind_uri()): self.set_kind_uri(x.kind_uri())
for i in xrange(x.property_size()): self.add_property().CopyFrom(x.property(i))
for i in xrange(x.raw_property_size()): self.add_raw_property().CopyFrom(x.raw_property(i))
def Equals(self, x):
if x is self: return 1
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_entity_group_ != x.has_entity_group_: return 0
if self.has_entity_group_ and self.entity_group_ != x.entity_group_: return 0
if self.has_owner_ != x.has_owner_: return 0
if self.has_owner_ and self.owner_ != x.owner_: return 0
if self.has_kind_ != x.has_kind_: return 0
if self.has_kind_ and self.kind_ != x.kind_: return 0
if self.has_kind_uri_ != x.has_kind_uri_: return 0
if self.has_kind_uri_ and self.kind_uri_ != x.kind_uri_: return 0
if len(self.property_) != len(x.property_): return 0
for e1, e2 in zip(self.property_, x.property_):
if e1 != e2: return 0
if len(self.raw_property_) != len(x.raw_property_): return 0
for e1, e2 in zip(self.raw_property_, x.raw_property_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: key not set.')
elif not self.key_.IsInitialized(debug_strs): initialized = 0
if (not self.has_entity_group_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: entity_group not set.')
elif not self.entity_group_.IsInitialized(debug_strs): initialized = 0
if (self.has_owner_ and not self.owner_.IsInitialized(debug_strs)): initialized = 0
for p in self.property_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.raw_property_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.key_.ByteSize())
n += self.lengthString(self.entity_group_.ByteSize())
if (self.has_owner_): n += 2 + self.lengthString(self.owner_.ByteSize())
if (self.has_kind_): n += 1 + self.lengthVarInt64(self.kind_)
if (self.has_kind_uri_): n += 1 + self.lengthString(len(self.kind_uri_))
n += 1 * len(self.property_)
for i in xrange(len(self.property_)): n += self.lengthString(self.property_[i].ByteSize())
n += 1 * len(self.raw_property_)
for i in xrange(len(self.raw_property_)): n += self.lengthString(self.raw_property_[i].ByteSize())
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_key_):
n += 1
n += self.lengthString(self.key_.ByteSizePartial())
if (self.has_entity_group_):
n += 2
n += self.lengthString(self.entity_group_.ByteSizePartial())
if (self.has_owner_): n += 2 + self.lengthString(self.owner_.ByteSizePartial())
if (self.has_kind_): n += 1 + self.lengthVarInt64(self.kind_)
if (self.has_kind_uri_): n += 1 + self.lengthString(len(self.kind_uri_))
n += 1 * len(self.property_)
for i in xrange(len(self.property_)): n += self.lengthString(self.property_[i].ByteSizePartial())
n += 1 * len(self.raw_property_)
for i in xrange(len(self.raw_property_)): n += self.lengthString(self.raw_property_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_key()
self.clear_entity_group()
self.clear_owner()
self.clear_kind()
self.clear_kind_uri()
self.clear_property()
self.clear_raw_property()
def OutputUnchecked(self, out):
if (self.has_kind_):
out.putVarInt32(32)
out.putVarInt32(self.kind_)
if (self.has_kind_uri_):
out.putVarInt32(42)
out.putPrefixedString(self.kind_uri_)
out.putVarInt32(106)
out.putVarInt32(self.key_.ByteSize())
self.key_.OutputUnchecked(out)
for i in xrange(len(self.property_)):
out.putVarInt32(114)
out.putVarInt32(self.property_[i].ByteSize())
self.property_[i].OutputUnchecked(out)
for i in xrange(len(self.raw_property_)):
out.putVarInt32(122)
out.putVarInt32(self.raw_property_[i].ByteSize())
self.raw_property_[i].OutputUnchecked(out)
out.putVarInt32(130)
out.putVarInt32(self.entity_group_.ByteSize())
self.entity_group_.OutputUnchecked(out)
if (self.has_owner_):
out.putVarInt32(138)
out.putVarInt32(self.owner_.ByteSize())
self.owner_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_kind_):
out.putVarInt32(32)
out.putVarInt32(self.kind_)
if (self.has_kind_uri_):
out.putVarInt32(42)
out.putPrefixedString(self.kind_uri_)
if (self.has_key_):
out.putVarInt32(106)
out.putVarInt32(self.key_.ByteSizePartial())
self.key_.OutputPartial(out)
for i in xrange(len(self.property_)):
out.putVarInt32(114)
out.putVarInt32(self.property_[i].ByteSizePartial())
self.property_[i].OutputPartial(out)
for i in xrange(len(self.raw_property_)):
out.putVarInt32(122)
out.putVarInt32(self.raw_property_[i].ByteSizePartial())
self.raw_property_[i].OutputPartial(out)
if (self.has_entity_group_):
out.putVarInt32(130)
out.putVarInt32(self.entity_group_.ByteSizePartial())
self.entity_group_.OutputPartial(out)
if (self.has_owner_):
out.putVarInt32(138)
out.putVarInt32(self.owner_.ByteSizePartial())
self.owner_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 32:
self.set_kind(d.getVarInt32())
continue
if tt == 42:
self.set_kind_uri(d.getPrefixedString())
continue
if tt == 106:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_key().TryMerge(tmp)
continue
if tt == 114:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_property().TryMerge(tmp)
continue
if tt == 122:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_raw_property().TryMerge(tmp)
continue
if tt == 130:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_entity_group().TryMerge(tmp)
continue
if tt == 138:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_owner().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_key_:
res+=prefix+"key <\n"
res+=self.key_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_entity_group_:
res+=prefix+"entity_group <\n"
res+=self.entity_group_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_owner_:
res+=prefix+"owner <\n"
res+=self.owner_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_kind_: res+=prefix+("kind: %s\n" % self.DebugFormatInt32(self.kind_))
if self.has_kind_uri_: res+=prefix+("kind_uri: %s\n" % self.DebugFormatString(self.kind_uri_))
cnt=0
for e in self.property_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("property%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.raw_property_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("raw_property%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kkey = 13
kentity_group = 16
kowner = 17
kkind = 4
kkind_uri = 5
kproperty = 14
kraw_property = 15
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
4: "kind",
5: "kind_uri",
13: "key",
14: "property",
15: "raw_property",
16: "entity_group",
17: "owner",
}, 17)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
13: ProtocolBuffer.Encoder.STRING,
14: ProtocolBuffer.Encoder.STRING,
15: ProtocolBuffer.Encoder.STRING,
16: ProtocolBuffer.Encoder.STRING,
17: ProtocolBuffer.Encoder.STRING,
}, 17, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.EntityProto'
class CompositeProperty(ProtocolBuffer.ProtocolMessage):
has_index_id_ = 0
index_id_ = 0
def __init__(self, contents=None):
self.value_ = []
if contents is not None: self.MergeFromString(contents)
def index_id(self): return self.index_id_
def set_index_id(self, x):
self.has_index_id_ = 1
self.index_id_ = x
def clear_index_id(self):
if self.has_index_id_:
self.has_index_id_ = 0
self.index_id_ = 0
def has_index_id(self): return self.has_index_id_
def value_size(self): return len(self.value_)
def value_list(self): return self.value_
def value(self, i):
return self.value_[i]
def set_value(self, i, x):
self.value_[i] = x
def add_value(self, x):
self.value_.append(x)
def clear_value(self):
self.value_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_index_id()): self.set_index_id(x.index_id())
for i in xrange(x.value_size()): self.add_value(x.value(i))
def Equals(self, x):
if x is self: return 1
if self.has_index_id_ != x.has_index_id_: return 0
if self.has_index_id_ and self.index_id_ != x.index_id_: return 0
if len(self.value_) != len(x.value_): return 0
for e1, e2 in zip(self.value_, x.value_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_index_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_id not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.index_id_)
n += 1 * len(self.value_)
for i in xrange(len(self.value_)): n += self.lengthString(len(self.value_[i]))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_index_id_):
n += 1
n += self.lengthVarInt64(self.index_id_)
n += 1 * len(self.value_)
for i in xrange(len(self.value_)): n += self.lengthString(len(self.value_[i]))
return n
def Clear(self):
self.clear_index_id()
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.index_id_)
for i in xrange(len(self.value_)):
out.putVarInt32(18)
out.putPrefixedString(self.value_[i])
def OutputPartial(self, out):
if (self.has_index_id_):
out.putVarInt32(8)
out.putVarInt64(self.index_id_)
for i in xrange(len(self.value_)):
out.putVarInt32(18)
out.putPrefixedString(self.value_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_index_id(d.getVarInt64())
continue
if tt == 18:
self.add_value(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_id_: res+=prefix+("index_id: %s\n" % self.DebugFormatInt64(self.index_id_))
cnt=0
for e in self.value_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("value%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex_id = 1
kvalue = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index_id",
2: "value",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.CompositeProperty'
class Index_Property(ProtocolBuffer.ProtocolMessage):
ASCENDING = 1
DESCENDING = 2
_Direction_NAMES = {
1: "ASCENDING",
2: "DESCENDING",
}
def Direction_Name(cls, x): return cls._Direction_NAMES.get(x, "")
Direction_Name = classmethod(Direction_Name)
has_name_ = 0
name_ = ""
has_direction_ = 0
direction_ = 1
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def direction(self): return self.direction_
def set_direction(self, x):
self.has_direction_ = 1
self.direction_ = x
def clear_direction(self):
if self.has_direction_:
self.has_direction_ = 0
self.direction_ = 1
def has_direction(self): return self.has_direction_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
if (x.has_direction()): self.set_direction(x.direction())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_direction_ != x.has_direction_: return 0
if self.has_direction_ and self.direction_ != x.direction_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
if (self.has_direction_): n += 1 + self.lengthVarInt64(self.direction_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
if (self.has_direction_): n += 1 + self.lengthVarInt64(self.direction_)
return n
def Clear(self):
self.clear_name()
self.clear_direction()
def OutputUnchecked(self, out):
out.putVarInt32(26)
out.putPrefixedString(self.name_)
if (self.has_direction_):
out.putVarInt32(32)
out.putVarInt32(self.direction_)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(26)
out.putPrefixedString(self.name_)
if (self.has_direction_):
out.putVarInt32(32)
out.putVarInt32(self.direction_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 20: break
if tt == 26:
self.set_name(d.getPrefixedString())
continue
if tt == 32:
self.set_direction(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_direction_: res+=prefix+("direction: %s\n" % self.DebugFormatInt32(self.direction_))
return res
class Index(ProtocolBuffer.ProtocolMessage):
has_entity_type_ = 0
entity_type_ = ""
has_ancestor_ = 0
ancestor_ = 0
def __init__(self, contents=None):
self.property_ = []
if contents is not None: self.MergeFromString(contents)
def entity_type(self): return self.entity_type_
def set_entity_type(self, x):
self.has_entity_type_ = 1
self.entity_type_ = x
def clear_entity_type(self):
if self.has_entity_type_:
self.has_entity_type_ = 0
self.entity_type_ = ""
def has_entity_type(self): return self.has_entity_type_
def ancestor(self): return self.ancestor_
def set_ancestor(self, x):
self.has_ancestor_ = 1
self.ancestor_ = x
def clear_ancestor(self):
if self.has_ancestor_:
self.has_ancestor_ = 0
self.ancestor_ = 0
def has_ancestor(self): return self.has_ancestor_
def property_size(self): return len(self.property_)
def property_list(self): return self.property_
def property(self, i):
return self.property_[i]
def mutable_property(self, i):
return self.property_[i]
def add_property(self):
x = Index_Property()
self.property_.append(x)
return x
def clear_property(self):
self.property_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_entity_type()): self.set_entity_type(x.entity_type())
if (x.has_ancestor()): self.set_ancestor(x.ancestor())
for i in xrange(x.property_size()): self.add_property().CopyFrom(x.property(i))
def Equals(self, x):
if x is self: return 1
if self.has_entity_type_ != x.has_entity_type_: return 0
if self.has_entity_type_ and self.entity_type_ != x.entity_type_: return 0
if self.has_ancestor_ != x.has_ancestor_: return 0
if self.has_ancestor_ and self.ancestor_ != x.ancestor_: return 0
if len(self.property_) != len(x.property_): return 0
for e1, e2 in zip(self.property_, x.property_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_entity_type_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: entity_type not set.')
if (not self.has_ancestor_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: ancestor not set.')
for p in self.property_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.entity_type_))
n += 2 * len(self.property_)
for i in xrange(len(self.property_)): n += self.property_[i].ByteSize()
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_entity_type_):
n += 1
n += self.lengthString(len(self.entity_type_))
if (self.has_ancestor_):
n += 2
n += 2 * len(self.property_)
for i in xrange(len(self.property_)): n += self.property_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_entity_type()
self.clear_ancestor()
self.clear_property()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.entity_type_)
for i in xrange(len(self.property_)):
out.putVarInt32(19)
self.property_[i].OutputUnchecked(out)
out.putVarInt32(20)
out.putVarInt32(40)
out.putBoolean(self.ancestor_)
def OutputPartial(self, out):
if (self.has_entity_type_):
out.putVarInt32(10)
out.putPrefixedString(self.entity_type_)
for i in xrange(len(self.property_)):
out.putVarInt32(19)
self.property_[i].OutputPartial(out)
out.putVarInt32(20)
if (self.has_ancestor_):
out.putVarInt32(40)
out.putBoolean(self.ancestor_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_entity_type(d.getPrefixedString())
continue
if tt == 19:
self.add_property().TryMerge(d)
continue
if tt == 40:
self.set_ancestor(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_entity_type_: res+=prefix+("entity_type: %s\n" % self.DebugFormatString(self.entity_type_))
if self.has_ancestor_: res+=prefix+("ancestor: %s\n" % self.DebugFormatBool(self.ancestor_))
cnt=0
for e in self.property_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Property%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kentity_type = 1
kancestor = 5
kPropertyGroup = 2
kPropertyname = 3
kPropertydirection = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "entity_type",
2: "Property",
3: "name",
4: "direction",
5: "ancestor",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STARTGROUP,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.NUMERIC,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.Index'
class CompositeIndex(ProtocolBuffer.ProtocolMessage):
WRITE_ONLY = 1
READ_WRITE = 2
DELETED = 3
ERROR = 4
_State_NAMES = {
1: "WRITE_ONLY",
2: "READ_WRITE",
3: "DELETED",
4: "ERROR",
}
def State_Name(cls, x): return cls._State_NAMES.get(x, "")
State_Name = classmethod(State_Name)
has_app_id_ = 0
app_id_ = ""
has_id_ = 0
id_ = 0
has_definition_ = 0
has_state_ = 0
state_ = 0
def __init__(self, contents=None):
self.definition_ = Index()
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def id(self): return self.id_
def set_id(self, x):
self.has_id_ = 1
self.id_ = x
def clear_id(self):
if self.has_id_:
self.has_id_ = 0
self.id_ = 0
def has_id(self): return self.has_id_
def definition(self): return self.definition_
def mutable_definition(self): self.has_definition_ = 1; return self.definition_
def clear_definition(self):self.has_definition_ = 0; self.definition_.Clear()
def has_definition(self): return self.has_definition_
def state(self): return self.state_
def set_state(self, x):
self.has_state_ = 1
self.state_ = x
def clear_state(self):
if self.has_state_:
self.has_state_ = 0
self.state_ = 0
def has_state(self): return self.has_state_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_id()): self.set_id(x.id())
if (x.has_definition()): self.mutable_definition().MergeFrom(x.definition())
if (x.has_state()): self.set_state(x.state())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_id_ != x.has_id_: return 0
if self.has_id_ and self.id_ != x.id_: return 0
if self.has_definition_ != x.has_definition_: return 0
if self.has_definition_ and self.definition_ != x.definition_: return 0
if self.has_state_ != x.has_state_: return 0
if self.has_state_ and self.state_ != x.state_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app_id not set.')
if (not self.has_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: id not set.')
if (not self.has_definition_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: definition not set.')
elif not self.definition_.IsInitialized(debug_strs): initialized = 0
if (not self.has_state_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: state not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_id_))
n += self.lengthVarInt64(self.id_)
n += self.lengthString(self.definition_.ByteSize())
n += self.lengthVarInt64(self.state_)
return n + 4
def ByteSizePartial(self):
n = 0
if (self.has_app_id_):
n += 1
n += self.lengthString(len(self.app_id_))
if (self.has_id_):
n += 1
n += self.lengthVarInt64(self.id_)
if (self.has_definition_):
n += 1
n += self.lengthString(self.definition_.ByteSizePartial())
if (self.has_state_):
n += 1
n += self.lengthVarInt64(self.state_)
return n
def Clear(self):
self.clear_app_id()
self.clear_id()
self.clear_definition()
self.clear_state()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
out.putVarInt32(16)
out.putVarInt64(self.id_)
out.putVarInt32(26)
out.putVarInt32(self.definition_.ByteSize())
self.definition_.OutputUnchecked(out)
out.putVarInt32(32)
out.putVarInt32(self.state_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
if (self.has_id_):
out.putVarInt32(16)
out.putVarInt64(self.id_)
if (self.has_definition_):
out.putVarInt32(26)
out.putVarInt32(self.definition_.ByteSizePartial())
self.definition_.OutputPartial(out)
if (self.has_state_):
out.putVarInt32(32)
out.putVarInt32(self.state_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 16:
self.set_id(d.getVarInt64())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_definition().TryMerge(tmp)
continue
if tt == 32:
self.set_state(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_id_: res+=prefix+("id: %s\n" % self.DebugFormatInt64(self.id_))
if self.has_definition_:
res+=prefix+"definition <\n"
res+=self.definition_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_state_: res+=prefix+("state: %s\n" % self.DebugFormatInt32(self.state_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kid = 2
kdefinition = 3
kstate = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "id",
3: "definition",
4: "state",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.CompositeIndex'
if _extension_runtime:
pass
__all__ = ['PropertyValue','PropertyValue_ReferenceValuePathElement','PropertyValue_PointValue','PropertyValue_UserValue','PropertyValue_ReferenceValue','Property','Path','Path_Element','Reference','User','EntityProto','CompositeProperty','Index','Index_Property','CompositeIndex']
| {
"content_hash": "676eb7de346701ec4080787c36ddc779",
"timestamp": "",
"source": "github",
"line_count": 3221,
"max_line_length": 281,
"avg_line_length": 29.480596088171374,
"alnum_prop": 0.6338342618237729,
"repo_name": "adviti/melange",
"id": "274b04995f9a42ef58999b97a0cb61b047806113",
"size": "95561",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "thirdparty/google_appengine/google/appengine/datastore/entity_pb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import argparse
from bakery_cli.fixers import CharacterSymbolsFixer
description = 'Fixes TTF NAME table strings to be ascii only'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('ttf_font', nargs='+',
help="Font in OpenType (TTF/OTF) format")
args = parser.parse_args()
for path in args.ttf_font:
CharacterSymbolsFixer(None, path).apply()
| {
"content_hash": "260caacf6b3810e6edb274b734449d0a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 61,
"avg_line_length": 28.428571428571427,
"alnum_prop": 0.7286432160804021,
"repo_name": "jessamynsmith/fontbakery",
"id": "eb02a8f1aac14f8e78b543587cf77c1a027ea752",
"size": "1122",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/fontbakery-fix-ascii-fontmetadata.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "466249"
}
],
"symlink_target": ""
} |
"""
Provide functions for the creation and manipulation of 3D Spheres.
Sphere are represented using a numpy.array of shape (4,).
The first three values are the sphere's position.
The fourth value is the sphere's radius.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from math3.utils import all_parameters_as_numpy_arrays, parameters_as_numpy_arrays
@parameters_as_numpy_arrays('center')
def create(center=None, radius=1.0, dtype=None):
    """Creates a sphere at the given centre (origin by default) with the given radius.

    :rtype: numpy.array
    :return: A sphere as a numpy.array of shape (4,): [x, y, z, radius].
    """
    if center is None:
        center = [0., 0., 0.]
    return np.array([center[0], center[1], center[2], radius], dtype=dtype)
@parameters_as_numpy_arrays('points')
def create_from_points(points, dtype=None):
"""Creates a sphere centred around 0,0,0 that encompasses
the furthest point in the provided list.
:param numpy.array points: An Nd array of vectors.
:rtype: A sphere as a two value tuple.
"""
dtype = dtype or points.dtype
# calculate the lengths of all the points
# use squared length to save processing
lengths = np.apply_along_axis(
np.sum,
points.ndim - 1,
points ** 2
)
# find the maximum value
maximum = lengths.max()
# square root this, this is the radius
radius = np.sqrt(maximum)
return np.array([0.0, 0.0, 0.0, radius], dtype=dtype)
@all_parameters_as_numpy_arrays
def position(sphere):
"""Returns the position of the sphere.
:param numpy.array sphere: The sphere to extract the position from.
:rtype: numpy.array
:return: The centre of the sphere.
"""
return sphere[:3].copy()
@all_parameters_as_numpy_arrays
def radius(sphere):
"""Returns the radius of the sphere.
:param numpy.array sphere: The sphere to extract the radius from.
:rtype: float
:return: The radius of the sphere.
"""
return sphere[3]
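# A minimal usage sketch (not part of the original module), assuming the
# functions above are in scope; the point data is illustrative only.
if __name__ == '__main__':
    pts = np.array([[1., 0., 0.], [0., 2., 0.], [0., 0., 3.]])
    s = create_from_points(pts)
    print(position(s), radius(s))  # -> [0. 0. 0.] 3.0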
| {
"content_hash": "bdd6d2670804dabe0485c22bc8d47008",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 82,
"avg_line_length": 28.630769230769232,
"alnum_prop": 0.6765180010746911,
"repo_name": "PhloxAR/math3",
"id": "615c9d1be3efb2a88a9880db0aa114f647972488",
"size": "1885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "math3/funcs/sphere.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "293019"
},
{
"name": "Shell",
"bytes": "1377"
}
],
"symlink_target": ""
} |
"""
WSGI config for agazeta project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "agazeta.settings")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling, MediaCling
application = get_wsgi_application()
application = Cling(application)
#application = Cling(MediaCling(get_wsgi_application()))
| {
"content_hash": "733aa42dfe707c6a8ba2402b97dced07",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 27.42105263157895,
"alnum_prop": 0.7773512476007678,
"repo_name": "Scoppio/a-gazeta-de-geringontzan",
"id": "92f84952c385a84b612f896bbc009f823d0894d1",
"size": "521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agazeta/agazeta/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "12901"
},
{
"name": "JavaScript",
"bytes": "3030"
},
{
"name": "Jupyter Notebook",
"bytes": "85847"
},
{
"name": "Python",
"bytes": "20949"
}
],
"symlink_target": ""
} |
import requests
import os
import codecs
from bs4 import BeautifulSoup
from mtgreatest.rdb import Cursor, serialize
from update_events import clean_magic_link
def write_as(text, filename):
f = codecs.open(filename, 'w', 'utf-8')
f.write(text)
f.close()
def scrape_info_type(info_type, soup, event_id):
    # Download every per-round page of the given type (results/pairings/standings)
    # linked from the event page into a sub-directory named after that type.
    if info_type not in os.listdir('.'):
        os.mkdir(info_type)
os.chdir(info_type)
alts = ['-a', '-b', '-c']
round_inds = dict()
for f in os.listdir('.'):
os.remove(f)
results = [el for el in soup.find_all('p') if info_type.upper() in el.text or info_type.capitalize() in el.text]
for result in results:
for el in result.parent.find_all('a'):
r = requests.get(clean_magic_link(el['href']))
            if r.status_code != 200:
continue
if el.text in round_inds:
filename = el.text + alts[round_inds[el.text]]
round_inds[el.text] += 1
else:
filename = el.text
round_inds[el.text] = 0
filename += '.html'
write_as(r.text, filename)
assert len(os.listdir('.')) > 11, 'fewer than 12 rounds detected for type {}'.format(info_type)
os.chdir('..')
def scrape_link(event_link, event_id):
print 'scraping event {}'.format(event_id)
os.chdir('/Users/joel/Projects/mtg-html')
r = requests.get(event_link)
try:
        if r.status_code != 200:
r.raise_for_status()
if event_id not in os.listdir('.'):
os.mkdir(event_id)
os.chdir(event_id)
write_as(r.text, 'index.html')
soup = BeautifulSoup(r.text, 'lxml')
scrape_info_type('results', soup, event_id)
scrape_info_type('pairings', soup, event_id)
scrape_info_type('standings', soup, event_id)
except Exception as error:
return {'value': -1, 'error': unicode(error)}
return {'value': 1, 'error': None}
def mark_event(event_link, event_id, result):
cursor = Cursor()
entries = [serialize(entry) for entry in [result['value'], result['error'], event_id, event_link]]
query = "UPDATE event_table set process_status={}, last_error={} where event_id={} and event_link={}".format(*entries)
cursor.execute(query)
cursor.close()
return
def scrape_new_links(num_events):
cursor = Cursor()
query = "select event_link, event_id from event_table where process_status=0 order by day_1_date desc limit {}".format(num_events)
event_infos = cursor.execute(query)
cursor.close()
for event_info in event_infos:
mark_event(*event_info, result=scrape_link(*event_info))
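# Hypothetical entry point (not in the original file): process the five most
# recently dated unprocessed events, assuming the database credentials used by
# Cursor() are already configured.
if __name__ == '__main__':
    scrape_new_links(5)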
| {
"content_hash": "4aca95b92bafd76ff12ce2a30838faf8",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 134,
"avg_line_length": 32.75609756097561,
"alnum_prop": 0.6023827252419955,
"repo_name": "oelarnes/mtgreatest",
"id": "56ae64840459a60fdbc8cb03ba0f2eb4dd5a1868",
"size": "2686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mtgreatest-py/mtgreatest/scrape/scrape_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "726813"
},
{
"name": "Python",
"bytes": "37630"
}
],
"symlink_target": ""
} |
from keras.datasets import mnist
import numpy as np
from PIL import Image
import math
import os
import keras.backend as K
K.set_image_data_format('channels_first')
print(K.image_data_format())  # call the accessor to confirm 'channels_first' is active
################################
# GAN modelling
################################
from keras import models, layers, optimizers
class GAN(models.Sequential):
def __init__(self, input_dim=64):
"""
self, self.generator, self.discriminator are all models
"""
super().__init__()
self.input_dim = input_dim
self.generator = self.GENERATOR()
self.discriminator = self.DISCRIMINATOR()
self.add(self.generator)
self.discriminator.trainable = False
self.add(self.discriminator)
self.compile_all()
def compile_all(self):
# Compiling stage
d_optim = optimizers.SGD(lr=0.0005, momentum=0.9, nesterov=True)
g_optim = optimizers.SGD(lr=0.0005, momentum=0.9, nesterov=True)
self.generator.compile(loss='binary_crossentropy', optimizer="SGD")
self.compile(loss='binary_crossentropy', optimizer=g_optim)
self.discriminator.trainable = True
self.discriminator.compile(loss='binary_crossentropy', optimizer=d_optim)
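    def get_z(self, ln):
        # get_z() is called by train_both() and train() below but is not defined
        # in this snippet; this is a minimal reconstruction, assuming the latent
        # vectors are sampled uniformly from [-1, 1].
        return np.random.uniform(-1, 1, (ln, self.input_dim))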
    def GENERATOR(self):
        # Upsamples a latent vector of length input_dim to a 1x28x28 image in [-1, 1].
        input_dim = self.input_dim
        model = models.Sequential()
model.add(layers.Dense(1024, activation='tanh', input_dim=input_dim))
model.add(layers.Dense(128 * 7 * 7, activation='tanh'))
model.add(layers.BatchNormalization())
model.add(layers.Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
model.add(layers.UpSampling2D(size=(2, 2)))
model.add(layers.Conv2D(64, (5, 5), padding='same', activation='tanh'))
model.add(layers.UpSampling2D(size=(2, 2)))
model.add(layers.Conv2D(1, (5, 5), padding='same', activation='tanh'))
return model
    def DISCRIMINATOR(self):
        # Downsamples a 1x28x28 image to a single real/fake probability.
        model = models.Sequential()
model.add(layers.Conv2D(64, (5, 5), padding='same', activation='tanh',
input_shape=(1, 28, 28)))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Conv2D(128, (5, 5), activation='tanh'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(1024, activation='tanh'))
model.add(layers.Dense(1, activation='sigmoid'))
return model
def train_both(self, x):
ln = x.shape[0]
        # First pass: train the discriminator on real (label 1) vs. generated (label 0) samples.
        z = self.get_z(ln)
        w = self.generator.predict_on_batch(z)  # predict_on_batch takes no verbose argument
xw = np.concatenate((x, w))
y2 = [1] * ln + [0] * ln
d_loss = self.discriminator.train_on_batch(xw, y2)
        # Second pass: train the generator through the frozen discriminator to label its output as real.
z = self.get_z(ln)
self.discriminator.trainable = False
g_loss = self.train_on_batch(z, [1] * ln)
self.discriminator.trainable = True
return d_loss, g_loss
################################
# Training the GAN
################################
def combine_images(generated_images):
num = generated_images.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num) / width))
shape = generated_images.shape[2:]
image = np.zeros((height * shape[0], width * shape[1]),
dtype=generated_images.dtype)
for index, img in enumerate(generated_images):
i = int(index / width)
j = index % width
image[i * shape[0]:(i + 1) * shape[0],
j * shape[1]:(j + 1) * shape[1]] = img[0, :, :]
return image
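# Worked example (illustrative): with num=2 images of shape (1, 28, 28),
# width = int(sqrt(2)) = 1 and height = ceil(2 / 1) = 2, so the montage is a
# 56x28 grey-scale array with the two samples stacked vertically.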
def get_x(X_train, index, BATCH_SIZE):
return X_train[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
def save_images(generated_images, output_fold, epoch, index):
# print(generated_images.shape)
image = combine_images(generated_images)
image = image * 127.5 + 127.5
Image.fromarray(image.astype(np.uint8)).save(
output_fold + '/' +
str(epoch) + "_" + str(index) + ".png")
def load_data():
(X_train, y_train), (_, _) = mnist.load_data()
return X_train[:10]
def train(args):
BATCH_SIZE = args.batch_size
epochs = args.epochs
output_fold = args.output_fold
input_dim = args.input_dim
os.makedirs(output_fold, exist_ok=True)
print('Output_fold is', output_fold)
X_train = load_data()
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = X_train.reshape((X_train.shape[0], 1) + X_train.shape[1:])
gan = GAN(input_dim)
d_loss_ll = []
g_loss_ll = []
for epoch in range(epochs):
print("Epoch is", epoch)
print("Number of batches", int(X_train.shape[0] / BATCH_SIZE))
d_loss_l = []
g_loss_l = []
for index in range(int(X_train.shape[0] / BATCH_SIZE)):
x = get_x(X_train, index, BATCH_SIZE)
d_loss, g_loss = gan.train_both(x)
d_loss_l.append(d_loss)
g_loss_l.append(g_loss)
if epoch % 10 == 0 or epoch == epochs - 1:
z = gan.get_z(x.shape[0])
            w = gan.generator.predict_on_batch(z)  # no verbose argument on predict_on_batch
save_images(w, output_fold, epoch, 0)
d_loss_ll.append(d_loss_l)
g_loss_ll.append(g_loss_l)
gan.generator.save_weights(output_fold + '/' + 'generator', True)
gan.discriminator.save_weights(output_fold + '/' + 'discriminator', True)
np.savetxt(output_fold + '/' + 'd_loss', d_loss_ll)
np.savetxt(output_fold + '/' + 'g_loss', g_loss_ll)
################################
# Running the GAN example
################################
def main():
class ARGS:
pass
args = ARGS()
args.batch_size = 2
args.epochs = 1000
args.output_fold = 'GAN_OUT'
args.input_dim = 10
train(args)
if __name__ == '__main__':
main() | {
"content_hash": "757857530b869c98e0362cf3376b30db",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 81,
"avg_line_length": 31.171122994652407,
"alnum_prop": 0.5735117515868932,
"repo_name": "jskDr/keraspp",
"id": "4d2a1012b32df35744475a71372e3e058798e5e2",
"size": "5953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old/gan_cnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5048483"
},
{
"name": "Python",
"bytes": "169240"
}
],
"symlink_target": ""
} |
from glad.loader import BaseLoader
from glad.loader.volt import LOAD_OPENGL_DLL
_GLX_LOADER = \
LOAD_OPENGL_DLL % {'pre':'private', 'init':'open_gl',
'proc':'get_proc', 'terminate':'close_gl'} + '''
bool gladLoadGLX() {
StructToDg structToDg;
structToDg.func = cast(void*)get_proc;
auto dg = *cast(Loader*)&structToDg;
bool status = false;
if(open_gl()) {
status = gladLoadGLX(dg);
close_gl();
}
return status;
}
'''
_GLX_HAS_EXT = '''
private bool has_ext(const(char)* name) {
return true;
}
'''
class GLXVoltLoader(BaseLoader):
def write(self, fobj, apis):
fobj.write('import watt.library;\n')
if not self.disabled:
fobj.write(_GLX_LOADER)
def write_begin_load(self, fobj):
pass
def write_end_load(self, fobj):
fobj.write('\treturn true;\n')
def write_find_core(self, fobj):
pass
def write_has_ext(self, fobj):
fobj.write(_GLX_HAS_EXT) | {
"content_hash": "e6d78b6817c41fa922f5484dc6156905",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 71,
"avg_line_length": 22.75,
"alnum_prop": 0.5864135864135864,
"repo_name": "dbralir/glad",
"id": "dbdeeaeb5821ed22f1c3591f92489b3c5508be50",
"size": "1001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glad/loader/glx/volt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1659"
},
{
"name": "C++",
"bytes": "2445"
},
{
"name": "Python",
"bytes": "88697"
},
{
"name": "Shell",
"bytes": "3321"
}
],
"symlink_target": ""
} |
from reports import *
from plotting import plot_measures, plot_mosaic, plot_all, plot_fd
| {
"content_hash": "0d9618730a5b355228dc47464c6a6b41",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 66,
"avg_line_length": 44.5,
"alnum_prop": 0.7865168539325843,
"repo_name": "wangkangcheng/ccc",
"id": "c874f8ab80a76e844b31c7c4a375d1e6f0796499",
"size": "226",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "qap/viz/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "15218"
},
{
"name": "Python",
"bytes": "250306"
},
{
"name": "R",
"bytes": "7072"
},
{
"name": "Ruby",
"bytes": "1699"
},
{
"name": "Shell",
"bytes": "4639"
}
],
"symlink_target": ""
} |
from utils import TreeNode, construct
class Solution:
def maxDepth(self, root: TreeNode) -> int:
if root is None:
return 0
root.level = 1
queue = [root]
i, last = 0, 0
while i <= last:
node = queue[i]
if node.left:
node.left.level = node.level + 1
queue.append(node.left)
last += 1
if node.right:
node.right.level = node.level + 1
queue.append(node.right)
last += 1
i += 1
return queue[-1].level
if __name__ == "__main__":
sol = Solution()
root = construct([3, 9, 20, None, None, 15, 7])
print(sol.maxDepth(root))
root = construct([3, None, 20])
print(sol.maxDepth(root))
root = construct([])
print(sol.maxDepth(root))
| {
"content_hash": "7bbb75c4b19f5c893b655afe8be47b92",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 51,
"avg_line_length": 26.84375,
"alnum_prop": 0.48661233993015135,
"repo_name": "shenfei/oj_codes",
"id": "4de83bd5990e7e7d4463fb546c03386c03a415af",
"size": "1021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/python/n104_Maximum_Depth_of_Binary_Tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "14397"
},
{
"name": "Python",
"bytes": "154341"
}
],
"symlink_target": ""
} |
'''
New Integration Test for Starting Windows VM with Multi Data Volumes.
@author: Mirabel
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.resource_operations as res_ops
import os
import telnetlib
import time
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
cpuNum = 6
memorySize = 2048 * 1024 * 1024
new_offering = test_lib.lib_create_instance_offering(cpuNum = cpuNum,\
memorySize = memorySize)
test_obj_dict.add_instance_offering(new_offering)
new_offering_uuid = new_offering.uuid
l3_name = os.environ.get('l3VlanNetworkName1')
disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
data_volume_uuids = [disk_offering.uuid,disk_offering.uuid,disk_offering.uuid]
data_volume_num = 3
volume_list = []
try:
vm = test_stub.create_windows_vm_2(l3_name, instance_offering_uuid = new_offering_uuid)
test_obj_dict.add_vm(vm)
for i in range(data_volume_num):
volume_list.append(test_stub.create_volume())
test_obj_dict.add_volume(volume_list[i])
volume_list[i].attach(vm)
except:
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_fail('Create Windows VM with Multi Data volumes Test Fail')
vm_inv = vm.get_vm()
print"vm_uuid:%s"%(vm_inv.uuid)
host_inv = test_lib.lib_find_host_by_vm(vm_inv)
host_ip = host_inv.managementIp
host_username = host_inv.username
print"host_username%s"%(host_username)
host_password = host_inv.password
cmd = "virsh domiflist %s|grep vnic|awk '{print $3}'"% (vm_inv.uuid)
br_eth0 = test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password, cmd, 60)
cmd2 = 'ip a add 10.11.254.254/8 dev %s'% (br_eth0)
test_lib.lib_execute_ssh_cmd(host_ip, host_username, host_password, cmd2, 60)
vm_ip = vm.get_vm().vmNics[0].ip
test_lib.lib_wait_target_up(vm_ip, '23', 1200)
vm_username = os.environ.get('winImageUsername')
vm_password = os.environ.get('winImagePassword')
tn=telnetlib.Telnet(vm_ip)
tn.read_until("login: ")
tn.write(vm_username+"\r\n")
tn.read_until("password: ")
tn.write(vm_password+"\r\n")
tn.read_until(vm_username+">")
tn.write("wmic diskdrive\r\n")
vm_data_volumes=tn.read_until(vm_username+">")
tn.close()
if len(vm_data_volumes.split('\r\n')) != (data_volume_num + 6):
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_fail('Create Windows VM with Multi Data volumes Test Fail')
try:
vm.reboot()
except:
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_fail('Reboot Windows VM with Multi Data volumes fail')
vm_ip = vm.get_vm().vmNics[0].ip
test_lib.lib_wait_target_up(vm_ip, '23', 1200)
vm_username = os.environ.get('winImageUsername')
vm_password = os.environ.get('winImagePassword')
tn=telnetlib.Telnet(vm_ip)
tn.read_until("login: ")
tn.write(vm_username+"\r\n")
tn.read_until("password: ")
tn.write(vm_password+"\r\n")
tn.read_until(vm_username+">")
tn.write("wmic diskdrive\r\n")
vm_data_volumes=tn.read_until(vm_username+">")
tn.close()
if len(vm_data_volumes.split('\r\n')) == (data_volume_num + 6):
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_pass('Create Windows VM with Multi Data Volumes Test Success')
else:
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_fail('Create Windows VM with Multi Data volumes Test Fail')
#Will be called only if exception happens in test().
def error_cleanup():
global vm
if vm:
vm.destroy()
| {
"content_hash": "858ea2407d523fa274a225438aa924ca",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 99,
"avg_line_length": 37.61904761904762,
"alnum_prop": 0.6453164556962026,
"repo_name": "zstackorg/zstack-woodpecker",
"id": "4c53f224f8004891a9fd7e126faa34fd10939cfa",
"size": "3950",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/virtualrouter/windows/test_windows_vm_multi_data_volumes.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "46522"
},
{
"name": "Makefile",
"bytes": "692"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "2891030"
},
{
"name": "Shell",
"bytes": "54266"
}
],
"symlink_target": ""
} |
'''
Created on Oct 29, 2015
@author: ev0l
'''
from handlers import BaseHandler
class LogOutHandler(BaseHandler):
def get(self):
self.clear_all_cookies()
self.redirect("/")
route = [(r"/logout", LogOutHandler), ]
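# Hedged usage sketch: the module-level ``route`` list above follows tornado's
# (url_pattern, handler_class) tuple shape, so an application entry point could
# plausibly aggregate it as in the commented lines below. The import path and
# application settings are illustrative assumptions, not facts about this project.
#
#   import tornado.web
#   from handlers import logout
#
#   application = tornado.web.Application(handlers=logout.route)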
| {
"content_hash": "14e4ffa0ace07a81b490d0f664ffb289",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 39,
"avg_line_length": 16.857142857142858,
"alnum_prop": 0.6440677966101694,
"repo_name": "evolsnow/tornado-web",
"id": "578961f695b1d3e728efe2fd01f3595bdcd4743c",
"size": "236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers/logout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3702"
},
{
"name": "HTML",
"bytes": "7391"
},
{
"name": "JavaScript",
"bytes": "3233"
},
{
"name": "Python",
"bytes": "17326"
}
],
"symlink_target": ""
} |
import unittest
from .test_oauth import UAOauth2ClientTest
def suite():
    # Collect every test method defined on UAOauth2ClientTest into a suite.
    return unittest.TestLoader().loadTestsFromTestCase(UAOauth2ClientTest)
if __name__ == '__main__':
    unittest.TextTestRunner().run(suite())
| {
"content_hash": "a8b5d2a16cb1fb15bc0cb2e7d09da841",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 57,
"avg_line_length": 20.7,
"alnum_prop": 0.6859903381642513,
"repo_name": "igorfala/python-under-armour",
"id": "1f953934f7e77a792a7a271571302e4b3a8a7d11",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15601"
}
],
"symlink_target": ""
} |
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class StorageAccountsOperations(object):
"""StorageAccountsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def check_name_availability(
self, account_name, custom_headers=None, raw=False, **operation_config):
"""
Checks that account name is valid and is not in use.
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name:
:class:`StorageAccountCheckNameAvailabilityParameters
<fixtures.acceptancetestsstoragemanagementclient.models.StorageAccountCheckNameAvailabilityParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`CheckNameAvailabilityResult
<fixtures.acceptancetestsstoragemanagementclient.models.CheckNameAvailabilityResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(account_name, 'StorageAccountCheckNameAvailabilityParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckNameAvailabilityResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""
Asynchronously creates a new storage account with the specified
parameters. Existing accounts cannot be updated with this API and
should instead use the Update Storage Account API. If an account is
already created and subsequent PUT request is issued with exact same
set of properties, then HTTP 200 would be returned.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param parameters: The parameters to provide for the created account.
:type parameters: :class:`StorageAccountCreateParameters
<fixtures.acceptancetestsstoragemanagementclient.models.StorageAccountCreateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`StorageAccount
<fixtures.acceptancetestsstoragemanagementclient.models.StorageAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def delete(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""
Deletes a storage account in Microsoft Azure.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_properties(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""
Returns the properties for the specified storage account including but
not limited to name, account type, location, and account status. The
ListKeys operation should be used to retrieve storage keys.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccount
<fixtures.acceptancetestsstoragemanagementclient.models.StorageAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""
Updates the account type or tags for a storage account. It can also be
used to add a custom domain (note that custom domains cannot be added
via the Create operation). Only one custom domain is supported per
storage account. This API can only be used to update one of tags,
accountType, or customDomain per call. To update multiple of these
properties, call the API multiple times with one change per call.
This call does not change the storage keys for the account. If you
want to change storage account keys, use the RegenerateKey operation.
The location and name of the storage account cannot be changed after
creation.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param parameters: The parameters to update on the account. Note that
only one property can be changed at a time using this API.
:type parameters: :class:`StorageAccountUpdateParameters
<fixtures.acceptancetestsstoragemanagementclient.models.StorageAccountUpdateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccount
<fixtures.acceptancetestsstoragemanagementclient.models.StorageAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_keys(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""
Lists the access keys for the specified storage account.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the storage account.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountKeys
<fixtures.acceptancetestsstoragemanagementclient.models.StorageAccountKeys>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, custom_headers=None, raw=False, **operation_config):
"""
Lists all the storage accounts available under the subscription. Note
that storage keys are not returned; use the ListKeys operation for
this.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountPaged
<fixtures.acceptancetestsstoragemanagementclient.models.StorageAccountPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""
Lists all the storage accounts available under the given resource
group. Note that storage keys are not returned; use the ListKeys
operation for this.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountPaged
<fixtures.acceptancetestsstoragemanagementclient.models.StorageAccountPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def regenerate_key(
self, resource_group_name, account_name, key_name=None, custom_headers=None, raw=False, **operation_config):
"""
Regenerates the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the
user’s subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3
and 24 characters in length and use numbers and lower-case letters
only.
:type account_name: str
:param key_name: Possible values include: 'key1', 'key2'
:type key_name: str or :class:`KeyName
<storagemanagementclient.models.KeyName>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountKeys
<fixtures.acceptancetestsstoragemanagementclient.models.StorageAccountKeys>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
regenerate_key = models.StorageAccountRegenerateKeyParameters(key_name=key_name)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(regenerate_key, 'StorageAccountRegenerateKeyParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
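# Hedged usage sketch: these operations are normally reached through a generated
# service client that supplies the serializer, deserializer and credentials. The
# client class name and credential object below are assumptions made only for
# illustration; the operation names, arguments and the key_name values ('key1',
# 'key2') come from the method signatures and docstrings in this file.
#
#   client = StorageManagementClient(credentials, subscription_id)
#   keys = client.storage_accounts.list_keys('my-resource-group', 'myaccount')
#   new_keys = client.storage_accounts.regenerate_key(
#       'my-resource-group', 'myaccount', key_name='key1')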
| {
"content_hash": "26e5a681d1ea18101c751b61aec336c8",
"timestamp": "",
"source": "github",
"line_count": 671,
"max_line_length": 154,
"avg_line_length": 46.666169895678095,
"alnum_prop": 0.6528278989557053,
"repo_name": "sharadagarwal/autorest",
"id": "d2583af6ef3d7e11bd00adead7d7ba9dae0d0fe4",
"size": "31799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/operations/storage_accounts_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "12942"
},
{
"name": "C#",
"bytes": "11450022"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "4693719"
},
{
"name": "JavaScript",
"bytes": "4685941"
},
{
"name": "PowerShell",
"bytes": "29614"
},
{
"name": "Python",
"bytes": "2274436"
},
{
"name": "Ruby",
"bytes": "232193"
},
{
"name": "Shell",
"bytes": "423"
},
{
"name": "TypeScript",
"bytes": "179577"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
from registration.views import activate
from registration.views import register
urlpatterns = patterns('',
url(r'^register/$',
register,
{'backend': 'userprofile.backends.simple.SimpleBackend'},
name='registration_register'),
)
| {
"content_hash": "0b7943a76a2c2a0c31f63feca60aebc2",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 61,
"avg_line_length": 23.916666666666668,
"alnum_prop": 0.7317073170731707,
"repo_name": "praekelt/django-userprofile",
"id": "5bc6865f379e0cab17ae13ca8df88fc37ec147cb",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userprofile/backends/simple/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "258"
},
{
"name": "Python",
"bytes": "17725"
}
],
"symlink_target": ""
} |
import importlib
import pianette.config
import sys
from pianette.Pianette import Pianette
from pianette.PianetteArgumentParser import PianetteArgumentParser
from pianette.utils import Debug
Debug.println("INFO", " ")
Debug.println("INFO", " ################################## ")
Debug.println("INFO", " | PIANETTE | ")
Debug.println("INFO", " ################################## ")
Debug.println("INFO", " ")
configobj = pianette.config.get_all_configobj()
parser = PianetteArgumentParser(configobj=configobj)
args = parser.parse_args()
# Instanciate the global Pianette
# Its responsibility is to translate Piano actions to Console actions
pianette = Pianette(configobj=configobj)
# We MUST select a player before we select a game.
# This allows for per-player mappings
# The game can be changed afterwards, but not the player, as we don't expect
# to be able to unplug the controller from the console.
pianette.select_player(args.selected_player)
pianette.select_game(args.selected_game)
if args.enabled_sources is not None:
for source in args.enabled_sources:
pianette.load_source(source)
# Run the main loop of interactive Pianette
Debug.println("NOTICE", "Entering main loop")
pianette.cmd.cmdloop()
| {
"content_hash": "e1c22ae416ad140a9d607d04d11e935a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 76,
"avg_line_length": 33.648648648648646,
"alnum_prop": 0.7132530120481928,
"repo_name": "tchapi/pianette",
"id": "3dc86c2ec6e724ee83aaa26e05e8506e067dbd26",
"size": "1524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "4155"
},
{
"name": "CSS",
"bytes": "5331"
},
{
"name": "HTML",
"bytes": "14483"
},
{
"name": "JavaScript",
"bytes": "2539"
},
{
"name": "Python",
"bytes": "76257"
},
{
"name": "Shell",
"bytes": "1708"
}
],
"symlink_target": ""
} |
from aquilon.exceptions_ import NotFoundException, ArgumentError
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.host import hostname_to_host
from aquilon.worker.dbwrappers.resources import get_resource_holder
from aquilon.worker.dbwrappers.change_management import ChangeManagement
class CommandDelClusterMemberPriority(BrokerCommand):
requires_plenaries = True
resource_class = None
def render(self, session, logger, plenaries, cluster, resourcegroup, member,
user, justification, reason, **kwargs): # pylint: disable=W0613
holder = get_resource_holder(session, logger, None, cluster,
None, resourcegroup, compel=False)
# Validate ChangeManagement
cm = ChangeManagement(session, user, justification, reason, logger, self.command, **kwargs)
cm.consider(holder)
cm.validate()
dbhost = hostname_to_host(session, member)
name = self.resource_class.__mapper__.polymorphic_identity
dbresource = self.resource_class.get_unique(session, name=name,
holder=holder, compel=True)
dbcluster = dbresource.holder.toplevel_holder_object
if not dbhost.cluster or dbhost.cluster != dbcluster:
raise ArgumentError("{0} is not a member of {1:l}."
.format(dbhost, dbcluster))
plenaries.add(holder.holder_object)
plenaries.add(dbresource)
try:
del dbresource.entries[dbhost]
except KeyError:
raise NotFoundException("{0} has no {1:c} entry."
.format(dbhost, dbresource))
# Mostly cosmetic - don't leave empty containers behind
if not dbresource.entries:
holder.resources.remove(dbresource)
session.flush()
plenaries.write()
return
| {
"content_hash": "387d07235784659c9641b3d58f69ba63",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 99,
"avg_line_length": 38.05882352941177,
"alnum_prop": 0.639361154044307,
"repo_name": "quattor/aquilon",
"id": "1ebbfe1ccd3ac4d57f4260e92fa06616bf712b5a",
"size": "2635",
"binary": false,
"copies": "1",
"ref": "refs/heads/upstream",
"path": "lib/aquilon/worker/commands/del_cluster_member_priority.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "1823"
},
{
"name": "Makefile",
"bytes": "5732"
},
{
"name": "Mako",
"bytes": "4178"
},
{
"name": "PLSQL",
"bytes": "102109"
},
{
"name": "PLpgSQL",
"bytes": "8091"
},
{
"name": "Pan",
"bytes": "1058"
},
{
"name": "Perl",
"bytes": "6057"
},
{
"name": "Python",
"bytes": "5884984"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "33547"
},
{
"name": "Smarty",
"bytes": "4603"
}
],
"symlink_target": ""
} |
import cvxpy
from .solver import Solver
from sklearn.utils import check_array
class NuclearNormMinimization(Solver):
"""
Simple implementation of "Exact Matrix Completion via Convex Optimization"
by Emmanuel Candes and Benjamin Recht using cvxpy.
"""
def __init__(
self,
require_symmetric_solution=False,
min_value=None,
max_value=None,
error_tolerance=0.0001,
max_iters=50000,
verbose=True):
"""
Parameters
----------
require_symmetric_solution : bool
Add symmetry constraint to convex problem
min_value : float
Smallest possible imputed value
max_value : float
Largest possible imputed value
error_tolerance : bool
Degree of error allowed on reconstructed values. If omitted then
defaults to 0.0001
max_iters : int
Maximum number of iterations for the convex solver
verbose : bool
Print debug info
"""
Solver.__init__(
self,
min_value=min_value,
max_value=max_value)
self.require_symmetric_solution = require_symmetric_solution
self.error_tolerance = error_tolerance
self.max_iters = max_iters
self.verbose = verbose
def _constraints(self, X, missing_mask, S, error_tolerance):
"""
Parameters
----------
X : np.array
Data matrix with missing values filled in
missing_mask : np.array
Boolean array indicating where missing values were
S : cvxpy.Variable
Representation of solution variable
"""
ok_mask = ~missing_mask
masked_X = cvxpy.multiply(ok_mask, X)
masked_S = cvxpy.multiply(ok_mask, S)
abs_diff = cvxpy.abs(masked_S - masked_X)
close_to_data = abs_diff <= error_tolerance
constraints = [close_to_data]
if self.require_symmetric_solution:
constraints.append(S == S.T)
if self.min_value is not None:
constraints.append(S >= self.min_value)
if self.max_value is not None:
constraints.append(S <= self.max_value)
return constraints
def _create_objective(self, m, n):
"""
Parameters
----------
m, n : int
            Dimensions of the solution matrix
Returns the objective function and a variable representing the
solution to the convex optimization problem.
"""
# S is the completed matrix
shape = (m, n)
S = cvxpy.Variable(shape, name="S")
norm = cvxpy.norm(S, "nuc")
objective = cvxpy.Minimize(norm)
return S, objective
def solve(self, X, missing_mask):
X = check_array(X, force_all_finite=False)
m, n = X.shape
S, objective = self._create_objective(m, n)
constraints = self._constraints(
X=X,
missing_mask=missing_mask,
S=S,
error_tolerance=self.error_tolerance)
problem = cvxpy.Problem(objective, constraints)
problem.solve(
verbose=self.verbose,
solver=cvxpy.CVXOPT,
max_iters=self.max_iters,
# use_indirect, see: https://github.com/cvxgrp/cvxpy/issues/547
use_indirect=False)
return S.value
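# Hedged usage sketch: a minimal, illustrative call into solve(). Only the names
# defined in this module (NuclearNormMinimization and its solve method) are
# relied on; the toy matrix below is invented, and in practice the class is
# usually driven through the shared Solver front end rather than called directly.
#
#   import numpy as np
#   X = np.random.randn(10, 10)
#   missing_mask = np.zeros(X.shape, dtype=bool)
#   missing_mask[0, 1] = missing_mask[3, 4] = True
#   X[missing_mask] = 0.0
#   X_completed = NuclearNormMinimization(max_iters=10000).solve(X, missing_mask)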
| {
"content_hash": "7bca3112742a1e07adc64de86269ab0c",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 78,
"avg_line_length": 29.775862068965516,
"alnum_prop": 0.569195136074117,
"repo_name": "iskandr/fancyimpute",
"id": "30c66b635561d3582cf392f27edad74131afb2b0",
"size": "3999",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fancyimpute/nuclear_norm_minimization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "95506"
}
],
"symlink_target": ""
} |
import os
import sys
import time
import getpass
sys.path.append("./library")
import argparse
import ConfigParser
from utils import *
from na_funcs import *
from cisco_funcs import *
def check_on_switch(mds, zoneset, pwwns, zones, vsan, fabric, switch):
non_existent_zones = []
alias_exists = {}
zoneset_existent = False
# http://www.cisco.com/c/en/us/td/docs/switches/datacenter/mds9000/sw/6_2/configuration/guides/config_limits/b_mds_9000_configuration_limits_6_2.html
# Table 2 Fabric-level Fibre Channel Configuration Limits
# Note: The preferred number of members per zone is 2, and the maximum recommended limit is 50.
    smartzone_members_limit = 50
    members = 0
print bcolors.OKGREEN + "Initiate validations ...\n" + bcolors.ENDC
print bcolors.BOLD + "Validating ZoneSet %s and VSAN ID %s on MDS..." % (zoneset, vsan) + bcolors.ENDC
if zoneset_exists(mds, zoneset, vsan) is not False:
zoneset_existent = True
for pwwn in pwwns:
print bcolors.BOLD + "Validating if device-alias exists with pwwn %s on MDS..." % pwwn + bcolors.ENDC
alias = device_alias_exists(mds, pwwn)
if alias:
alias_exists[pwwn] = alias
for zone_name in zones.keys():
if len(zone_name) > 1:
print bcolors.BOLD + "Validating %s on MDS..." % zone_name.strip() + bcolors.ENDC
if zone_exists(mds, zone_name, vsan) is False:
non_existent_zones.append(zone_name)
print bcolors.BOLD + "Validating number of members of %s on MDS..." % zone_name.strip() + bcolors.ENDC
members = count_smartzone_members(mds, zone_name)
if alias_exists:
print bcolors.OKBLUE + "\n### INFO! Some device-alias already exists ... ###\n" + bcolors.ENDC
for pwwn, alias in alias_exists.iteritems():
print bcolors.BOLD + "device-alias %s already exists for %s" % (alias, pwwn) + bcolors.ENDC
raw_input('\nPress ' + bcolors.BOLD + '[enter]' + bcolors.ENDC + ' to continue ...')
if zoneset_existent is False or len(non_existent_zones) > 0 or members >= smartzone_members_limit:
        print bcolors.WARNING + "\n### ATTENTION! Validation found some errors ... ###\n" + bcolors.ENDC
if zoneset_existent is False:
print bcolors.FAIL + "ZoneSet \"%s\" and/or VSAN ID %s doesn't exists!\n" % (zoneset, vsan) + bcolors.ENDC
if len(non_existent_zones) > 0:
for zone in non_existent_zones:
print bcolors.FAIL + "Zone \"%s\" doesn't exists!" % zone.strip() + bcolors.ENDC
if members >= smartzone_members_limit:
print bcolors.FAIL + "Zone \"%s\" has more then 50 members\n" % zone_name.strip() + bcolors.ENDC
if confirm("Are you sure you want to continue?"):
generate_smartzones(config_file, zoneset, vsan, fabric, switch)
else:
        print bcolors.OKGREEN + "\nValidation successful!" + bcolors.ENDC
generate_smartzones(config_file, zoneset, vsan, fabric, switch)
def generate_smartzones(config_file, zoneset, vsan, fabric, switch=None, check=False, mds=None):
try:
config = ConfigParser.ConfigParser()
config.read(config_file)
except Exception, e:
print bcolors.FAIL + "Error reading config file!" + bcolors.ENDC
print bcolors.BOLD + "Exception:" + bcolors.ENDC + "\n%s" % e
exit(1)
hosts_per_zone = {}
pwwns = []
for host in config.sections():
pwwns.append(config.get(host, fabric))
for host in config.sections():
for zone in config.get(host, 'zones').split(','):
hosts_per_zone[zone] = []
for host in config.sections():
for zone in config.get(host, 'zones').split(','):
hosts_per_zone[zone].append(host)
if check:
check_on_switch(mds, zoneset, pwwns, hosts_per_zone, vsan, fabric, switch)
else:
if switch:
print bcolors.OKGREEN + "\nGenerating commands to switch %s ... \n" % switch + bcolors.ENDC
else:
print bcolors.OKGREEN + "\nGenerating commands to FABRIC %s ... \n" % fabric + bcolors.ENDC
time.sleep(3)
print "config t"
print "device-alias database"
for host in config.sections():
print " device-alias name %s pwwn %s" % (host.strip(), config.get(host, fabric))
print "device-alias commit\n"
for zone, hosts in hosts_per_zone.iteritems():
if len(zone) > 1:
print "zone name %s vsan %s" % (zone.strip(), vsan)
for host in hosts:
print " member device-alias %s initiator" % host.strip()
print "exit\n"
print "zoneset activate name %s vsan %s\n" % (zoneset, vsan)
print "copy running-config startup-config\n"
if __name__ == "__main__":
arguments = argparse.ArgumentParser(
        description='Generate SmartZone commands from an input config file listing short hostnames, pwwns and the zones to which each host will belong.')
arguments.add_argument(
'-c','--config_hosts', required=True, type=str,
help='Configuration file with hosts, pwwns and zones')
arguments.add_argument(
'--vsan', required=True, type=str,
help='VSAN ID')
arguments.add_argument(
'--zoneset', required=True, type=str,
help='ZoneSet name')
arguments.add_argument(
'-f','--fabric', required=True, type=str, choices=['impar', 'par'],
help='Fabric side')
arguments.add_argument(
'--check',default=False, action='store_true',
help='[optional] Start a validation process by connection on MDS switch of all params')
arguments.add_argument(
'-s','--switch', required=False, type=str,
help='MDS switch fqdn or IP')
arguments.add_argument(
'-u','--username', required=False, type=str,
help='[optional] Username to ssh into mds switch. Alternate: set environment variable MDS_USERNAME. If neither exists, defaults to current OS username')
arguments.add_argument(
'-p','--password', required=False, type=str,
help='[optional] Password to ssh into mds switch. Alternate: set environment variable MDS_PASSWORD. If unset use_keys defaults to True.')
arguments.add_argument(
'--use_keys', required=False, action='store_true',
        help='[optional] Use ssh keys to log into switch. If set, the key file must be passed as a param')
arguments.add_argument(
'--key_file', required=False, type=str,
help='[optional] filename for ssh key file')
args = arguments.parse_args()
config_file = args.config_hosts
if not os.path.exists(config_file):
print bcolors.FAIL + "%s: No such file or directory!" % config_file + bcolors.ENDC
exit(1)
vsan = args.vsan
zoneset = args.zoneset
fabric = args.fabric
switch = None
if args.check:
if args.password :
use_keys = False
password = args.password
elif os.getenv('MDS_PASSWORD') :
use_keys = False
password = os.getenv('MDS_PASSWORD')
else :
use_keys = True
password = ''
if args.username :
username = args.username
elif os.getenv('MDS_USERNAME') :
username = os.getenv('MDS_USERNAME')
else:
username = getpass.getuser()
switch = args.switch
# Params to connect on MDS
mds = {
'device_type': 'cisco_nxos',
'ip': switch,
'verbose': False,
'username': username,
'password': password,
'use_keys': use_keys
}
generate_smartzones(config_file, zoneset, vsan, fabric, switch=switch, check=True, mds=mds)
else:
generate_smartzones(config_file, zoneset, vsan, fabric)
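# Hedged sketch of the expected --config_hosts file: generate_smartzones reads it
# with ConfigParser, one section per host, pwwn options named after the --fabric
# choices ('impar'/'par') and a comma-separated 'zones' option. The concrete
# hostnames, pwwns and zone names below are invented for illustration only.
#
#   [esxhost01]
#   impar = 50:01:43:80:aa:bb:cc:01
#   par = 50:01:43:80:aa:bb:cc:02
#   zones = Z_STORAGE_A,Z_STORAGE_B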
| {
"content_hash": "007e1ea35eadd31e26506978eb73d0c1",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 161,
"avg_line_length": 39.7979797979798,
"alnum_prop": 0.6138324873096447,
"repo_name": "scottharney/python-mdszoning",
"id": "21495b33ee4516f2639ddb1bcaec64f13b7da72d",
"size": "7960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartzone/gen_smartzones.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "36475"
}
],
"symlink_target": ""
} |
import unittest
from SevenStories import game
from SevenStories import parse
class TestGame(unittest.TestCase):
def setUp(self):
"""Set up for all the tests
Creates a sample character and stores the GameMap object.
Creates a sample load with 'health name' as sample input.
"""
self.sample_input = "health name"
self.sample_name = "testcharacter"
self.game_map = game.create_character(lambda x: "test")
self.load = parse.parse_command(self.sample_input)
def tearDown(self):
import os
os.remove(game.get_saves_dir() + self.game_map.player.name.lower() + ".dat")
def test_play_game(self):
"""Testing play_game function
Using 'save quit' as example input.
Expecting play_game to not only run successfully, but in this case,
it should also return 'quit', which will be stored as arg.
If this test passes, then the execute function's arg return works.
"""
arg = game.play_game(self.game_map, input=lambda x: "save quit")
self.assertEqual(arg, "quit", "Arg was not quit!")
def test_execute(self):
"""Testing execute function
Sample load uses 'health name' as example input
Simply testing to ensure the function runs successfully.
"""
game.execute(self.game_map, self.load)
def test_get_saves_dir(self):
        """Testing get_saves_dir function
        The project directory is expected to contain the saves directory,
        such as 'SevenStories/saves'. This test will check the returned
        absolute path from the get_saves_dir function and ensure that
        the ending is 'SevenStories/saves'.
        Test handles differences between how operating systems handle
        slashes in directory paths.
        """
        import os
saves_dir = game.get_saves_dir()
if os.name == "posix":
self.assertEqual(saves_dir[len(saves_dir) - 19:],
"SevenStories/saves/",
"Incorrect saves directory returned!")
else:
self.assertEqual(saves_dir[len(saves_dir) - 19:],
"SevenStories\\saves\\",
"Incorrect saves directory returned!")
def test_get_simple_answer(self):
"""Testing get_simple_answer function
Running the two most important sample answers, 'yes' and 'no'.
"""
answer1 = game.get_simple_answer("Test question.", input=lambda x: "yes")
answer2 = game.get_simple_answer("Test question.", input=lambda x: "no")
self.assertTrue(answer1, "First answer failed to return True!")
self.assertFalse(answer2, "Second answer failed to return False!")
def test_save_game(self):
"""Testing save_game function
Simply testing to ensure the function runs successfully.
"""
game.save_game(self.game_map, echo=False)
def test_load_game(self):
"""Testing load_game function
Ensuring that load_game returns a GameMap object
and ensuring that the correct game_map is loaded
by checking the player's name within it.
"""
sample_game_map = game.load_game("test")
self.assertIsInstance(sample_game_map, game.gamemap.GameMap,
"Did not return a GameMap object!")
self.assertEqual(sample_game_map.player.name, "test",
"Failed to load the correct GameMap object!")
def test_reset_character(self):
"""Testing reset_character function
Simply testing to ensure the function runs successfully.
"""
game.reset_character("test", echo=False)
def test_create_character(self):
"""Testing create_character function
Creating a sample character with sample name.
Ensuring that create_character returns the proper GameMap object
Deletes the sample character after
"""
sample_game_map = game.create_character(input=lambda x: self.sample_name)
self.assertIsInstance(sample_game_map, game.gamemap.GameMap,
"Did not return a GameMap object!")
self.assertEqual(sample_game_map.player.name, self.sample_name,
"Failed to create the correct GameMap object!")
game.delete_character(sample_game_map.player.name, echo=False)
def test_delete_character(self):
"""Testing delete_character function
Creating a sample character with sample name.
Deleting that character after it's created.
Checking directory to ensure that the character does not exist.
"""
sample_game_map = game.create_character(input=lambda x: self.sample_name)
game.delete_character(sample_game_map.player.name.lower(), echo=False)
| {
"content_hash": "aebd4565f5c7324a426369b2fe0d6443",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 84,
"avg_line_length": 37.61538461538461,
"alnum_prop": 0.62719836400818,
"repo_name": "huntermalm/SevenStories",
"id": "bfc9bc0c07c1ee8bc888c273d2f18b5bd9ec4b1b",
"size": "4890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42007"
}
],
"symlink_target": ""
} |
from pyquery import PyQuery as pq
import calendar
import urllib2
import codecs
import time
import re
import os, os.path
def shouldRetrieve(pseudoToCheck):
try:
if os.stat("datafiles/tweets_party_generales.csv").st_size == 0:
print "return because file empty"
return True
except:
print "return because file does not exist"
return True
tweets = codecs.open("datafiles/tweets_party_generales.csv", "r", "utf-8")
for line in reversed(tweets.readlines()):
line = line.strip()
fields = line.split(',')
pseudo = fields[1][1:-1]
if pseudo.lower() == pseudoToCheck.lower():
halfDay = 60 * 60 * 12
timeElapsed = int(time.time()) - int(fields[6][1:-1])
tweets.close()
return timeElapsed > halfDay
tweets.close()
return True
def retrieveFromList(date_min = "", date_max = ""):
parties = codecs.open("datafiles/pseudo_parties.txt", "r", "utf-8") # Parties + level + pseudos
for party in parties:
fields = party.strip().split(",")
party_name = fields[0]
level = fields[1]
if fields[2][1:-1]:
pseudo = fields[2]
else:
continue
partyLine = '"' + party_name + '","' + level + '","' + pseudo + '"'
print "Retrieving tweets for", pseudo
if shouldRetrieve(pseudo):
retrievePages(partyLine, party_name, pseudo, date_min, date_max)
parties.close()
def writeAccounts(page, partyLine, pseudo):
with codecs.open("datafiles/parties_accounts.csv","a+","utf-8") as accounts:
for line in accounts:
foundPseudo = line.strip().split(',')[2][1:-1]
if foundPseudo == pseudo:
return
location = cleanText(page(".ProfileHeaderCard-location").text())
biography = cleanText(page(".ProfileHeaderCard-bio").text())
subscription_ini = page(".ProfileHeaderCard-joinDateText").attr("title")
subscription = ""
if subscription_ini:
subscription = strToTimestamp(subscription_ini.split(" - ")[1])
accounts.write('%s,"%s","%s","%s"\n' % (partyLine, location, subscription, biography))
def retrievePages(partyLine, party_name, pseudo, date_min = "", date_max = ""):
data = codecs.open("datafiles/tweets_party_generales.csv", "a", "utf-8")
# If no minimal date is specified, the program searches in the file the last tweets written
if date_min == "":
timestamp_min = findTimestampMax(pseudo)
else:
timestamp_min = strToTimestamp(date_min)
# Max. date by default is the date at which the program is launched
if date_max == "":
timestamp_max = int(time.time())
else:
timestamp_max = strToTimestamp(date_max)
# Retrieve informations about the candidate
page = pq("https://twitter.com/" + pseudo, headers={'Accept-Language': 'en-US,en;q=0.5'})
writeAccounts(page, partyLine, pseudo)
ret = retrieveTweets(party_name, pseudo, page, timestamp_min, timestamp_max, 0)
if len(ret) == 0:
t = int(time.time())
ret = ((6 * '"%s",' + '"%d"\n') % (party_name, pseudo, "", "", t, "", t))
data.write(ret)
data.close()
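# Recursively scrape the profile timeline: parse the tweets of the current page,
# then follow Twitter's max_position pagination until timestamp_min is reached or
# no more items are available.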
def retrieveTweets(party_name, pseudo, page, timestamp_min, timestamp_max, timestamp_old, first_page = True, has_more_items = True):
if first_page:
css = "div.stream div.tweet"
else:
css = "div.tweet"
tweets = page(css)
params = ""
tweet_id = ""
# Retrieve information for each tweet
for tweet in tweets:
tweetdom = pq(tweet)
content = cleanText(tweetdom("p.tweet-text").text())
tweet_author = cleanText(tweetdom("strong.fullname").eq(0).text())
tweet_pseudo = tweetdom("span.username").eq(0).text()
if tweet_pseudo == "":
continue
# If tweet is a retweet, its timestamp is modified in order for the program to continue
if tweet_pseudo.lower() != '@ ' + pseudo.lower():
timestamp = int(tweetdom("span._timestamp").attr("data-time"))
else:
timestamp = timestamp_old
# Retrieve page's last tweet id to create next page's url later
tweet_id = tweetdom.attr("data-item-id")
# Do not take into account pinned tweets
if tweetdom("span.js-pinned-text"):
print "Pinned tweet found. Continue."
continue
# Skip tweets until date_max, and then retrieve them until date_min
if timestamp == 0:
continue
if timestamp >= timestamp_max:
continue
if timestamp > 0 and timestamp <= timestamp_min:
return params
timestamp_old = timestamp
params += ((6 * '"%s",' + '"%d"\n') % (party_name, pseudo, tweet_author, tweet_pseudo, timestamp, content, int(time.time())))
if not has_more_items:
return params
# Create next page's url and open it
base = "https://twitter.com/i/profiles/show/"
parameters = "/timeline?include_available_features=1&include_entities=1&max_position="
url = base + pseudo + parameters + tweet_id
req = urllib2.urlopen(url)
# Read code in utf-8
obj = unicode(req.read(), "utf-8")
# Transform next page's json code (obtained using Live HTTP Headers) into html
obj = obj.decode("unicode_escape").replace('\\/', '/')
more_items = (obj.split("\"has_more_items\":")[1].split(',')[0] == "true")
obj = obj.split("\"items_html\":\"")[1][:-2]
if not obj.strip():
print "No tweets available"
return params
# Recall function with first page option set to "False"
params += retrieveTweets(party_name, pseudo, pq(obj), timestamp_min, timestamp_max, timestamp_old, False, more_items)
return params
# Convert date to timestamp
def strToTimestamp(date_min):
time_struct = time.strptime(date_min, "%d %b %Y")
timestamp = calendar.timegm(time_struct)
return(timestamp)
# Find last tweet retrieved's date
def findTimestampMax(pseudo):
try:
        csv = codecs.open("datafiles/tweets_party_generales.csv", "r", "utf-8")
    except:
        print "File not found"
        return 0
    # Build a list of the tweet timestamps (field 4, quotes stripped) of every line
    # of the csv file belonging to this pseudo; .strip() removes the newlines
    tab = [int(line.strip().split(",")[4][1:-1]) if line.strip().split(",")[1][1:-1].lower() == pseudo.lower() else 0 for line in csv]
    csv.close()
    # Return the greatest timestamp (hence the most recent), or 0 if none was found
    return max(tab) if tab else 0
# Remove quotes and \n in data
def cleanText(text):
if not text:
return ""
text = re.sub('[\s,"]', " ", text)
return text
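# Retrieval window for the campaign; dates must follow the "%d %b %Y" format
# expected by strToTimestamp().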
retrieveFromList("04 Dec 2015", "20 Dec 2015")
| {
"content_hash": "e0ddda0daec61d9139184a94d2501780",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 192,
"avg_line_length": 38.75568181818182,
"alnum_prop": 0.6091482187362557,
"repo_name": "florence-nocca/spanish-elections",
"id": "eeec1317c53b072c7df0686189f526ac20275af7",
"size": "6868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "retrieve-tweets/retrieve_party_tweets.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38619"
},
{
"name": "R",
"bytes": "12969"
}
],
"symlink_target": ""
} |
from MAT.PluginMgr import PluginTaskDescriptor, \
PluginError, FindPluginClass, PluginStep, TagStep, WholeZoneStep
from MAT.PluginDocInstaller import PluginDocInstaller
from ReplacementEngine import PIIReplacementEngine, DOC_CACHE_SCOPE, \
BATCH_CACHE_SCOPE, NO_CACHE_SCOPE
from ClearReplacementStrategy import ClearRenderingStrategy
from MAT import Error
from MAT.Workspace import WorkspaceOperation, WorkspaceError, WorkspaceFolder, \
CMDLINE_DEBUG_AVAILABLE, UI_AVAILABLE, NOT_AVAILABLE, CMDLINE_AVAILABLE, \
CoreWorkspaceFolder, Workspace, \
MATEngineExecutionWorkspaceOperationMixin
from MAT.WorkspaceDB import WorkspaceDB
from MAT.Operation import OpArgument
from MAT.Score import AggregatorScoreColumn, FileAggregateScoreRow, BaseScoreRow, Formula
import os
# Used way below, in augmentTagSummaryScoreTable etc.
class DocConfidence(Formula):
def __init__(self, header):
self.header = header
def render(self, scoreTable, separator = None):
return self.compute()
def compute(self):
# So I've passed in None below, just to trigger
# the capture of the file row.
fileRow, ignore = self.header
doc = fileRow.hypDoc
# Let's see if there's any confidence info here.
# This will work with Carafe, probably nothing else.
seqConfidences = doc.getAnnotations(["seq_confidence"])
if not seqConfidences:
return None
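        # The document-level confidence is the product of the per-sequence posteriors.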
return reduce(lambda x, y: x * y, [float(a["posterior"]) for a in seqConfidences])
# I limit this to just completed documents, unless specified.
class RedactionOperation(WorkspaceOperation):
name = "redact"
argList = [OpArgument("replacer", help = "specify the replacer to use for this redaction (optional; obligatory if no replacer is specified in the task.xml file)",
hasArg = True),
OpArgument("retain_existing", help = "don't clear the redacted folders first"),
OpArgument("dont_limit_to_gold", help = "under normal circumstances, the redaction will apply only to gold and reconciled documents. If this flag is present, it applies to all documents.")]
def getAffectedFolders(self):
return ["redacted, rich", "redacted, raw"]
def getTargetFolderAndDocuments(self):
return "redacted, rich", self._getTargetDocuments("redacted, rich")
def do(self, replacer = None, retain_existing = False, dont_limit_to_gold = False):
# Clear the redacted folders. Run the engine.
operationSettings = self.getOperationSettings()
if operationSettings is None:
raise WorkspaceError, ("no operation settings in task '%s' for operation '%s'" % (self.folder.workspace.task.name, self.name))
operationSettings = operationSettings.copy()
# Now, we've got our settings. At least workflow and steps are defined.
try:
workflow = operationSettings["workflow"]
except KeyError:
raise WorkspaceError, ("workflow undefined in tag prep operation settings")
if replacer is not None:
operationSettings["replacer"] = replacer
elif not operationSettings.has_key("replacer"):
raise WorkspaceError, "no replacer specified in operation settings or command"
del operationSettings["workflow"]
rawFolder = self.folder.workspace.folders['redacted, raw']
richFolder = self.folder.workspace.folders['redacted, rich']
if not retain_existing:
rawFolder.clear()
richFolder.clear()
if not dont_limit_to_gold:
# Find the documents which are completed, and only use those.
self.affectedBasenames = [r[1] for r in self.folder.workspace.getDB().basenameInfo(self.affectedBasenames)
if r[2] in ("reconciled", "gold")]
allPaths = self.folder.getFiles(self.affectedBasenames)
try:
import MAT.ToolChain
e = MAT.ToolChain.MATEngine(workflow = workflow, task = self.folder.workspace.task.name)
# I'd forced this to be debug = True, back when I was passing debug all over the place.
# At this point, we're going to have to go with the less informative message.
dataPairs = e.Run(inputFileList = allPaths, input_file_type = "mat-json",
**operationSettings)
except Exception, e:
raise WorkspaceError, str(e)
# If this succeeds, I should write all the files to
# the appropriate folder.
for file, output in dataPairs:
richFolder.saveFile(output, os.path.basename(file))
rawFolder.saveFile(output, os.path.basename(file))
def webResult(self):
d = WorkspaceOperation.webResult(self)
# We want the document status to be something consistent.
# It doesn't really matter what the actual status is.
# The folder listing will reveal no
# basename info for the target folder, so I don't need to hack that too.
if d.has_key("status"):
del d["status"]
return d
# The only reason I need this is because I need to be able to review
# nominations in workspaces. Grrr.
# Because this requires a lock ID, etc., I'm replacing it with something very similar
# to autotag.
class NominationOperation(WorkspaceOperation, MATEngineExecutionWorkspaceOperationMixin):
name = "nominate"
argList = [OpArgument("replacer", help = "specify the replacer to use for this nomination (optional; obligatory if no replacer is specified in the task.xml file)",
hasArg = True),
OpArgument("dont_limit_to_gold", help = "under normal circumstances, the nomination will apply only to gold and reconciled documents. If this flag is present, it applies to all documents."),
OpArgument("lock_id", hasArg = True, help="lock ID (if document is locked)")]
def getAffectedFolders(self):
return ["nominated"]
def getTargetFolderAndDocuments(self):
# Cache the target documents, so I don't open them
# again when I lock in webResult().
self.targetDocuments = self._getTargetDocuments("nominated")
return "nominated", self.targetDocuments
def getAffectedFileBasenames(self):
if hasattr(self, "affectedFileBasename"):
return {self.affectedFileBasename: self.affectedBasenames[0]}
else:
return WorkspaceOperation.getAffectedFileBasenames(self)
def allPaths(self):
if not self.dont_limit_to_gold:
# Find the documents which are completed, and only use those.
if hasattr(self, "affectedFileBasename"):
paths = [os.path.join(self.folder.dir, p[0])
for p in self.folder.workspace.getDB().basenameInfo(self.affectedBasenames)
if p[0] == self.affectedFileBasename and p[2] in ("reconciled", "gold")]
else:
paths = [os.path.join(self.folder.dir, r[0])
for r in self.folder.workspace.getDB().basenameInfo(self.affectedBasenames)
if r[2] in ("reconciled", "gold")]
elif hasattr(self, "affectedFileBasename"):
paths = [os.path.join(self.folder.dir, self.affectedFileBasename)]
else:
paths = self.folder.getFiles(self.affectedBasenames)
return paths
# lock_id is only
# used from the UI. If the requested basenames have a lock that doesn't
# match the lock ID, you can't do anything.
# This lock_id is for the CORE. So if the lock is there, it's just used to
# determine the source file, and whether to lock the output. But
# we need to check the target locks the same way we do for autotag.
def do(self, checkPathsAffected = True, lock_id = None, replacer = None, dont_limit_to_gold = False):
self.replacer = replacer
self.dont_limit_to_gold = dont_limit_to_gold
db = self.folder.workspace.getDB()
# If there's a lock_id, there better be only one affected basename.
if lock_id and len(self.affectedBasenames) != 1:
raise WorkspaceError, "lock_id requires exactly one affected basename"
nominationLockInfo = db.nominationLockInfo()
if nominationLockInfo and (lock_id is None):
# In this situation, we can't proceed.
raise WorkspaceError, "can't nominate while documents are locked"
if lock_id:
# First, see if that file in the nomination folder is already locked.
idInfo = db.coreGetLockIDInfo(lock_id)
if [p for p in nominationLockInfo if p[0] == idInfo[0]]:
raise WorkspaceError, "can't nominate while documents are locked"
# Otherwise, make sure that the affected file basenames are just
# the one for the lock info.
self.affectedFileBasename = idInfo[0]
self.lockingUser = idInfo[2]
t = self.folder.workspace.beginTransaction(self)
self.transaction = t
try:
self._do(checkPathsAffected = checkPathsAffected)
t.commit()
except:
t.rollback()
raise
def _do(self, checkPathsAffected = True):
try:
MATEngineExecutionWorkspaceOperationMixin.do(self, checkPathsAffected = checkPathsAffected)
except:
raise
def getRunParameters(self, operationSettings):
replacer = self.replacer or operationSettings.get("replacer")
if replacer is None:
raise WorkspaceError, "no replacer specified in operation settings or command"
# In order to process the command lines really correctly, we
# pass the operationSettings to an XMLOpArgumentAggregator.
for key in ["input_file", "input_file_type", "output_file",
"output_dir", "input_file_re", "input_encoding",
"input_dir", "output_file_type", "output_encoding",
"output_fsuff"]:
if operationSettings.has_key(key):
raise WorkspaceError, ("workspace operation settings don't permit %s option to MATEngine", key)
return {"input_file_type": self.folder.fileType,
"input_encoding": "utf-8",
"replacer": replacer}
def wrapup(self, dataPairs):
nominationFolder = self.folder.workspace.folders['nominated']
db = self.folder.workspace.getDB()
# Next, we'd better check to make sure that we can write each file.
# If we can't, we want to raise an error. We should check each
# individual file, because we don't want ANYthing to happen
# if the writes can fail.
if not os.access(nominationFolder.dir, os.W_OK | os.X_OK):
raise WorkspaceError, "folder nominated not available for writing"
self.transaction.addFilesToAdd([os.path.join(nominationFolder.dir, os.path.basename(p))
for (p, iData) in dataPairs])
for p, iData in dataPairs:
fileBasename = os.path.basename(p)
if not os.access(os.path.join(nominationFolder.dir, p), os.W_OK):
raise WorkspaceError, ("file %s in folder nominated not available for writing" % fileBasename)
nominationFolder.saveFile(iData, fileBasename)
def webResult(self):
d = WorkspaceOperation.webResult(self)
if d.get("basename"):
nominationFolder = self.folder.workspace.folders['nominated']
basename, fileBasename, doc = self.targetDocuments[0]
ignore, fileBasename, lockId = self.folder.workspace._openFileBasename(nominationFolder, fileBasename,
self.lockingUser, False, doc = doc)
d["lock_id"] = lockId
# We want the document status to be something consistent.
# It doesn't really matter what the actual status is.
# The folder listing will reveal no
# basename info for the target folder, so I don't need to hack that too.
if d.has_key("status"):
del d["status"]
return d
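# Releases the nomination-folder lock identified by lock_id.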
class NominationReleaseLockOperation(WorkspaceOperation):
name = "release_lock"
availability = NOT_AVAILABLE
argList = [OpArgument("lock_id", hasArg = True, help="lock ID")]
def do(self, lock_id = None):
_in_transaction = self.transaction
if lock_id is None:
raise WorkspaceError, "Can't release a lock without an ID"
db = self.folder.workspace.getDB()
# I'm wrapping this because I don't know whether this
# operation is going to remain atomic.
if _in_transaction:
self._do(db, lock_id)
else:
t = self.folder.workspace.beginTransaction(self)
try:
self._do(db, lock_id)
t.commit()
except:
t.rollback()
raise
def _do(self, db, lock_id):
db.unlockNominationLock(lock_id)
class NominationForceUnlockOperation(WorkspaceOperation):
name = "force_unlock"
availability = CMDLINE_AVAILABLE
argList = [OpArgument("user", hasArg = True,
help = "the user who's locked the basename")]
def do(self, user = None):
if user is None:
raise WorkspaceError, "can't force unlock a basename without a user"
t = self.folder.workspace.beginTransaction(self)
try:
self._do(user)
t.commit()
except:
t.rollback()
raise
def _do(self, user):
db = self.folder.workspace.getDB()
unlocked = db.forceUnlockNominationBasenames(user, self.affectedBasenames)
if self.fromCmdline:
if unlocked:
print "Unlocked core documents:", " ".join(unlocked)
else:
print "Unlocked no documents."
class NominationSaveOperation(WorkspaceOperation):
name = "nominate_save"
availability = CMDLINE_DEBUG_AVAILABLE | UI_AVAILABLE
argList = [OpArgument("retain_existing", help = "don't clear the redacted folders first, if transform is set"),
OpArgument("doc", help = "a document to save, as a JSON string", hasArg = True),
OpArgument("transform", help = "transform after saving"),
OpArgument("lock_id", hasArg = True, help="lock ID (if document is locked)"),
OpArgument("release_lock", help="release the lock after save")]
def getAffectedFolders(self):
if hasattr(self, "doTransform") and self.doTransform:
return ["nominated", "redacted, rich", "redacted, raw"]
else:
return ["nominated"]
def getTargetFolderAndDocuments(self):
if hasattr(self, "doTransform") and self.doTransform:
return "redacted, rich", self._getTargetDocuments("redacted, rich")
else:
return "nominated", self._getTargetDocuments("nominated")
def getAffectedFileBasenames(self):
return {self.affectedFileBasename: self.affectedBasenames[0]}
def do(self, retain_existing = False, doc = None, transform = False, lock_id = None, release_lock = False):
self.doTransform = transform
if lock_id is None:
raise WorkspaceError, "can't save without lock ID"
# Now we get the basename. Must check to ensure that
# the lock ID matches. Need to get the file basename
# from the transaction.
db = self.folder.workspace.getDB()
fileBasename, basename, user = db.nominationGetLockIDInfo(lock_id)
if basename != self.affectedBasenames[0]:
raise WorkspaceError, ("wrong lock ID %s for basename %s" % (lock_id, self.affectedBasenames[0]))
self.affectedFileBasename = fileBasename
t = self.folder.workspace.beginTransaction(self, filesToPreserve = [os.path.join(self.folder.dir, fileBasename)])
try:
if doc is not None:
# It can be none, if it's not dirty.
# First, make it into a document. The document
# string is almost certainly not Unicode yet.
docObj = self.folder.docIO.readFromByteSequence(doc, 'utf-8')
# There better only be one basename.
self.folder.saveFile(docObj, fileBasename)
if release_lock:
if self.fromCmdline:
print "Releasing lock ID %s" % lock_id
o = self.folder.getOperation("release_lock",
basenames = [basename],
transaction = t)
o.do(lock_id = lock_id)
t.commit()
except:
t.rollback()
raise
if transform:
# Clear the redacted folders. Run the engine.
operationSettings = self.getOperationSettings()
if operationSettings is None:
raise WorkspaceError, ("no operation settings in task '%s' for operation '%s'" % \
(self.folder.workspace.task.name, self.name))
operationSettings = operationSettings.copy()
# Now, we've got our settings. At least workflow and steps are defined.
try:
workflow = operationSettings["workflow"]
except KeyError:
raise WorkspaceError, ("workflow undefined in tag prep operation settings")
del operationSettings["workflow"]
rawFolder = self.folder.workspace.folders['redacted, raw']
richFolder = self.folder.workspace.folders['redacted, rich']
if not retain_existing:
rawFolder.clear()
richFolder.clear()
allPaths = [os.path.join(self.folder.dir, fileBasename)]
try:
import MAT.ToolChain
e = MAT.ToolChain.MATEngine(workflow = workflow, task = self.folder.workspace.task.name)
dataPairs = e.Run(inputFileList = allPaths, input_file_type = "mat-json",
**operationSettings)
except Error.MATError, e:
raise WorkspaceError, e.prefix + ": " + e.errstr
# If this succeeds, I should write all the files to
# the appropriate folder.
for file, output in dataPairs:
richFolder.saveFile(output, os.path.basename(file))
rawFolder.saveFile(output, os.path.basename(file))
# And remove them from the nominated folder, I think.
self.folder.removeFile(fileBasename)
def webResult(self):
d = WorkspaceOperation.webResult(self)
# We want the document status to be something consistent.
# It doesn't really matter what the actual status is.
# The folder listing will reveal no
# basename info for the target folder, so I don't need to hack that too.
if d.has_key("status"):
del d["status"]
return d
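# Workspace folder holding documents with nominated replacements; document locks
# are tracked separately in the nomination_lock table.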
class NominationFolder(CoreWorkspaceFolder):
def fileBasenameLocked(self, fileBasename):
return self.workspace.getDB().nominationDocumentLocked(fileBasename)
def updateOpenFileWebResultSeed(self, doc, basename, seed):
return
def prepareForEditing(self, doc, fileBasename, user, lockId):
db = self.workspace.getDB()
db.lockNominationDocument(lockId, fileBasename, user)
def listContents(self, basenames):
db = self.workspace.getDB()
bPairs = []
# For these basenames, see which files are actually present.
lockInfo = dict([(docName, lockedBy) for (docName, lockedBy, lockID) in db.nominationLockInfo()])
for docName, basename, status, assignedUser, lockedBy in db.basenameInfo(basenames):
# Ignore locking and status - this is just to get assignment info.
if os.path.exists(os.path.join(self.dir, docName)):
info = {"basename": basename}
if docName != basename:
info["doc name"] = docName
if assignedUser:
info["assigned to"] = assignedUser
lockedBy = lockInfo.get(docName)
if lockedBy:
info["locked by"] = lockedBy
bPairs.append(info)
return bPairs
def removeFile(self, fileBasename):
CoreWorkspaceFolder.removeFile(self, fileBasename)
self.workspace.getDB().unlockNominationDocument(fileBasename)
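# Folder for redacted output; its documents are never locked or edited in place.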
class RedactionFolder(CoreWorkspaceFolder):
def fileBasenameLocked(self, fileBasename):
return None
def updateOpenFileWebResultSeed(self, doc, basename, seed):
return
def prepareForEditing(self, doc, fileBasename, user, lockId):
raise WorkspaceError, "folder is not editable"
def listContents(self, basenames):
db = self.workspace.getDB()
bPairs = []
# For these basenames, see which files are actually present.
for docName, basename, status, assignedUser, lockedBy in db.basenameInfo(basenames):
# Ignore locking and status - this is just to get assignment info.
if os.path.exists(os.path.join(self.dir, docName)):
info = {"basename": basename}
if docName != basename:
info["doc name"] = docName
if assignedUser:
info["assigned to"] = assignedUser
bPairs.append(info)
return bPairs
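# WorkspaceDB extension that adds lock bookkeeping for the nomination folder.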
class DeidentificationDB(WorkspaceDB):
def nominationDocumentLocked(self, docName):
lockedByResult = self._execute("SELECT locked_by FROM nomination_lock WHERE doc_name = ?",
params = [docName])
if not lockedByResult:
return None
else:
return lockedByResult[0][0]
def lockNominationDocument(self, lockId, docName, lockedBy):
# If there's already one, we overwrite the original lock.
if self._execute("SELECT locked_by FROM nomination_lock WHERE doc_name = ?",
params = [docName]):
self._execute("UPDATE nomination_lock SET lock_id = ?, locked_by = ? WHERE doc_name = ?",
params = [lockId, lockedBy, docName],
retrieval = False)
else:
self._execute("INSERT INTO nomination_lock VALUES (?, ?, ?)",
params = [docName, lockedBy, lockId],
retrieval = False)
def unlockNominationLock(self, lockId):
self._execute("DELETE FROM nomination_lock WHERE lock_id = ?",
params = [lockId],
retrieval = False)
def unlockNominationDocument(self, docName):
self._execute("DELETE FROM nomination_lock WHERE doc_name = ?",
params = [docName],
retrieval = False)
def nominationLockInfo(self):
return self._execute("SELECT doc_name, locked_by, lock_id FROM nomination_lock")
def nominationGetLockIDInfo(self, lockId):
v = self._execute("SELECT A.doc_name, B.basename, A.locked_by FROM nomination_lock A, document_info B WHERE A.lock_id = ? AND A.doc_name = B.doc_name",
params = [lockId])
if len(v) == 0:
return None, None, None
else:
return v[0]
# Another situation where we can't use substitution because I need "IN".
def forceUnlockNominationBasenames(self, user, basenames):
docLocksToDelete = [r[0] for r in self._executeWithParamDict("SELECT B.doc_name FROM document_info A, nomination_lock B WHERE A.doc_name = B.doc_name AND B.locked_by = $(user) AND A.basename IN ($(basenames))", {"user": user, "basenames": basenames})]
if docLocksToDelete:
self._executeWithParamDict("DELETE FROM nomination_lock WHERE doc_name IN ($(docLocksToDelete))", {"docLocksToDelete": docLocksToDelete}, retrieval = False)
return docLocksToDelete
class DeidTaskDescriptor(PluginTaskDescriptor):
categories = {}
REDACTION_ATTR = "redacted"
SEED_UNPARSEABLE_ATTR = "seed_unparseable"
def __init__(self, *args, **kw):
PluginTaskDescriptor.__init__(self, *args, **kw)
self.localReplacers = {}
self._rdirCache = None
self._replacerCache = None
self._instantiatedReplacerCache = {}
def fromXML(self, *args):
PluginTaskDescriptor.fromXML(self, *args)
# At this point, we want to pull out the redaction settings.
# Now, we look for all the settings which end in
# _replacers, which have a corresponding setting
# which ends in _replacers_workflows.
import re
replPat = re.compile("^(.*)_replacers$")
replWFPat = re.compile("^(.*)_replacers_workflows$")
replKeyPairs = {}
for key in self.settings.keys():
m = replPat.match(key)
if m is not None:
try:
replKeyPairs[m.group(1)][0] = key
except KeyError:
replKeyPairs[m.group(1)] = [key, None]
else:
m = replWFPat.match(key)
if m is not None:
try:
replKeyPairs[m.group(1)][1] = key
except KeyError:
replKeyPairs[m.group(1)] = [None, key]
# Now, we've gone through all the keys.
# I need two types of mappings. First, I need to be able
# to find a replacer of a particular name. Second, I need
# to be able to see if a replacer supports a workflow.
# Third, I need to report, for various workflows, what
# replacers are available. The last is least important.
# This all needs to happen by the rname in the replacer.
self.localReplacers = {}
for family, [repls, replWFs] in replKeyPairs.items():
if (repls is not None) and (replWFs is not None):
replWFs = self.settings[replWFs].split(",")
repls = self.settings[repls].split(",")
# Now, we have all the workflow names and the replacer names.
for rName in repls:
try:
r = FindPluginClass(rName, self.name)
if not issubclass(r, PIIReplacementEngine):
raise PluginError, ("replacer class %s is not a subclass of PIIReplacementEngine" % rName)
if self.localReplacers.has_key(r.__rname__):
entry = self.localReplacers[r.__rname__][1]
for wf in replWFs:
if wf not in entry:
entry.append(wf)
else:
self.localReplacers[r.__rname__] = [r, replWFs[:]]
except NameError:
raise PluginError, ("unknown replacer %s" % rName)
# Now, anyone who gets the replacers, gets a mapping from workflows
# to replacer names.
def findReplacer(self, rName):
try:
return self.localReplacers[rName]
except KeyError:
return None
def allReplacers(self):
return self.localReplacers.keys()
def getReplacerRDirs(self):
if self._rdirCache is not None:
return self._rdirCache
else:
# Return all resource directories up to the root.
if self.parentObj and hasattr(self.parentObj, "getReplacerRDirs"):
seed = self.parentObj.getReplacerRDirs()[:]
else:
seed = []
if self.resourceDir not in seed:
seed[0:0] = [self.resourceDir]
self._rdirCache = seed
return seed
# Fetch the CGI task metadata. Only called on leaves.
def getCGIWorkflowMetadata(self, wfObj):
params = PluginTaskDescriptor.getCGIWorkflowMetadata(self, wfObj)
workFlow = wfObj.name
# Return the replacers.
params["uiSettings"]["replacers"] = [key for (key, rPair) in self.localReplacers.items() if workFlow in rPair[1]]
return params
def enhanceCGIMetadata(self, metadata):
PluginTaskDescriptor.enhanceCGIMetadata(self, metadata)
# What I need to do here is get the replacers for the
# workspace.
try:
redactionWorkflow = self.getWorkspaceOperations()["redact"]["workflow"]
metadata["workspaceReplacers"] = [key for (key, rPair) in self.localReplacers.items() if redactionWorkflow in rPair[1]]
except KeyError:
metadata["workspaceReplacers"] = []
def getCmdlineTaskMetadata(self):
# How should we format this? We need to find all the possible sets.
wfSets = {}
for key, rPair in self.localReplacers.items():
wfSet = rPair[1][:]
wfSet.sort()
wfTuple = tuple(wfSet)
try:
wfSets[wfTuple].append(key)
except KeyError:
wfSets[wfTuple] = [key]
return [" replacers : " + ", ".join([", ".join(vals) + " (" + ", ".join(key) + ")" for key, vals in wfSets.items()])]
    # Workspace customization. Add the redact action to the
    # core folder. Add the redacted, rich and redacted, raw folders.
    # Redaction has default settings in the task's operation settings (task.xml).
def workspaceCustomize(self, workspace, create = False):
workspace.addFolder('redacted, rich', "redacted_rich", create = create,
folderClass = RedactionFolder,
description = "rich versions of redacted documents",
importTarget = False)
workspace.addFolder('redacted, raw', "redacted_raw", create = create,
folderClass = RedactionFolder,
description = "raw versions of redacted documents",
importTarget = False)
from MAT.DocumentIO import getDocumentIO
workspace.folders["redacted, raw"].docIO = getDocumentIO("raw", encoding = "utf-8")
workspace.folders['core'].addOperation("redact", RedactionOperation)
# I have to make sure that this folder gets created if it's not already
# there, because some of the folks who are using this code have already
# made workspaces.
f = NominationFolder(workspace, 'nominated',
description = "completed documents with nominated replacements",
importTarget = False)
workspace.folders['nominated'] = f
if not os.path.isdir(f.dir):
f.create()
workspace.folders['core'].addOperation("nominate", NominationOperation)
workspace.folders['nominated'].addOperation("nominate_save", NominationSaveOperation)
workspace.folders["nominated"].addOperation("release_lock", NominationReleaseLockOperation)
workspace.folders["nominated"].addOperation("force_unlock", NominationForceUnlockOperation)
workspace.getDB = lambda: self._getEnhancedWorkspaceDB(workspace)
def _getEnhancedWorkspaceDB(self, ws):
db = Workspace.getDB(ws)
db.run_script(os.path.join(os.path.dirname(os.path.abspath(__file__)), "deid_ws.sql"))
db.__class__ = DeidentificationDB
return db
def workspaceUpdate1To2(self, workspace, oldWorkspaceDir, basenames, initialUser):
import shutil
# Just copy them over. The folders will already have been created.
redactedRichBasenames = list(set(os.listdir(os.path.join(oldWorkspaceDir, "folders", "redacted_rich"))) & basenames)
print "Copying basenames from 'redacted, rich':", " ".join(redactedRichBasenames)
for b in redactedRichBasenames:
shutil.copy(os.path.join(oldWorkspaceDir, "folders", "redacted_rich", b),
os.path.join(workspace.folders["redacted, rich"].dir, b))
redactedRawBasenames = list(set(os.listdir(os.path.join(oldWorkspaceDir, "folders", "redacted_raw"))) & basenames)
print "Copying basenames from 'redacted, raw':", " ".join(redactedRawBasenames)
for b in redactedRawBasenames:
shutil.copy(os.path.join(oldWorkspaceDir, "folders", "redacted_raw", b),
os.path.join(workspace.folders["redacted, raw"].dir, b))
nominatedBasenames = list(set(os.listdir(os.path.join(oldWorkspaceDir, "folders", "nominated"))) & basenames)
print "Copying basenames from 'nominated': ", " ".join(nominatedBasenames)
for b in nominatedBasenames:
shutil.copy(os.path.join(oldWorkspaceDir, "folders", "nominated", b),
os.path.join(workspace.folders["nominated"].dir, b))
# Local operations.
def replaceableAnnotations(self):
return self.getAnnotationTypesByCategory("content")
def instantiateReplacer(self, rName, **kw):
if self._instantiatedReplacerCache.has_key(rName):
return self._instantiatedReplacerCache[rName]
else:
rPair = self.findReplacer(rName)
if rPair is not None:
r = rPair[0]
c = r(self.getReplacerRDirs(), self.categories, **kw)
self._instantiatedReplacerCache[rName] = c
return c
return None
# Here, I'm going to try to add a column which reflects the
# document-level probabilities.
def augmentTagSummaryScoreTable(self, tbl):
c = AggregatorScoreColumn("doc_confidence",
rowDispatch = [(FileAggregateScoreRow, DocConfidence, None),
(BaseScoreRow, None)])
tbl.addColumn(c, after = "accum")
tbl.aggregates.append(c)
return tbl
def augmentTokenSummaryScoreTable(self, tbl):
c = AggregatorScoreColumn("doc_confidence",
rowDispatch = [(FileAggregateScoreRow, DocConfidence, None),
(BaseScoreRow, None)])
tbl.addColumn(c, after = "accum")
tbl.aggregates.append(c)
return tbl
def augmentDetailScoreTable(self, tbl):
return tbl
#
# Here are the deidentification steps
#
class NominateStep(PluginStep):
argList = [OpArgument("replacer", help = "specify the replacer to use. Obligatory if more than one replacer is available. See above for available replacers.", hasArg = True),
OpArgument("cache_scope", help = "specify the cache scope for particular tags. Argument is a semicolon-delimited sequence of <tag>,doc|batch|none, e.g. 'PERSON,batch;LOCATION;doc'. Default scope is document scope.", hasArg = True),
OpArgument("cache_case_sensitivity", help = "specify which tags have case-sensitive caches. Argument is a semicolon-delimited sequence of tags, e.g., 'PERSON;LOCATION'.", hasArg = True),
OpArgument("resource_file_repl", help="specify a replacement for one of the resource files used by the replacement engine. Argument is a semicolon-delimited sequence of <file>=<repl>. See the ReplacementEngine.py for details.", hasArg = True),
OpArgument("replacement_map_file", help="Specify a replacement map file to provide some detailed control over clear -> clear replacements. See documentation for details.", hasArg = True),
OpArgument("replacement_map", help="Specify a replacement map to provide some detailed control over clear -> clear replacements. See documentation for details.", hasArg = True),
OpArgument("dont_nominate", help = "A comma-separated list of labels for which nominations should not be proposed", hasArg = True),
OpArgument("flag_unparseable_seeds", hasArg = True,
help = "A comma-separated list of labels whose annotations should be flagged in clear -> clear replacement when the phrase in the original document could not be parsed appropriately (and thus whose replacements might not have the appropriate fidelity). Currently, only dates, URLs, phone numbers, and can be flagged in this way.")]
def paramsSatisfactory(self, wfName, failureReasons, replacer = None, **params):
if replacer is None:
allReplacers = self.descriptor.allReplacers()
if len(allReplacers) == 1:
replacer = allReplacers[0]
if replacer is None:
raise PluginError, "no replacer specified"
# Filter the task implementation based on the replacer.
# If the named replacer isn't one of the replacers
# in the task, we bail.
rPair = self.descriptor.findReplacer(replacer)
if rPair is None:
failureReasons.append("task '%s' does not know about the replacer '%s'" % (self.descriptor.name, replacer))
return False
elif wfName not in rPair[1]:
failureReasons.append("workflow '%s' in task '%s' does not support the replacer '%s'" % (wfName, self.descriptor.name, replacer))
return False
else:
return True
# This drives the replacers.
def doBatch(self, iDataPairs, replacer = None, dont_nominate = None, flag_unparseable_seeds = None, **kw):
# This needs to be a batch step, so that we can get corpus-level
# weights to work.
# Don't bother catching the errors; we'll deal with them
# in the engine.
if replacer is None:
# Checked in paramsSatisfactory().
replacer = self.descriptor.allReplacers()[0]
r = self.descriptor.instantiateReplacer(replacer, **kw)
if not r:
raise Error.MATError("nominate", "couldn't find the replacer named " + replacer)
if dont_nominate is not None:
dontNominate = set([x.strip() for x in dont_nominate.split(",")])
else:
dontNominate = set()
if flag_unparseable_seeds is not None:
flagUnparseableSeeds = set([x.strip() for x in flag_unparseable_seeds.split(",")])
else:
flagUnparseableSeeds = set()
# print "FLAGGING", flagUnparseableSeeds
# This should only happen with spanned annotations, but we
# have to make absolutely sure. See below.
replaceableAnnots = set(self.descriptor.replaceableAnnotations()) - dontNominate
# Two phases: first we digest, then we replace.
# Note that what we need for the replacement is the
# effective label, as defined by the task.
nomMapping = {}
# Apparently, you may have the same file more than once. This
# is a bug in the bug queue, and the only instance of doBatch in the
# system where that problem might arise is this one. So let's fix it.
for f, annotSet in iDataPairs:
annotSet.metadata["replacer_used"] = replacer
# First, generate all the nominations.
digestionDict = {}
annList = []
for eName in replaceableAnnots:
try:
eType = annotSet.anameDict[eName]
except KeyError:
# There may not be any.
continue
# If it's spanless, skip it.
if not eType.hasSpan:
continue
annList = annList + annotSet.atypeDict[eType]
# Sort them in order.
annList.sort(key = lambda ann: ann.start)
# Digest.
for annot in annList:
lab = self.descriptor.getEffectiveAnnotationLabel(annot)
digestionDict[annot] = (lab, r.Digest(lab, annotSet.signal[annot.start:annot.end]))
r.EndDocumentForDigestion()
if hasattr(r, "dateDelta"):
# This is an integer.
annotSet.metadata["dateDelta"] = r.dateDelta
nomMapping[(f, annotSet)] = (annList, digestionDict)
# Replace.
for f, annotSet in iDataPairs:
annList, digestionDict = nomMapping[(f, annotSet)]
for annot in annList:
lab, digestions = digestionDict[annot]
repl = r.Replace(lab, digestions, filename = f)
annot[self.descriptor.REDACTION_ATTR] = repl
# ONLY if we're in clear -> clear. Otherwise, it doesn't matter
# that the seed is unparseable. Either it's not expected to be,
# or the target doesn't care.
if (replacer == "clear -> clear") and (lab in flagUnparseableSeeds) and \
hasattr(digestions, "seed_unparseable") and digestions.seed_unparseable:
import sys
print >> sys.stderr, "WARNING: the '%s' phrase '%s' from %d to %d could not be parsed for nomination, and its nomination must be reviewed before the transform step can apply" % (annot.atype.lab, annotSet.signal[annot.start:annot.end], annot.start, annot.end)
annot[self.descriptor.SEED_UNPARSEABLE_ATTR] = digestions.__ctype__
r.EndDocumentForReplacement()
return iDataPairs
def undo(self, annotSet, **kw):
try:
del annotSet.metadata["replacer_used"]
except KeyError:
pass
for tag in self.descriptor.getAnnotationTypesByCategory("content"):
try:
atype = annotSet.anameDict[tag]
if not atype.attr_table.has_key(self.descriptor.REDACTION_ATTR):
continue
# We can't remove the attribute from the
# annotation TYPE, because those are global.
# Once the attribute is defined, it's always
# defined. However, we can remove it most efficiently
# from the ANNOTATION by seeing how many attributes
# the annotation has (remember, a shorter list
# is equal to nulls everywhere). If the annotation
# list is no longer than the index of the
# redacted attribute, then we can just truncate
# the list of attrs. This should probably
# be a delitem on the annotation. Well, no;
# you can set an attribute to null, but you
# can't actually delete it once it's set.
i = atype.attr_table[self.descriptor.REDACTION_ATTR]
for annot in annotSet.atypeDict[atype]:
if len(annot.attrs) > i:
# There's something at that index.
annot.attrs[i] = None
i = atype.attr_table.get(self.descriptor.SEED_UNPARSEABLE_ATTR)
if i is not None:
for annot in annotSet.atypeDict[atype]:
if len(annot.attrs) > i:
annot.attrs[i] = None
except KeyError:
pass
def isDone(self, annotSet):
for annot in annotSet.getAnnotations(self.descriptor.getAnnotationTypesByCategory("content")):
try:
if annot[self.descriptor.REDACTION_ATTR] is not None:
return True
except KeyError:
pass
return False
from MAT.Document import OverlapError, AnnotatedDoc
import sys
class TransformStep(PluginStep):
argList = [OpArgument("prologue", help = "Specify the text of a prologue to insert into the transformed document. You may wish to do this, e.g., to assert that all names in the document are fake. This option takes preference over --prologue_file.", hasArg = True),
OpArgument("prologue_file", help = "Specify a file which contains the text of a prologue to insert into the transformed document. You may wish to do this, e.g., to assert that all names in the document are fake. The file is assumed to be in UTF-8 encoding. --prologue takes preference over this option.", hasArg = True),
OpArgument("dont_transform", help = "A comma-separated list of labels that should not be transformed", hasArg = True)]
def __init__(self, *args, **kw):
PluginStep.__init__(self, *args, **kw)
# We need to know which
# step to use to prep the final document after the tags
# have been located. The prepping differs
# depending on whether the redaction is to clear or not.
# If it's to clear, find the "zone" task in the demo workflow;
# otherwise, find the zone task in the resynth workflow.
# We don't want to have to find the replacer in the
# invocation of do(). In particular, we should expect that the
# replacer be in the document itself. But that means that we'd
# need to figure out, on a document-by-document basis,
# which prep function to use. So let's cache them in advance.
# Well, we can't, actually, because looking for a step
# in the context of when the steps are created gives you
# infinite recursion. So we need to create them later.
self._postTransformStepsFound = False
self.clearZoneStep = None
self.resynthZoneStep = None
def _ensurePostTransformSteps(self):
if not self._postTransformStepsFound:
self._postTransformStepsFound = True
self.clearZoneStep = self.descriptor.getStep("Demo", "zone")
try:
self.resynthZoneStep = self.descriptor.getStep("Resynthesize", "zone")
except PluginError:
pass
#
# Core deidentification engine. Transform step is general.
#
def augmentClearZones(self, iDataPairs):
self.clearZoneStep.doBatch(iDataPairs)
# And, once it's tokenized, I have to make sure that (believe it
# or not) no tags mismatch the annotation boundaries. If they do,
# I need to expand the annotation boundaries to match the nearest
# token. This is a messy computation, but it turns out I need
# it in the core, anyway.
for fname, annotSet in iDataPairs:
for seg in annotSet.getAnnotations(["SEGMENT"]):
seg["annotator"] = "unknown human"
seg["status"] = "human gold"
annotSet.adjustTagsToTokens(self.descriptor)
def augmentRedactedZones(self, iDataPairs):
# There may not be a zone step. But in any case, what we
# want to do is go back through the annotations and adjust
# the boundaries until there's no leading or trailing whitespace.
if self.resynthZoneStep:
            self.resynthZoneStep.doBatch(iDataPairs)
for fname, annotSet in iDataPairs:
annotSet.avoidWhitespaceInTags(self.descriptor)
# The problem with doing this file by file is that you have to call the
# tokenizer every damn time when you align. What I really want to do is
# do it in batch, and within the batch process, do the individual file
# replacements.
def doBatch(self, iDataPairs, replacer = None, prologue = None, prologue_file = None, dont_transform = None, **kw):
if (prologue is None) and (prologue_file is not None):
if not os.path.isabs(prologue_file):
prologue_file = os.path.join(self.descriptor.taskRoot, prologue_file)
import codecs
fp = codecs.open(prologue_file, "r", "utf-8")
prologue = fp.read()
fp.close()
elif type(prologue) is str:
prologue = prologue.decode('ascii')
if dont_transform is not None:
dontTransform = set([x.strip() for x in dont_transform.split(",")])
else:
dontTransform = set()
# Someone might decide to call do() on this object. Let's see if we can
# figure out what replacer was used.
replacersUsed = set([annotSet.metadata.get("replacer_used") for fname, annotSet in iDataPairs])
replacersUsed.discard(None)
if len(replacersUsed) > 1:
raise Error.MATError("transform", "multiple replacers specified in transform set")
if replacer is None:
if len(replacersUsed) == 0:
raise Error.MATError("transform", "no replacer specified")
else:
replacer = list(replacersUsed)[0]
r = self.descriptor.instantiateReplacer(replacer, **kw)
if not r:
raise Error.MATError("transform", "couldn't find the replacer named " + replacer)
if isinstance(r.renderingStrategy, ClearRenderingStrategy):
clearTarget = True
else:
clearTarget = False
self._ensurePostTransformSteps()
# From these, we remove the ones which don't have any redaction attributes
# specified (they may have been filtered out by dont_nominate), and the ones which
# shouldn't be transformed.
# Actually, it's a bit more complicated than that. We don't want to LOSE
# content annotations which aren't replaceable. So what we want to do
# is build up a map of replacements for all content annotations, and
# then, for the subset of annotations which are transformable and
# have a replacement, use that replacement.
annotNames = self.descriptor.getAnnotationTypesByCategory("content")
outPairs = []
for fname, annotSet in iDataPairs:
try:
newSet = self._transformAnnotSet(r, annotSet, annotNames, dontTransform, prologue)
outPairs.append((fname, newSet))
except OverlapError:
sys.stderr.write("Can't transform document %s because there's an overlap\n" % fname)
return []
if clearTarget:
self.augmentClearZones(outPairs)
else:
self.augmentRedactedZones(outPairs)
# Finally, mark the document as zoned and tagged.
for fname, d in outPairs:
d.setStepsDone(["zone", "tag"])
return outPairs
def _transformAnnotSet(self, engine, annotSet, annotNames, dontTransform, prologue):
        # Seed it with mappings into the original signal.
replacerMap = {}
replaceableAnnotNames = set([a for a in self.descriptor.replaceableAnnotations()
if (a not in dontTransform) and \
(annotSet.findAnnotationType(a).attr_table.has_key(self.descriptor.REDACTION_ATTR))])
# We have to change the regions in the signal so that
# they're substituted. We order them because we need to
# go through them in order to handle the substitutions cleanly.
# Note that orderAnnotations will filter out the spanless types.
# This might generate an overlap error; see caller.
try:
annots = annotSet.orderAnnotations(annotNames)
except OverlapError:
sys.stderr.write("Can't transform document because there's an overlap\n")
return None
atypeIndexDict = {}
for aname in replaceableAnnotNames:
try:
t = annotSet.anameDict[aname]
except KeyError:
# There may not be any.
continue
atypeIndexDict[t] = t.ensureAttribute(self.descriptor.REDACTION_ATTR)
# Update the replacer map.
replacerMap[t] = lambda x: x[atypeIndexDict[x.atype]]
# Build a new doc.
d = AnnotatedDoc(globalTypeRepository = annotSet.atypeRepository.globalTypeRepository)
# Copy the metadata, because the interface will need it.
d.metadata = annotSet.metadata.copy()
d.metadata["phasesDone"] = []
signal = annotSet.signal
unparseableAttr = self.descriptor.SEED_UNPARSEABLE_ATTR
# Originally, I was going to have the
# untransformed ones as no annotations at all, but
# really, I should have an annotation, since I may
# need to compare them later.
replacementTuples = []
preservationTuples = []
for a in annots:
if a.get(unparseableAttr) is not None:
raise PluginError, ("The '%s' phrase '%s' from %d to %d could not be parsed for nomination, and its nomination must be reviewed before the transform step can apply" % (a.atype.lab, signal[a.start:a.end], a.start, a.end))
if a.atype.lab in replaceableAnnotNames:
replacementTuples.append((a.atype.lab, a.start, a.end, replacerMap[a.atype](a)))
else:
preservationTuples.append((a.atype.lab, a.start, a.end))
output, finalTuples = engine.Transform(signal, prologue, replacementTuples, preservationTuples)
for lab, start, end in finalTuples:
# Poo. The type is going to have the "redacted" attribute,
# which may hose me at some point.
newT = d.findAnnotationType(lab)
d.createAnnotation(start, end, newT)
d.signal = output
return d
# This won't be recorded as a step done, but if it were, you can't
# undo it anyway.
def do(self, annotSet, **kw):
iDataPairs = self.doBatch([("<file>", annotSet)], **kw)
if iDataPairs:
return iDataPairs[0][1]
else:
return None
def undo(self, annotSet, **kw):
pass
class ResynthZoneStep(PluginStep):
def do(self, annotSet, **kw):
return annotSet
def undo(self, annotSet, **kw):
pass
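# Pseudo-step whose undo() strips token and zone annotations in one go.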
class MultiZoneStepForUndo(PluginStep):
# This had better never be called.
def do(self, annotSet, **kw):
return annotSet
def undo(self, annotSet, **kw):
self.removeAnnotationsByCategory(annotSet, "token", "zone")
def isDone(self, annotSet):
return False
class ResynthTagStep(TagStep):
def __init__(self, *args, **kw):
if (kw.has_key("by_hand") and kw["by_hand"]):
raise PluginError, "by_hand attribute applies only to a real tagging step"
TagStep.__init__(self, *args, **kw)
# This isn't really a tag step.
del self.initSettings["tag_step"]
def paramsSatisfactory(self, wfName, failureReasons, replacer = None, **params):
if replacer is None:
allReplacers = self.descriptor.allReplacers()
if len(allReplacers) == 1:
replacer = allReplacers[0]
if replacer is None:
raise PluginError, "no replacer specified"
# Filter the task implementation based on the replacer.
# If the named replacer isn't one of the replacers
# in the task, we bail.
rPair = self.descriptor.findReplacer(replacer)
if rPair is None:
failureReasons.append("task '%s' does not know about the replacer '%s'" % (self.descriptor.name, replacer))
return False
elif wfName not in rPair[1]:
failureReasons.append("workflow '%s' in task '%s' does not support the replacer '%s'" % (wfName, self.descriptor.name, replacer))
return False
else:
return True
def do(self, annotSet, replacer = None, **kw):
# Ask the current replacer to find all the matches.
if replacer is None:
# Checked in paramsSatisfactory().
replacer = self.descriptor.allReplacers()[0]
try:
r = self.descriptor.instantiateReplacer(replacer, **kw)
if not r:
raise Error.MATError("tag", "couldn't find the replacer named " + replacer)
# Two phases: first we digest, then we replace.
tuples = r.FindReplacedElements(annotSet.signal)
for start, end, tname in tuples:
atype = annotSet.findAnnotationType(tname)
annotSet.createAnnotation(start, end, tname)
return annotSet
except Exception, e:
raise Error.MATError("tag", str(e), show_tb = True)
# Undocumented utility for expanding the documentation in-line.
class DocEnhancer(PluginDocInstaller):
def process(self):
#
# BEGIN APP-SPECIFIC MODIFICATIONS
#
# In this section, you should modify the value of INDEX_CONTENTS,
# and populate the HTML target directory appropriately.
# The deidentification bundle consists of three things: the deidentification
# summary and the general extensions, which are only provided by the core, and
# the site modifications, which are only provided by the sites. Ideally,
# these should be loadable in any order. So let's say that we expect to insert,
# under the section marker
# <div class="invisible" id="appcustomizations"></div>
# something that looks like
# <div id="deidcustomizations">
# <ul class="secthead"><li>Deidentification customizations</li><ul>
# <ul><li>General
# <li><...site link...>
# </ul>
# </div>
self.ensureDEID()
# Now, since this is the core, we insert the general link
# at the beginning of the deidcustomization list, and we
# insert the introduction at the appropriate place.
self.addListElement(self.getElementById("deidcustomizationlist"),
"General", href = "doc/general.html", first = True)
self.addAppOverviewEntry("doc/intro.html", "MIST: The MITRE Identification Scrubber Toolkit")
def ensureDEID(self):
# So if there isn't any div yet, insert the infrastructure. Then, add the local
# link at the end, if this is the local one, and if it's the core, add the
# core link.
DEID_INSERT = """
<div id="deidcustomizations">
<ul class="secthead"><li>Deidentification customizations</li></ul>
<ul id="deidcustomizationlist"></ul>
</div>
"""
# Everybody makes sure that the deidcustomization node is present.
custNode = self.getElementById("deidcustomizations")
if custNode is None:
self.addAppCustomizationList(DEID_INSERT)
def addSubtaskDetail(self, url, listEntry):
self.ensureDEID()
# Now, since this is one of the sites, we insert the site link
# at the end of the deidcustomization list.
self.addListElement(self.getElementById("deidcustomizationlist"),
listEntry, href = url)
| {
"content_hash": "5fc5e33186e96da331192f7411b7afcb",
"timestamp": "",
"source": "github",
"line_count": 1347,
"max_line_length": 357,
"avg_line_length": 44.18634001484781,
"alnum_prop": 0.6065626102589089,
"repo_name": "VHAINNOVATIONS/DmD",
"id": "c1b4afe06b10a1f4f73064ab188d6ae80a75b6fe",
"size": "59622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrubber/MIST_2_0_4/src/tasks/core/python/Deidentification.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "258262"
},
{
"name": "HTML",
"bytes": "3057541"
},
{
"name": "Java",
"bytes": "363296"
},
{
"name": "JavaScript",
"bytes": "8682388"
},
{
"name": "Perl",
"bytes": "294110"
},
{
"name": "Perl6",
"bytes": "14166"
},
{
"name": "Prolog",
"bytes": "782419"
},
{
"name": "Python",
"bytes": "3569206"
},
{
"name": "Shell",
"bytes": "6422"
},
{
"name": "XS",
"bytes": "120883"
}
],
"symlink_target": ""
} |
import logging
import requests
import unittest
from unittest.mock import MagicMock, patch, PropertyMock
from pyhik.hikvision import HikCamera
from pyhik.constants import (CONNECT_TIMEOUT)
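# Canned ISAPI motionDetection response; the placeholder is filled with "true" or
# "false" to simulate the camera's current motion detection state.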
XML = """<MotionDetection xmlns="http://www.hikvision.com/ver20/XMLSchema" version="2.0">
<enabled>{}</enabled>
<enableHighlight>true</enableHighlight>
<samplingInterval>2</samplingInterval>
<startTriggerTime>500</startTriggerTime>
<endTriggerTime>500</endTriggerTime>
<regionType>grid</regionType>
<Grid>
<rowGranularity>18</rowGranularity>
<columnGranularity>22</columnGranularity>
</Grid>
<MotionDetectionLayout version="2.0">
<sensitivityLevel>20</sensitivityLevel>
<layout>
<gridMap>000000000000000000000000000000000c007e0c007ffffc</gridMap>
</layout>
</MotionDetectionLayout>
</MotionDetection>"""
@patch("pyhik.hikvision.requests.Session")
class HikvisionTestCase(unittest.TestCase):
@staticmethod
def set_motion_detection_state(get, value):
get.reset_mock()
mock = get.return_value
mock.reset_mock()
type(mock).ok = PropertyMock(return_value=True)
type(mock).status_code = PropertyMock(return_value=requests.codes.ok)
type(mock).text = PropertyMock(
return_value=XML.format("true" if value else "false")
)
return get
@patch("pyhik.hikvision.HikCamera.get_device_info")
@patch("pyhik.hikvision.HikCamera.get_event_triggers")
def test_motion_detection(self, *args):
session = args[-1].return_value
get = session.get
url = "localhost:80/ISAPI/System/Video/inputs/channels/1/motionDetection"
# Motion detection disabled
self.set_motion_detection_state(get, False)
device = HikCamera(host="localhost")
get.assert_called_once_with(url, timeout=CONNECT_TIMEOUT)
self.assertIsNotNone(device)
self.assertFalse(device.current_motion_detection_state)
# Motion detection enabled
self.set_motion_detection_state(get, True)
device = HikCamera(host="localhost")
self.assertIsNotNone(device)
self.assertTrue(device.current_motion_detection_state)
# Enable calls put with the expected data
self.set_motion_detection_state(get, True)
session.put.return_value = MagicMock(status_code=requests.codes.ok, ok=True)
device.enable_motion_detection()
session.put.assert_called_once_with(url, data=XML.format("true").encode(), timeout=CONNECT_TIMEOUT)
# Disable
        def change_get_response(url, data, timeout):
self.set_motion_detection_state(get, False)
return MagicMock(ok=True, status_code=requests.codes.ok)
self.set_motion_detection_state(get, True)
session.put = MagicMock(side_effect=change_get_response)
device = HikCamera(host="localhost")
self.assertTrue(device.current_motion_detection_state)
device.disable_motion_detection()
self.assertFalse(device.current_motion_detection_state)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "358d4af3878a64a064c54e2161a5516d",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 107,
"avg_line_length": 37.464285714285715,
"alnum_prop": 0.6835081029551954,
"repo_name": "mezz64/pyHik",
"id": "21d4687b07961a50ceb170d54a3a559f78f7a9fb",
"size": "3171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_hikvision.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33500"
}
],
"symlink_target": ""
} |
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
from openbudgets.apps.transport.incoming.importers.initial import InitImporter
class Command(BaseCommand):
help = 'Loads initial data for the instance from formatted CSV files.'
# Also need for the language settings issue described below.
can_import_settings = False
def __init__(self):
# Make django respect our language settings in management commands.
# We need to enforce this for modeltranslation to work as expected, and
# work around a hardcoded value for this in Django itself.
# https://groups.google.com/forum/?fromgroups#!topic/django-modeltranslation/JBgEBfWZZ9A
translation.activate(settings.MODELTRANSLATION_DEFAULT_LANGUAGE)
super(Command, self).__init__()
def handle(self, *args, **options):
self.stdout.write('Loading initial data from CSV sources.')
fixtures = os.listdir(settings.FIXTURE_DIRS[0])
csvs = sorted([filename for filename in fixtures if filename.endswith('.csv')])
for csv in csvs:
self.stdout.write('Writing data from ' + csv + ' ...')
f = settings.FIXTURE_DIRS[0] + '/' + csv
importer = InitImporter(f)
importer.save()
self.stdout.write("Data from CSV sources loaded. We are ready to rock.")
| {
"content_hash": "7ccf010683400b97264e6e50d958d3ea",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 96,
"avg_line_length": 39.55555555555556,
"alnum_prop": 0.6867977528089888,
"repo_name": "shaib/openbudgets",
"id": "615f10dad115c68f123031565541a7408c977fc1",
"size": "1424",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "openbudgets/commons/management/commands/loadcsv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "355833"
},
{
"name": "JavaScript",
"bytes": "1185878"
},
{
"name": "Python",
"bytes": "487714"
}
],
"symlink_target": ""
} |
"""Support for ANEL PwrCtrl switches."""
from datetime import timedelta
import logging
from anel_pwrctrl import DeviceMaster
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_PORT_RECV = "port_recv"
CONF_PORT_SEND = "port_send"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PORT_RECV): cv.port,
vol.Required(CONF_PORT_SEND): cv.port,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HOST): cv.string,
}
)
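# Illustrative configuration.yaml entry accepted by the schema above. All values
# below are placeholders for demonstration, not defaults shipped with the integration:
#
#   switch:
#     - platform: anel_pwrctrl
#       username: admin
#       password: secret
#       port_recv: 77
#       port_send: 75
#       host: 192.168.0.100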
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up PwrCtrl devices/switches."""
host = config.get(CONF_HOST)
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
port_recv = config[CONF_PORT_RECV]
port_send = config[CONF_PORT_SEND]
try:
master = DeviceMaster(
username=username,
password=password,
read_port=port_send,
write_port=port_recv,
)
master.query(ip_addr=host)
except OSError as ex:
_LOGGER.error("Unable to discover PwrCtrl device: %s", str(ex))
return False
devices = []
for device in master.devices.values():
parent_device = PwrCtrlDevice(device)
devices.extend(
PwrCtrlSwitch(switch, parent_device) for switch in device.switches.values()
)
add_entities(devices)
class PwrCtrlSwitch(SwitchEntity):
"""Representation of a PwrCtrl switch."""
def __init__(self, port, parent_device):
"""Initialize the PwrCtrl switch."""
self._port = port
self._parent_device = parent_device
@property
def unique_id(self):
"""Return the unique ID of the device."""
return f"{self._port.device.host}-{self._port.get_index()}"
@property
def name(self):
"""Return the name of the device."""
return self._port.label
@property
def is_on(self):
"""Return true if the device is on."""
return self._port.get_state()
def update(self):
"""Trigger update for all switches on the parent device."""
self._parent_device.update()
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._port.on()
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._port.off()
class PwrCtrlDevice:
"""Device representation for per device throttling."""
def __init__(self, device):
"""Initialize the PwrCtrl device."""
self._device = device
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update the device and all its switches."""
self._device.update()
| {
"content_hash": "a020d6178cee7095be6eb956aa482a71",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 87,
"avg_line_length": 28.08411214953271,
"alnum_prop": 0.6369384359400998,
"repo_name": "sdague/home-assistant",
"id": "0669a3bb6c6a4fa79dfa225f47b60cd25a68711c",
"size": "3005",
"binary": false,
"copies": "10",
"ref": "refs/heads/dev",
"path": "homeassistant/components/anel_pwrctrl/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "27869189"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organization', '0014_auto_20160619_1837'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='verified',
field=models.BooleanField(default=False, help_text='Verified organizations are visible to all users', verbose_name='Verified'),
),
]
| {
"content_hash": "45b79bc4208488e6c68c809f0b2373bc",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 139,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.6416666666666667,
"repo_name": "sakset/getyourdata",
"id": "159033cdd60bdcd91b41d54b7069963f002ffbb3",
"size": "552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getyourdata/organization/migrations/0015_auto_20160619_1852.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2791"
},
{
"name": "HTML",
"bytes": "64735"
},
{
"name": "JavaScript",
"bytes": "1519"
},
{
"name": "Python",
"bytes": "218082"
},
{
"name": "Shell",
"bytes": "2722"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import socket
import tinctest
import unittest2 as unittest
from tinctest.lib import local_path
from mpp.gpdb.tests.storage.lib import Database
from mpp.models import MPPTestCase
from tinctest.models.scenario import ScenarioTestCase
from mpp.gpdb.tests.storage.filerep_end_to_end import FilerepTestCase
class FilerepMiscTestCase(ScenarioTestCase, MPPTestCase):
"""
@gucs gp_create_table_random_default_distribution=off
"""
def test_verify_ctlog_xlog_consistency(self):
'''
        @description : This test verifies consistency between the xlog and the changetracking logs.
        Consistency can be lost because the two logs are not written synchronously,
        which can make the resync phase fail due to a mismatch between the actual data and the changetracking
        logs. Refer to MPP-23631 for a fuller explanation.
        The test increases the WAL buffer size to reduce how often the WAL is flushed to disk, which pushes the
        changetracking log out more frequently and thus tries to provoke an inconsistency between the two logs.
        A correctly behaving system is unaffected by this write pattern, and that is when the test passes.
@product_version gpdb:(4.3.1.0-4.3], gpdb:(4.2.8.0-4.2]
'''
list_wal_buf = []
list_wal_buf.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.run_gpconfig", ['wal_buffers', '512', '512']))
self.test_case_scenario.append(list_wal_buf, serial=True)
list_gen_load = []
list_gen_load.append("mpp.gpdb.tests.storage.filerep.mpp23631.test_misc.ctlog_xlog_cons_setup")
list_gen_load.append("mpp.gpdb.tests.storage.filerep_end_to_end.runcheckpoint.runCheckPointSQL.runCheckPointTestCase")
self.test_case_scenario.append(list_gen_load, serial=True)
list = []
list.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.method_run_failover",['primary']))
list.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.trigger_transition")
self.test_case_scenario.append(list,serial=True)
list_ct = []
list_ct.append("mpp.gpdb.tests.storage.filerep_end_to_end.runcheckpoint.runCheckPointSQL.runCheckPointTestCase")
list_ct.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.inject_fault", ['checkpoint', 'async', 'skip', 'primary']))
self.test_case_scenario.append(list_ct,serial=True)
list_ct = []
list_ct.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.inject_fault", ['fault_in_background_writer_main', 'async', 'suspend', 'primary']))
self.test_case_scenario.append(list_ct,serial=True)
list_ct = []
list_ct.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.inject_fault", ['appendonly_insert', 'async', 'panic', 'primary', 'all', 'ao']))
list_ct.append("mpp.gpdb.tests.storage.filerep.mpp23631.test_misc.ctlog_xlog_cons_ct")
self.test_case_scenario.append(list_ct,serial=True)
list_ct_post_reset = []
list_ct_post_reset.append("mpp.gpdb.tests.storage.filerep.mpp23631.test_misc.ctlog_xlog_cons_post_reset_ct")
list_ct_post_reset.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.run_gprecoverseg",['incr']))
list_ct_post_reset.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.wait_till_insync_transition")
# list_ct_post_reset.append("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.check_mirror_seg")
list_ct_post_reset.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.do_gpcheckcat",{'outputFile':'test_verify_ctlog_xlog_consistency.out'}))
self.test_case_scenario.append(list_ct_post_reset,serial=True)
list_cleanup = []
list_cleanup.append("mpp.gpdb.tests.storage.filerep.mpp23631.test_misc.ctlog_xlog_cons_cleanup")
list_cleanup.append(("mpp.gpdb.tests.storage.filerep_end_to_end.FilerepTestCase.run_gpconfig", ['wal_buffers', '8', '8']))
self.test_case_scenario.append(list_cleanup, serial=True)
| {
"content_hash": "d2a55d8232d8ce04e6a937c088d1a7d6",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 166,
"avg_line_length": 55.348837209302324,
"alnum_prop": 0.7224789915966386,
"repo_name": "cjcjameson/gpdb",
"id": "a1d328604252cb7388470fba2309dd50955e0f81",
"size": "4760",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/test/tinc/tincrepo/mpp/gpdb/tests/storage/filerep/mpp23631/test_mpp23631.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11492"
},
{
"name": "C",
"bytes": "35862596"
},
{
"name": "C++",
"bytes": "3303631"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "736617"
},
{
"name": "HTML",
"bytes": "191406"
},
{
"name": "Java",
"bytes": "268244"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "196275"
},
{
"name": "M4",
"bytes": "104559"
},
{
"name": "Makefile",
"bytes": "437242"
},
{
"name": "Objective-C",
"bytes": "41796"
},
{
"name": "PLSQL",
"bytes": "261677"
},
{
"name": "PLpgSQL",
"bytes": "5198576"
},
{
"name": "Perl",
"bytes": "3901323"
},
{
"name": "Perl 6",
"bytes": "8302"
},
{
"name": "Python",
"bytes": "8753134"
},
{
"name": "Roff",
"bytes": "51338"
},
{
"name": "Ruby",
"bytes": "26724"
},
{
"name": "SQLPL",
"bytes": "3895383"
},
{
"name": "Shell",
"bytes": "554130"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "488779"
}
],
"symlink_target": ""
} |
"""Copyright 2008 Python Software Foundation, Ian Bicking, and Google."""
import cStringIO
import inspect
import mimetools
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
# Mapping status codes to official W3C names
responses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
# maximal line length when calling readline().
_MAXLINE = 65536
# Can't get this symbol from socket since importing socket causes an import
# cycle though:
# google.net.proto.ProtocolBuffer imports...
# httplib imports ...
# socket imports ...
# remote_socket_service_pb imports ProtocolBuffer
_GLOBAL_DEFAULT_TIMEOUT = object()
_IMPLEMENTATION = 'gae'
class HTTPMessage(mimetools.Message):
# App Engine Note: This class has been copied almost unchanged from
# Python 2.7.2
def addheader(self, key, value):
"""Add header for field key handling repeats."""
prev = self.dict.get(key)
if prev is None:
self.dict[key] = value
else:
combined = ", ".join((prev, value))
self.dict[key] = combined
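  # Worked example (illustrative values): calling addheader('set-cookie', 'a=1')
  # and then addheader('set-cookie', 'b=2') leaves
  # self.dict['set-cookie'] == 'a=1, b=2', i.e. repeated fields are folded into a
  # single comma-separated value as described by RFC 2616 sec 4.2.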
def addcontinue(self, key, more):
"""Add more field data from a continuation line."""
prev = self.dict[key]
self.dict[key] = prev + "\n " + more
def readheaders(self):
"""Read header lines.
Read header lines up to the entirely blank line that terminates them.
The (normally blank) line that ends the headers is skipped, but not
included in the returned list. If a non-header line ends the headers,
(which is an error), an attempt is made to backspace over it; it is
never included in the returned list.
The variable self.status is set to the empty string if all went well,
otherwise it is an error message. The variable self.headers is a
completely uninterpreted list of lines contained in the header (so
printing them will reproduce the header exactly as it appears in the
file).
If multiple header fields with the same name occur, they are combined
according to the rules in RFC 2616 sec 4.2:
Appending each subsequent field-value to the first, each separated
by a comma. The order in which header fields with the same field-name
are received is significant to the interpretation of the combined
field value.
"""
# XXX The implementation overrides the readheaders() method of
# rfc822.Message. The base class design isn't amenable to
# customized behavior here so the method here is a copy of the
# base class code with a few small changes.
self.dict = {}
self.unixfrom = ''
self.headers = hlist = []
self.status = ''
headerseen = ""
firstline = 1
startofline = unread = tell = None
if hasattr(self.fp, 'unread'):
unread = self.fp.unread
elif self.seekable:
tell = self.fp.tell
while True:
if tell:
try:
startofline = tell()
except IOError:
startofline = tell = None
self.seekable = 0
line = self.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
if not line:
self.status = 'EOF in headers'
break
# Skip unix From name time lines
if firstline and line.startswith('From '):
self.unixfrom = self.unixfrom + line
continue
firstline = 0
if headerseen and line[0] in ' \t':
# XXX Not sure if continuation lines are handled properly
# for http and/or for repeating headers
# It's a continuation line.
hlist.append(line)
self.addcontinue(headerseen, line.strip())
continue
elif self.iscomment(line):
# It's a comment. Ignore it.
continue
elif self.islast(line):
# Note! No pushback here! The delimiter line gets eaten.
break
headerseen = self.isheader(line)
if headerseen:
# It's a legal header line, save it.
hlist.append(line)
self.addheader(headerseen, line[len(headerseen)+1:].strip())
continue
else:
# It's not a header line; throw it back and stop here.
if not self.dict:
self.status = 'No headers'
else:
self.status = 'Non-header line where header expected'
# Try to undo the read.
if unread:
unread(line)
elif tell:
self.fp.seek(startofline)
else:
self.status = self.status + '; bad seek'
break
class HTTPResponse:
# App Engine Note: The public interface is identical to the interface provided
# in Python 2.7 except __init__ takes a
# google.appengine.api.urlfetch.Response instance rather than a socket.
def __init__(self,
fetch_response, # App Engine Note: fetch_response was "sock".
debuglevel=0,
strict=0,
method=None,
buffering=False):
self._fetch_response = fetch_response
self.fp = cStringIO.StringIO(fetch_response.content) # For the HTTP class.
self.debuglevel = debuglevel
self.strict = strict
self._method = method
self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def begin(self):
if self.msg is not None:
# we've already started reading the response
return
self.msg = self._fetch_response.header_msg
self.version = 11 # We can't get the real HTTP version so make one up.
self.status = self._fetch_response.status_code
self.reason = responses.get(self._fetch_response.status_code, 'Unknown')
# The following are implementation details and should not be read by
# clients - but set them to reasonable values just in case.
self.chunked = 0
self.chunk_left = None
self.length = None
self.will_close = 1
def close(self):
if self.fp:
self.fp.close()
self.fp = None
def isclosed(self):
return self.fp is None
def read(self, amt=None):
if self.fp is None:
return ''
if self._method == 'HEAD':
self.close()
return ''
if amt is None:
return self.fp.read()
else:
return self.fp.read(amt)
def fileno(self):
raise NotImplementedError('fileno is not supported')
def getheader(self, name, default=None):
if self.msg is None:
raise ResponseNotReady()
return self.msg.getheader(name, default)
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise ResponseNotReady()
return self.msg.items()
class HTTPConnection:
# App Engine Note: The public interface is identical to the interface provided
# in Python 2.7.2 but the implementation uses
# google.appengine.api.urlfetch. Some methods are no-ops and set_tunnel
# raises NotImplementedError.
_protocol = 'http' # passed to urlfetch.
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
strict = 0
_allow_truncated = True
_follow_redirects = False
def __init__(self, host, port=None, strict=None,
timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
    # net.proto.ProtocolBuffer relies on httplib so importing urlfetch at the
# module level causes a failure on prod. That means the import needs to be
# lazy.
from google.appengine.api import urlfetch
self._fetch = urlfetch.fetch
self._method_map = {
'GET': urlfetch.GET,
'POST': urlfetch.POST,
'HEAD': urlfetch.HEAD,
'PUT': urlfetch.PUT,
'DELETE': urlfetch.DELETE,
'PATCH': urlfetch.PATCH,
}
self.host = host
self.port = port
# With urllib2 in Python 2.6, an object can be passed here.
# The default is set to socket.GLOBAL_DEFAULT_TIMEOUT which is an object.
# We only accept float, int or long values, otherwise it can be
# silently ignored.
if not isinstance(timeout, (float, int, long)):
timeout = None
self.timeout = timeout
# Both 'strict' and 'source_address' are ignored.
self._method = self._url = None
self._body = ''
self.headers = []
def set_tunnel(self, host, port=None, headers=None):
""" Sets up the host and the port for the HTTP CONNECT Tunnelling.
The headers argument should be a mapping of extra HTTP headers
to send with the CONNECT request.
App Engine Note: This method is not supported.
"""
raise NotImplementedError('HTTP CONNECT Tunnelling is not supported')
def set_debuglevel(self, level):
pass
def connect(self):
"""Connect to the host and port specified in __init__.
App Engine Note: This method is a no-op.
"""
def close(self):
"""Close the connection to the HTTP server.
App Engine Note: This method is a no-op.
"""
def send(self, data):
"""Send `data' to the server."""
self._body += data
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
`skip_host' if True does not add automatically a 'Host:' header
`skip_accept_encoding' if True does not add automatically an
'Accept-Encoding:' header
App Engine Note: `skip_host' and `skip_accept_encoding' are not honored by
the urlfetch service.
"""
self._method = method
self._url = url
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
hdr = '\r\n\t'.join([str(v) for v in values])
self.headers.append((header, hdr))
def endheaders(self, message_body=None):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional
message_body argument can be used to pass message body
associated with the request. The message body will be sent in
the same packet as the message headers if possible. The
message_body should be a string.
"""
if message_body is not None:
self.send(message_body)
def request(self, method, url, body=None, headers=None):
"""Send a complete request to the server."""
self._method = method
self._url = url
try: # 'body' can be a file.
self._body = body.read()
except AttributeError:
self._body = body
if headers is None:
headers = []
elif hasattr(headers, 'items'):
headers = headers.items()
self.headers = headers
@staticmethod
def _getargspec(callable_object):
assert callable(callable_object)
try:
# Methods and lambdas.
return inspect.getargspec(callable_object)
except TypeError:
# Class instances with __call__.
return inspect.getargspec(callable_object.__call__)
def getresponse(self, buffering=False):
"""Get the response from the server.
App Engine Note: buffering is ignored.
"""
    # net.proto.ProtocolBuffer relies on httplib so importing urlfetch at the
# module level causes a failure on prod. That means the import needs to be
# lazy.
from google.appengine.api import urlfetch
import socket # Cannot be done at global scope due to circular import.
if self.port and self.port != self.default_port:
host = '%s:%s' % (self.host, self.port)
else:
host = self.host
if not self._url.startswith(self._protocol):
url = '%s://%s%s' % (self._protocol, host, self._url)
else:
url = self._url
headers = dict(self.headers)
if self.timeout in [_GLOBAL_DEFAULT_TIMEOUT,
socket._GLOBAL_DEFAULT_TIMEOUT]:
deadline = socket.getdefaulttimeout()
else:
deadline = self.timeout
try:
method = self._method_map[self._method.upper()]
except KeyError:
raise ValueError('%r is an unrecognized HTTP method' % self._method)
try:
# The Python Standard Library doesn't validate certificates so don't
# validate them here either. But some libraries (httplib2, possibly
# others) use an alternate technique where the fetch function does not
# have a validate_certificate argument so only provide it when supported.
argspec = self._getargspec(self._fetch)
extra_kwargs = (
{'validate_certificate': False}
if argspec.keywords or 'validate_certificate' in argspec.args
else {})
fetch_response = self._fetch(url,
self._body,
method, headers,
self._allow_truncated,
self._follow_redirects,
deadline,
**extra_kwargs)
except urlfetch.InvalidURLError, e:
raise InvalidURL(str(e))
except (urlfetch.ResponseTooLargeError, urlfetch.DeadlineExceededError), e:
raise HTTPException(str(e))
except urlfetch.SSLCertificateError, e:
# Should be ssl.SSLError but the ssl module isn't available.
# Continue to support this exception for versions of _fetch that do not
# support validate_certificates. Also, in production App Engine defers
# specific semantics so leaving this in just in case.
raise HTTPException(str(e))
except urlfetch.DownloadError, e:
      # One of the following occurred: UNSPECIFIED_ERROR, FETCH_ERROR
raise socket.error(
          'An error occurred while connecting to the server: %s' % e)
response = self.response_class(fetch_response, method=method)
response.begin()
self.close()
return response
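# Illustrative usage sketch of the class above (host and path are placeholders):
#
#     conn = HTTPConnection('www.example.com')
#     conn.request('GET', '/index.html')
#     response = conn.getresponse()
#     body = response.read()
#
# connect() and close() are no-ops here; the single urlfetch.fetch() call happens
# inside getresponse().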
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
# App Engine Note: The public interface is identical to the interface provided
# in Python 2.7.2 but the implementation does not support key and
# certificate files.
_protocol = 'https' # passed to urlfetch.
default_port = HTTPS_PORT
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=False, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
if key_file is not None or cert_file is not None:
raise NotImplementedError(
'key_file and cert_file arguments are not implemented')
HTTPConnection.__init__(self, host, port, strict, timeout, source_address)
class HTTP:
"Compatibility class with httplib.py from 1.5."
# App Engine Note: The public interface is identical to the interface provided
# in Python 2.7.
_http_vsn = 10
_http_vsn_str = 'HTTP/1.0'
debuglevel = 0
_connection_class = HTTPConnection
def __init__(self, host='', port=None, strict=None):
"Provide a default host, since the superclass requires one."
# some joker passed 0 explicitly, meaning default port
if port == 0:
port = None
# Note that we may pass an empty string as the host; this will throw
# an error when we attempt to connect. Presumably, the client code
# will call connect before then, with a proper host.
self._setup(self._connection_class(host, port, strict))
def _setup(self, conn):
self._conn = conn
# set up delegation to flesh out interface
self.send = conn.send
self.putrequest = conn.putrequest
self.endheaders = conn.endheaders
self.set_debuglevel = conn.set_debuglevel
conn._http_vsn = self._http_vsn
conn._http_vsn_str = self._http_vsn_str
self.file = None
def connect(self, host=None, port=None):
"Accept arguments to set the host/port, since the superclass doesn't."
self.__init__(host, port)
def getfile(self):
"Provide a getfile, since the superclass' does not use this concept."
return self.file
def putheader(self, header, *values):
"The superclass allows only one value argument."
self._conn.putheader(header, '\r\n\t'.join([str(v) for v in values]))
def getreply(self, buffering=False):
"""Compat definition since superclass does not define it.
Returns a tuple consisting of:
- server status code (e.g. '200' if all goes well)
- server "reason" corresponding to status code
- any RFC822 headers in the response from the server
"""
response = self._conn.getresponse()
self.headers = response.msg
self.file = response.fp
return response.status, response.reason, response.msg
def close(self):
self._conn.close()
# note that self.file == response.fp, which gets closed by the
# superclass. just clear the object ref here.
### hmm. messy. if status==-1, then self.file is owned by us.
### well... we aren't explicitly closing, but losing this ref will
### do it
self.file = None
# Copy from Python's httplib implementation.
class HTTPS(HTTP):
"""Compatibility with 1.5 httplib interface
Python 1.5.2 did not have an HTTPS class, but it defined an
interface for sending http requests that is also useful for
https.
"""
# App Engine Note: The public interface is identical to the interface provided
# in Python 2.7 except that key and certificate files are not supported.
_connection_class = HTTPSConnection
def __init__(self, host='', port=None, key_file=None, cert_file=None,
strict=None):
if key_file is not None or cert_file is not None:
raise NotImplementedError(
'key_file and cert_file arguments are not implemented')
# provide a default host, pass the X509 cert info
# urf. compensate for bad input.
if port == 0:
port = None
self._setup(self._connection_class(host, port, key_file,
cert_file, strict))
# we never actually use these for anything, but we keep them
# here for compatibility with post-1.5.2 CVS.
self.key_file = key_file
self.cert_file = cert_file
class HTTPException(Exception):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class InvalidURL(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class UnknownProtocol(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class UnimplementedFileMode(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class IncompleteRead(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
def __init__(self, partial, expected=None):
self.args = partial,
self.partial = partial
self.expected = expected
def __repr__(self):
if self.expected is not None:
e = ', %i more expected' % self.expected
else:
e = ''
return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e)
def __str__(self):
return repr(self)
class ImproperConnectionState(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class CannotSendRequest(ImproperConnectionState):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class CannotSendHeader(ImproperConnectionState):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class ResponseNotReady(ImproperConnectionState):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
pass
class BadStatusLine(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
def __init__(self, line):
if not line:
line = repr(line)
self.args = line,
self.line = line
class LineTooLong(HTTPException):
# App Engine Note: This class has been copied unchanged from Python 2.7.2
def __init__(self, line_type):
HTTPException.__init__(self, "got more than %d bytes when reading %s"
% (_MAXLINE, line_type))
# for backwards compatibility
error = HTTPException
class LineAndFileWrapper:
"""A limited file-like object for HTTP/0.9 responses."""
# App Engine Note: This class has been copied unchanged from Python 2.7.2
# The status-line parsing code calls readline(), which normally
# get the HTTP status line. For a 0.9 response, however, this is
# actually the first line of the body! Clients need to get a
# readable file object that contains that line.
def __init__(self, line, file):
self._line = line
self._file = file
self._line_consumed = 0
self._line_offset = 0
self._line_left = len(line)
def __getattr__(self, attr):
return getattr(self._file, attr)
def _done(self):
# called when the last byte is read from the line. After the
# call, all read methods are delegated to the underlying file
# object.
self._line_consumed = 1
self.read = self._file.read
self.readline = self._file.readline
self.readlines = self._file.readlines
def read(self, amt=None):
if self._line_consumed:
return self._file.read(amt)
assert self._line_left
if amt is None or amt > self._line_left:
s = self._line[self._line_offset:]
self._done()
if amt is None:
return s + self._file.read()
else:
return s + self._file.read(amt - len(s))
else:
assert amt <= self._line_left
i = self._line_offset
j = i + amt
s = self._line[i:j]
self._line_offset = j
self._line_left -= amt
if self._line_left == 0:
self._done()
return s
def readline(self):
if self._line_consumed:
return self._file.readline()
assert self._line_left
s = self._line[self._line_offset:]
self._done()
return s
def readlines(self, size=None):
if self._line_consumed:
return self._file.readlines(size)
assert self._line_left
L = [self._line[self._line_offset:]]
self._done()
if size is None:
return L + self._file.readlines()
else:
return L + self._file.readlines(size)
| {
"content_hash": "ccc7299329fce4cfb211f306df0b0ddc",
"timestamp": "",
"source": "github",
"line_count": 815,
"max_line_length": 80,
"avg_line_length": 30.80490797546012,
"alnum_prop": 0.6577710507448419,
"repo_name": "Kazade/NeHe-Website",
"id": "1edee85707013d9b822f42e1e575875869aa8b57",
"size": "25302",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "google_appengine/google/appengine/dist27/gae_override/httplib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "407860"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "CSS",
"bytes": "504898"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "JavaScript",
"bytes": "1013425"
},
{
"name": "PHP",
"bytes": "2269231"
},
{
"name": "Python",
"bytes": "62625909"
},
{
"name": "Shell",
"bytes": "40752"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
import unittest
from atbash_cipher import (
decode,
encode,
)
# Tests adapted from `problem-specifications//canonical-data.json`
class AtbashCipherTest(unittest.TestCase):
def test_encode_yes(self):
self.assertEqual(encode("yes"), "bvh")
def test_encode_no(self):
self.assertEqual(encode("no"), "ml")
def test_encode_omg(self):
self.assertEqual(encode("OMG"), "lnt")
def test_encode_spaces(self):
self.assertEqual(encode("O M G"), "lnt")
def test_encode_mindblowingly(self):
self.assertEqual(encode("mindblowingly"), "nrmwy oldrm tob")
def test_encode_numbers(self):
self.assertEqual(encode("Testing,1 2 3, testing."), "gvhgr mt123 gvhgr mt")
def test_encode_deep_thought(self):
self.assertEqual(encode("Truth is fiction."), "gifgs rhurx grlm")
def test_encode_all_the_letters(self):
self.assertEqual(
encode("The quick brown fox jumps over the lazy dog."),
"gsvjf rxpyi ldmul cqfnk hlevi gsvoz abwlt",
)
def test_decode_exercism(self):
self.assertEqual(decode("vcvix rhn"), "exercism")
def test_decode_a_sentence(self):
self.assertEqual(
decode("zmlyh gzxov rhlug vmzhg vkkrm thglm v"),
"anobstacleisoftenasteppingstone",
)
def test_decode_numbers(self):
self.assertEqual(decode("gvhgr mt123 gvhgr mt"), "testing123testing")
def test_decode_all_the_letters(self):
self.assertEqual(
decode("gsvjf rxpyi ldmul cqfnk hlevi gsvoz abwlt"),
"thequickbrownfoxjumpsoverthelazydog",
)
def test_decode_with_too_many_spaces(self):
self.assertEqual(decode("vc vix r hn"), "exercism")
def test_decode_with_no_spaces(self):
self.assertEqual(
decode("zmlyhgzxovrhlugvmzhgvkkrmthglmv"), "anobstacleisoftenasteppingstone"
)
if __name__ == "__main__":
unittest.main()
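# The implementation under test is imported from atbash_cipher above. A minimal
# sketch consistent with these tests (an assumption, not the real module) could be:
#
#     import string
#     _TABLE = str.maketrans(string.ascii_lowercase, string.ascii_lowercase[::-1])
#
#     def decode(ciphered_text):
#         return "".join(c for c in ciphered_text.lower() if c.isalnum()).translate(_TABLE)
#
#     def encode(plain_text):
#         coded = decode(plain_text)
#         return " ".join(coded[i:i + 5] for i in range(0, len(coded), 5))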
| {
"content_hash": "2505ca983cf53553c5c732bc6ce8a96d",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 88,
"avg_line_length": 29.388059701492537,
"alnum_prop": 0.638395124428644,
"repo_name": "exercism/python",
"id": "98c1072afc7b41930252b9043726d94000cf4db6",
"size": "1969",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "exercises/practice/atbash-cipher/atbash_cipher_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "103144"
},
{
"name": "Python",
"bytes": "934764"
},
{
"name": "Shell",
"bytes": "2960"
}
],
"symlink_target": ""
} |
from unittest import main
from numpy.testing import assert_array_equal
import numpy as np
from calour._testing import Tests
from calour.filtering import _balanced_subsample
import calour as ca
class FTests(Tests):
def setUp(self):
super().setUp()
self.test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat, normalize=None)
self.test1 = ca.read(self.test1_biom, self.test1_samp, self.test1_feat, normalize=None)
def test_balanced_subsample(self):
rand = np.random.RandomState(None)
d = rand.choice([0, 1, 2], 9)
for n in (1, 3, 6, 9, 10):
keep = _balanced_subsample(d, n, None)
d2 = d[keep]
uniq, counts = np.unique(d2, return_counts=True)
self.assertTrue(np.all(counts == n))
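    # Illustrative sketch only: calour's real _balanced_subsample may differ, but
    # behaviour consistent with the test above (exactly n indices kept per class,
    # oversampling small classes with replacement) could look like the helper below.
    # It is not part of calour and is never called by these tests.
    @staticmethod
    def _balanced_subsample_sketch(labels, n, random_seed=None):
        rng = np.random.RandomState(random_seed)
        keep = []
        for value in np.unique(labels):
            idx = np.where(labels == value)[0]
            # draw exactly n indices for this class, with replacement if it is small
            keep.extend(rng.choice(idx, n, replace=len(idx) < n))
        return np.array(keep)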
def test_downsample_unique(self):
# test on features, random method, not inplace
# since each taxonomy is unique, should have the same as original
newexp = self.test1.downsample('taxonomy', axis=1)
self.assertEqual(newexp.shape, self.test1.shape)
self.assertIsNot(newexp, self.test1)
def test_downsample_keep_1(self):
# test on samples, random method, not inplace
newexp = self.test1.downsample('group', keep=1, random_seed=2017)
self.assertEqual(newexp.shape[0], 3)
self.assertEqual(list(newexp.data[:, 7].todense().A1), [845, 859, 9])
self.assertEqual(newexp.shape[1], self.test1.shape[1])
self.assertIsNot(newexp, self.test1)
newexp = self.test1.downsample('group', keep=1, random_seed=2018)
self.assertNotEqual(list(newexp.data[:, 7].todense().A1), [845, 859, 9])
def test_downsample_sample(self):
obs = self.test2.downsample('group')
# should be down to 4 samples; feature number is the same
self.assertEqual(obs.shape, (4, 8))
sid = obs.sample_metadata.index.tolist()
all_sid = self.test2.sample_metadata.index.tolist()
exp = self.test2.reorder([all_sid.index(i) for i in sid])
self.assert_experiment_equal(obs, exp)
def test_downsample_feature(self):
obs = self.test2.downsample('oxygen', axis=1)
sid = obs.feature_metadata.index.tolist()
self.assertEqual(obs.shape, (9, 4))
all_sid = self.test2.feature_metadata.index.tolist()
exp = self.test2.reorder([all_sid.index(i) for i in sid], axis=1)
self.assertEqual(obs, exp)
def test_downsample_keep(self):
# test keeping num_keep samples, and inplace
obs = self.test1.downsample('group', keep=9, inplace=True)
# should be down to 2 groups (18 samples); feature number is the same
self.assertEqual(obs.shape, (18, 12))
self.assertEqual(set(obs.sample_metadata['group']), set(['1', '2']))
self.assertIs(obs, self.test1)
def test_filter_by_metadata_sample_edge_cases(self):
# no group 3 - none filtered
obs = self.test2.filter_by_metadata('group', [3])
self.assertEqual(obs.shape, (0, 8))
obs = self.test2.filter_by_metadata('group', [3], negate=True)
self.assert_experiment_equal(obs, self.test2)
# all samples are filtered
obs = self.test2.filter_by_metadata('group', [1, 2])
self.assert_experiment_equal(obs, self.test2)
obs = self.test2.filter_by_metadata('group', [1, 2], negate=True)
self.assertEqual(obs.shape, (0, 8))
def test_filter_by_metadata_sample(self):
for sparse, inplace in [(True, False), (True, True), (False, False), (False, True)]:
test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat,
sparse=sparse, normalize=None)
            # only filter samples between 3 and 7.
obs = test2.filter_by_metadata(
'ori.order', lambda l: [7 > i > 3 for i in l], inplace=inplace)
self.assertEqual(obs.shape, (3, 8))
self.assertEqual(obs.sample_metadata.index.tolist(), ['S5', 'S6', 'S7'])
if inplace:
self.assertIs(obs, test2)
else:
self.assertIsNot(obs, test2)
def test_filter_by_metadata_feature_edge_cases(self):
# none filtered
obs = self.test2.filter_by_metadata('oxygen', ['facultative'], axis=1)
self.assertEqual(obs.shape, (9, 0))
obs = self.test2.filter_by_metadata('oxygen', ['facultative'], axis=1, negate=True)
self.assert_experiment_equal(obs, self.test2)
def test_filter_by_metadata_feature(self):
for sparse, inplace in [(True, False), (True, True), (False, False), (False, True)]:
test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat, sparse=sparse, normalize=None)
            # only filter samples with id between 3 and 7.
obs = test2.filter_by_metadata('oxygen', ['anaerobic'], axis=1, inplace=inplace)
self.assertEqual(obs.shape, (9, 2))
self.assertListEqual(obs.feature_metadata.index.tolist(), ['TG', 'TC'])
if inplace:
self.assertIs(obs, test2)
else:
self.assertIsNot(obs, test2)
def test_filter_by_metadata_na(self):
test = self.test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat,
normalize=None, feature_metadata_kwargs={'na_values': 'B'})
test_drop = test.filter_by_metadata('level1', select=None, axis='f')
self.assertEqual(self.test2.sample_metadata.index.tolist(),
test_drop.sample_metadata.index.tolist())
self.assertEqual(['AT', 'AG', 'AC', 'TA', 'TT', 'TC'],
test_drop.feature_metadata.index.tolist())
def test_filter_by_data_sample_edge_cases(self):
# all samples are filtered out
obs = self.test2.filter_by_data('abundance', axis=0, cutoff=100000, mean_or_sum='sum')
self.assertEqual(obs.shape, (0, 8))
# none is filtered out
obs = self.test2.filter_by_data('abundance', axis=0, cutoff=1, mean_or_sum='sum')
self.assert_experiment_equal(obs, self.test2)
self.assertIsNot(obs, self.test2)
def test_filter_by_data_sample(self):
for sparse, inplace in [(True, False), (True, True), (False, False), (False, True)]:
test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat, sparse=sparse, normalize=None)
# filter out samples with abundance < 1200. only the last sample is filtered out.
obs = test2.filter_by_data('abundance', axis=0, inplace=inplace, cutoff=1200, mean_or_sum='sum')
self.assertEqual(obs.shape, (8, 8))
self.assertNotIn('S9', obs.sample_metadata)
for sid in obs.sample_metadata.index:
assert_array_equal(obs[sid, :], self.test2[sid, :])
if inplace:
self.assertIs(obs, test2)
else:
self.assertIsNot(obs, test2)
def test_filter_by_data_feature_edge_cases(self):
# all features are filtered out
obs = self.test2.filter_by_data('abundance', axis=1, cutoff=10000, mean_or_sum='sum')
self.assertEqual(obs.shape, (9, 0))
# none is filtered out
obs = self.test2.filter_by_data('abundance', axis=1, cutoff=1, mean_or_sum='sum')
self.assert_experiment_equal(obs, self.test2)
self.assertIsNot(obs, self.test2)
def test_filter_by_data_feature(self):
# one feature is filtered out when cutoff is set to 25
for inplace in [True, False]:
obs = self.test2.filter_by_data('abundance', axis=1, inplace=inplace, cutoff=25, mean_or_sum='sum')
self.assertEqual(obs.shape, (9, 7))
self.assertNotIn('TA', obs.feature_metadata)
for fid in obs.feature_metadata.index:
assert_array_equal(obs[:, fid], self.test2[:, fid])
if inplace:
self.assertIs(obs, self.test2)
else:
self.assertIsNot(obs, self.test2)
def test_filter_prevalence(self):
        # fraction=0.5 keeps only features present in at least half of the samples
exp = self.test1.filter_prevalence(fraction=0.5)
fids = ['AA', 'AT', 'AG', 'TA', 'TT', 'TG', 'TC', 'GG']
self.assertListEqual(exp.feature_metadata.index.tolist(), fids)
self.assertEqual(exp.shape[0], self.test1.shape[0])
def test_filter_prevalence_zero(self):
        # a fraction above 1 (100%) should filter out all features
exp = self.test1.filter_prevalence(fraction=1.01)
self.assertListEqual(exp.feature_metadata.index.tolist(), [])
self.assertEqual(exp.shape[0], self.test1.shape[0])
def test_filter_prevalence_check(self):
        # filtering over all samples always filters more or equal features than
        # filtering over sample groups
frac = 0.001
exp = self.test1.filter_prevalence(fraction=frac)
n = exp.shape[1]
for i in self.test1.sample_metadata.columns:
x = self.test1.filter_prevalence(fraction=frac, field=i)
self.assertLessEqual(x.shape[1], n)
def test_filter_sum_abundance(self):
exp = self.test1.filter_sum_abundance(17008)
self.assertEqual(exp.shape[1], 2)
fids = ['TC', 'GG']
self.assertListEqual(exp.feature_metadata.index.tolist(), fids)
def test_filter_mean_abundance(self):
# default is 0.01 - keep features with mean abundance >= 1%
test1 = self.test1.normalize()
exp = test1.filter_mean_abundance()
fids = ['AT', 'TG', 'TC', 'GG']
self.assertListEqual(exp.feature_metadata.index.tolist(), fids)
self.assertEqual(exp.shape[0], self.test1.shape[0])
exp = test1.filter_mean_abundance(0.4, field=None)
fids = ['TC', 'GG']
self.assertListEqual(exp.feature_metadata.index.tolist(), fids)
exp = test1.filter_mean_abundance(0.6, field=None)
self.assertListEqual(exp.feature_metadata.index.tolist(), [])
exp = test1.filter_mean_abundance(0.6, field='group')
fids = ['GG']
self.assertListEqual(exp.feature_metadata.index.tolist(), fids)
def test_filter_mean_abundance_check(self):
        # filtering over all samples always filters more or equal features than
        # filtering over sample groups
abund = 0.001
exp = self.test1.filter_mean_abundance(abund)
n = exp.shape[1]
for i in self.test1.sample_metadata.columns:
x = self.test1.filter_mean_abundance(abund, field=i)
self.assertLessEqual(x.shape[1], n)
def test_filter_ids_not_in_list(self):
fids = ['GG', 'pita']
exp = self.test1.filter_ids(fids)
self.assertListEqual(exp.feature_metadata.index.tolist(), ['GG'])
def test_filter_ids_default(self):
fids = ['GG', 'AA', 'TT']
exp = self.test1.filter_ids(fids)
self.assertListEqual(exp.feature_metadata.index.tolist(), fids)
self.assertIsNot(exp, self.test1)
def test_filter_ids_samples_inplace_negate(self):
badsamples = ['S1', 'S3', 'S5', 'S7', 'S9', 'S11', 'S13', 'S15', 'S17', 'S19']
oksamples = list(set(self.test1.sample_metadata.index.values).difference(set(badsamples)))
exp = self.test1.filter_ids(badsamples, axis=0, negate=True, inplace=True)
self.assertCountEqual(list(exp.sample_metadata.index.values), oksamples)
self.assertIs(exp, self.test1)
def test_filter_sample_group(self):
test = self.test1.filter_ids(['badsample'], axis=0, negate=True)
# does not filter anything
self.assert_experiment_equal(test.filter_sample_group('group', 9), test)
# filter group of 2
self.assert_experiment_equal(test.filter_sample_group('group', 10),
test.filter_samples('group', '1'))
def test_filter_samples_edge_cases(self):
# no group 3 - none filtered
test1 = ca.read(self.test1_biom, self.test1_samp, self.test1_feat, normalize=None)
# group dtype is O
obs = test1.filter_samples('group', ['3'])
self.assertEqual(obs.shape, (0, 12))
obs = test1.filter_samples('group', ['3'], negate=True)
self.assert_experiment_equal(obs, test1)
def test_filter_samples_na(self):
test1 = ca.read(self.test1_biom, self.test1_samp, self.test1_feat, normalize=None)
# filter na value in group column
obs = test1.filter_samples('group', None)
self.assertEqual(obs.shape, (20, 12))
self.assertEqual(test1.sample_metadata.dropna(axis=0).index.tolist(),
obs.sample_metadata.index.tolist())
def test_filter_samples(self):
for inplace in [True, False]:
test1 = ca.read(self.test1_biom, self.test1_samp, self.test1_feat, normalize=None)
# only filter samples from 11 to 14.
obs = test1.filter_samples('id', list(range(11, 15)), inplace=inplace)
self.assertEqual(obs.shape, (4, 12))
self.assertEqual(obs.sample_metadata.index.tolist(), ['S11', 'S12', 'S13', 'S14'])
if inplace:
self.assertIs(obs, test1)
else:
self.assertIsNot(obs, test1)
def test_filter_features_edge_cases(self):
# none filtered
obs = self.test2.filter_features('oxygen', ['facultative'])
self.assertEqual(obs.shape, (9, 0))
obs = self.test2.filter_features('oxygen', ['facultative'], negate=True)
self.assert_experiment_equal(obs, self.test2)
def test_filter_features(self):
for inplace in [True, False]:
test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat, normalize=None)
obs = test2.filter_features('oxygen', ['anaerobic'], inplace=inplace)
self.assertEqual(obs.shape, (9, 2))
self.assertListEqual(obs.feature_metadata.index.tolist(), ['TG', 'TC'])
if inplace:
self.assertIs(obs, test2)
else:
self.assertIsNot(obs, test2)
if __name__ == '__main__':
main()
| {
"content_hash": "36dd78539930b9cd5ffc26c653e59a21",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 111,
"avg_line_length": 46.31699346405229,
"alnum_prop": 0.6118676356452409,
"repo_name": "RNAer/Calour",
"id": "2ab91da2a6fccd2c6d544f8c3bdd4516b9d49125",
"size": "14524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calour/tests/test_filtering.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Gherkin",
"bytes": "5338"
},
{
"name": "Jupyter Notebook",
"bytes": "270154"
},
{
"name": "Makefile",
"bytes": "927"
},
{
"name": "Python",
"bytes": "247846"
}
],
"symlink_target": ""
} |
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
def GetSheetSetViews(set):
if hasattr(set, 'Views'):
return [x.ToDSType(True) for x in set.Views]
else: return []
viewsheetsets = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetSheetSetViews(x) for x in viewsheetsets]
else: OUT = GetSheetSetViews(viewsheetsets) | {
"content_hash": "758a8cd289e73f62f8c2fc3655d815a3",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 78,
"avg_line_length": 25.529411764705884,
"alnum_prop": 0.7603686635944701,
"repo_name": "CAAD-RWTH/ClockworkForDynamo",
"id": "725ee11f7447ae3b71b4e5ee5af9e9c86b09a6f4",
"size": "434",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nodes/2.x/python/ViewSheetSet.Views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "316146"
}
],
"symlink_target": ""
} |
import sys
import argparse
import os
import urllib
import requests
from daypicts import get_picture_url, get_picture_urls
from daypicts import validate_date, gen_dates, picture_type
from daypicts import NoPictureForDate
from daypicts import REMOTE_PICT_BASE_URL, PICT_EXCEPTIONS
FIXTURE_DOC_DIR = 'fixture/docroot/'
FIXTURE_TEMPLATE_POTD_DIR = FIXTURE_DOC_DIR + 'Template-POTD/'
def parse_args(argv):
parser = argparse.ArgumentParser(description=main.__doc__)
date_help = 'YYYY-MM-DD or YYYY-MM or YYYY: year, month and day'
parser.add_argument('date', help=date_help)
parser.add_argument('-u', '--url_only', action='store_true',
help='get picture URLS only')
args = parser.parse_args(argv)
try:
iso_parts = validate_date(args.date)
except ValueError as exc:
print('error:', exc.args[0])
parser.print_usage()
sys.exit(2)
dates = list(gen_dates(iso_parts))
if len(dates) == 1:
print('-> Date: ', dates[0])
else:
fmt = '-> {} days: {}...{}'
print(fmt.format(len(dates), dates[0], dates[-1]))
return dates, args
def save_picture_urls(dates, save_path):
for date in dates:
try:
url = get_picture_url(date)
except NoPictureForDate as exc:
snippet = repr(exc)
else:
snippet = url.replace('http://', 'src="//') + '"'
print(date, end=' ')
print(snippet)
with open(os.path.join(save_path, date), 'w') as fp:
fp.write(snippet)
def save_pictures(dates, save_path, verbose=False):
urls_ok = []
for date, url in get_picture_urls(dates, verbose):
response = requests.get(url)
file_path = os.path.join(save_path,
url.replace(REMOTE_PICT_BASE_URL, ''))
file_path = urllib.parse.unquote(file_path)
octets = response.content
# http://en.wikipedia.org/wiki/Template:POTD/2013-06-15
if date not in PICT_EXCEPTIONS:
assert picture_type(octets) is not None, url
try:
os.makedirs(os.path.dirname(file_path))
except FileExistsError:
pass
with open(file_path, 'wb') as fp:
fp.write(octets)
print(file_path)
return urls_ok
def main(argv):
"""Build test fixture from Wikipedia "POTD" data"""
try:
os.makedirs(FIXTURE_TEMPLATE_POTD_DIR)
except FileExistsError:
pass
dates, args = parse_args(argv)
if args.url_only:
save_picture_urls(dates, FIXTURE_TEMPLATE_POTD_DIR)
else:
save_pictures(dates, FIXTURE_DOC_DIR)
if __name__ == '__main__':
main(sys.argv[1:])
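# Illustrative invocations (the dates are examples, not requirements):
#
#     python build_fixture.py 2013-06          # download every POTD picture for June 2013
#     python build_fixture.py 2013-06-15 -u    # save only the picture URL snippet for that day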
| {
"content_hash": "4af1144db9c51fd8562693d0f4956e3a",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 71,
"avg_line_length": 27.917525773195877,
"alnum_prop": 0.5997045790251108,
"repo_name": "oxfordyang2016/learnfluentpython",
"id": "dece76b34bbdff2027f1f9c8104cda66fa6ed1a4",
"size": "2708",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "attic/concurrency/wikipedia/build_fixture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5651"
},
{
"name": "Java",
"bytes": "3443"
},
{
"name": "JavaScript",
"bytes": "323"
},
{
"name": "Python",
"bytes": "553429"
},
{
"name": "Shell",
"bytes": "946"
}
],
"symlink_target": ""
} |
from numpy import loadtxt, append, array, arange;
from predictors import NaiveBayes;
from pickle import load, dump;
from sys import argv;
import os, urllib, json, time;
os_brack = '/'; # directory separator for the OS the engine is running on
def loadData(datSRC, path, delim, typ):
return loadtxt(path + os_brack + datSRC, delimiter=delim, dtype = typ);
def saveData(data, name, svType,dst = '.'):
f = open( dst + os_brack + name , svType);
f.write(data);
f.close();
def googleSearch(search):
'google search api'
    query = "http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=%s"; # free-to-use search API endpoint
results = urllib.urlopen( query % (search) );
json_res = json.loads( results.read() );
return int(json_res['responseData']['cursor']['estimatedResultCount']); #returns number of estimated search results
def loadClassifier(objFile = 'classifier.pickle', path = '.'):
    'loads an already trained classifier. If no classifier is passed as a \
    parameter then it uses the default path and name'
if os.path.isfile( (path + os_brack + objFile) ):
return load( open(path + os_brack + objFile) );
else:
#print path + os_brack + objFile;
        print '\n[!NO CLASSIFIER IS SAVED YET!]\n'
return None;
def makeClassifier(data):
'trains a classifier with a given training data'
nv = NaiveBayes();
    nv.summarizeByClass(data); # train classifier
f = open( 'classifier.pickle', 'wb');
    dump(nv, f); # save trained classifier as a Python pickle
f.close();
    return nv; # return trained classifier
def discretizeFreq(frequency, cats = [1250, 4500, 8000, 16000, 35000]):
    'categorizes result hits from a google search query. \
    if no categories are passed, it uses the default categories defined'
#print cats;
for i in range( len(cats) ):
if frequency < cats[i]:
return i+1;
return len(cats)+1;
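# Worked examples using the default categories above (illustrative only):
#   discretizeFreq(800)   -> 1   (below the first boundary, 1250)
#   discretizeFreq(5000)  -> 3   (first boundary it falls under is 8000)
#   discretizeFreq(50000) -> 6   (above every boundary: len(cats) + 1)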
def discretizeTarget(data, threshold):
rows, cols = data.shape;
for i in range(rows):
if (data[i][-1] >= threshold): data[i][-1] = 1;
else: data[i][-1] = 0;
return data;
def discretizeLang(lang, languages):
index = 1;
for l in languages:
if l == lang:
return index;
index += 1;
return None;
def testClassifier(examples, trnprt, tstprt, size):
trnset = examples[:trnprt];
tstset = examples[tstprt:];
classifier = makeClassifier(trnset);
falses = 0.0;
avgLvl = 0.0;
for e in tstset:
label, prob = classifier.predict( e[:-1] );
avgLvl += prob * 100;
        #print 'predicted output: %d\t|expected output: %d\t|confidence lvl: %f' % (label, e[-1], prob);
if (label != e[-1]):
falses += 1;
#print '\n>> Training data dimensions: %d' % ( len(examples[0][:-1]) )
#print '>> Prediction accuracy is: %f' % (1 - falses/(size-trnprt))
#print '>> For %d training examples and %d testing examples' % (len(trnset), len(tstset))
#print '>> Overall data size is %d\n\n' % size;
return (1 - falses/(size-trnprt)), (avgLvl/len(tstset));
def getWordCountDif(txt1, txt2, delim =' '):
return abs( len(txt1.split(delim)) - len(txt2.split(delim)) );
def main():
#setup classifier
thresh = 0.65;
classifier = loadClassifier();
if classifier is None:
path = '..' + os_brack + 'training-data' + os_brack + '3-dimensional';
examples = loadData('data-random.csv' , path, ',', float);
examples = discretizeTarget(examples, thresh);
trnprt = len(examples)/3;
trnset = examples[:trnprt];
classifier = makeClassifier(trnset);
#junk -> name of this python file, not needed but automatically passed by interpreter
#srcTxt -> source text user translated
#dstTxt -> translated text
#srcLng -> language of source txt
    #dstLng -> language the source text was translated to
#srcTxt, dstTxt, srcLng, dstLng = loadData(argv[1], '../input', '\n', str); #use this interface for basic testing
junk, srcTxt, dstTxt, srcLng, dstLng = argv; #use this interface for production
#setup input data
frequency = discretizeFreq( googleSearch(dstTxt) );
wordDif = getWordCountDif(srcTxt, dstTxt);
txtlen = len(srcTxt);
#make prediction
label, prob = classifier.predict( [txtlen, frequency, wordDif] );
prediction = ''; prob *= 100; #convert prob to a percentage
if label == 1: prediction = 'good';
else: prediction = 'bad';
#display prediction
print '\nPredicted translation type: %s' % prediction;
print 'Prediction confidence percentage: %f' % prob;
print 'Classifier\'s word-to-word equivalence threshold percentage %f\n' % (thresh * 100);
if __name__ == '__main__':
main();
#################Code reserved for classifier intensive testing##################
# datOrder = 'random'
# accurDat = '';
# confiDat = '';
# for j in arange(0.5, 1, 0.1): #threashold increases
# for k in arange(2.0,6): #training data decreases
# accurDat += '%f %f ' % ((1/k), j);
# confiDat += '%f %f ' % ((1/k), j);
# for i in range(1,4): #dimensions increase
# path = '..' + os_brack +'training-data' + os_brack + '%d-dimensional' % (i+1);
# examples = loadData('data-%s.csv' % datOrder, path, ',', float);
# examples = discretizeTarget(examples, j);
# size = len(examples);
# trnprt = size/k;
# tstprt = trnprt;
# accuracy, confidence = testClassifier(examples, trnprt, tstprt, size);
# accurDat += '%f ' % (accuracy);
# confiDat += '%f ' % (confidence);
# accurDat += '\n';
# confiDat += '\n';
# saveData(accurDat, ('acur-%s.dat' % datOrder), 'w');
# saveData(confiDat, ('conf-%s.dat' % datOrder), 'w');
# print 'data organization is %s\n' % datOrder; | {
"content_hash": "1dd4a1fa239bb84a1b96c09bcaf68eaf",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 116,
"avg_line_length": 34.81290322580645,
"alnum_prop": 0.6643810229799851,
"repo_name": "parejadan/accurp-engine",
"id": "88e651a0d9357652ab9faa41968432dbc4cebcc8",
"size": "5414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/engine.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11303"
}
],
"symlink_target": ""
} |
from .handler import Handler
from .device_hive import DeviceHive
from .device_hive_api import DeviceHiveApi
from .transports.transport import TransportError
from .api_request import ApiRequestError
from .api_response import ApiResponseError
from .device import DeviceError
from .network import NetworkError
from .device_type import DeviceTypeError
from .subscription import SubscriptionError
from .user import UserError
| {
"content_hash": "81e625473ddb6c3d41cd3e90f69455a0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 48,
"avg_line_length": 38.18181818181818,
"alnum_prop": 0.8523809523809524,
"repo_name": "devicehive/devicehive-python",
"id": "339e154436d1578918285cda1a5c4c8ed144665c",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "devicehive/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "231773"
}
],
"symlink_target": ""
} |
class EventBase(object):
"""
The base of all event classes.
A Ryu application can define its own event type by creating a subclass.
"""
def __init__(self):
super(EventBase, self).__init__()
class EventRequestBase(EventBase):
"""
The base class for synchronous request for RyuApp.send_request.
"""
def __init__(self):
super(EventRequestBase, self).__init__()
        self.dst = None  # app.name of the app that provides the event.
self.src = None
self.sync = False
self.reply_q = None
class EventReplyBase(EventBase):
"""
The base class for synchronous request reply for RyuApp.send_reply.
"""
def __init__(self, dst):
super(EventReplyBase, self).__init__()
self.dst = dst
| {
"content_hash": "799fe28d151d8bd93e98e1104200b6fb",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 24.1875,
"alnum_prop": 0.6020671834625323,
"repo_name": "fujita/ryu",
"id": "3f5c3dbf85f0fcbeebde13d7e944356267771d78",
"size": "1452",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ryu/controller/event.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28540"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "874721"
},
{
"name": "Gnuplot",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "6135247"
},
{
"name": "Shell",
"bytes": "17573"
}
],
"symlink_target": ""
} |
import collections
from supriya.ugens.PV_MagSquared import PV_MagSquared
class PV_MagNoise(PV_MagSquared):
"""
Multiplies magnitudes by noise.
::
>>> pv_chain = supriya.ugens.FFT(
... source=supriya.ugens.WhiteNoise.ar(),
... )
>>> pv_mag_noise = supriya.ugens.PV_MagNoise.new(
... pv_chain=pv_chain,
... )
>>> pv_mag_noise
PV_MagNoise.kr()
"""
### CLASS VARIABLES ###
_ordered_input_names = collections.OrderedDict([("pv_chain", None)])
| {
"content_hash": "315b9f1b59e3af61cf70e175c42e6bf7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 72,
"avg_line_length": 22,
"alnum_prop": 0.5527272727272727,
"repo_name": "Pulgama/supriya",
"id": "ecaa0ca5064dcf587b15d4d42ff500a5eb243506",
"size": "550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/ugens/PV_MagNoise.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2790612"
},
{
"name": "Shell",
"bytes": "569"
}
],
"symlink_target": ""
} |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from past.builtins import basestring
from past.utils import old_div
from builtins import object
import zipfile
import numpy as np
import matplotlib.pyplot as plt
from osgeo import gdal, gdal_array
from osgeo.gdalconst import GA_ReadOnly, GA_Update
from girs.geom.envelope import merge_envelopes
from . import parameter
# ===================== use Python Exceptions =================================
gdal.UseExceptions()
# =============================================================================
def driver_dictionary():
"""Return the driver dictionary
The driver dictionary:
- key: source extension
- value: driver short name
:return: driver dictionary
:rtype: dict
"""
drivers_dict = {}
for i in range(gdal.GetDriverCount()):
drv = gdal.GetDriver(i)
if drv.GetMetadataItem(gdal.DCAP_RASTER):
extensions = drv.GetMetadataItem(gdal.DMD_EXTENSIONS)
extensions = extensions.split() if extensions else [None]
for ext in extensions:
if ext:
if ext.startswith('.'):
ext = ext[1:]
ext = ext.lower()
for ext1 in [e for e in ext.split('/')]:
if ext1 not in drivers_dict:
drivers_dict[ext1] = []
drivers_dict[ext1].append(drv.ShortName)
else:
if None not in drivers_dict:
drivers_dict[None] = []
drivers_dict[None].append(drv.ShortName)
return drivers_dict
def get_driver(filename=None, driver_short_name=None):
"""Return a driver.
If driver_short_name is given, return the corresponding driver.
filename can be 'MEM' or a file name. If a file name is given, guess
the driver, returning it only when the correspondence is one to one,
else return None.
Filename suffixes, which should work:
ace2: ACE2, asc: AAIGrid, bin: NGSGEOID, blx: BLX, bmp: BMP, bt: BT, cal: CALS, ct1: CALS, dat: ZMap, ddf: SDTS,
dem: USGSDEM, dt0: DTED, dt1: DTED, dt2: DTED, e00: E00GRID, gen: ADRG, gff: GFF, grb: GRIB, grc: NWT_GRC,
gsb: NTv2, gtx: GTX, gxf: GXF, hdf: HDF4, hdf5: HDF5, hf2: HF2, hgt: SRTMHGT, jpeg: JPEG, jpg: JPEG, kea: KEA,
kro: KRO, lcp: LCP, map: PCRaster, mem: JDEM, mpl: ILWIS, n1: ESAT, nat: MSGN, ntf: NITF, pix: PCIDSK, png:
PNG, pnm: PNM, ppi: IRIS, rda: R, rgb: SGI, rik: RIK, rst: RST, rsw: RMF, sdat: SAGA, tif: GTiff, tiff: GTiff,
toc: RPFTOC, vrt: VRT, xml: ECRGTOC, xpm: XPM, xyz: XYZ,
Filenames with ambiguous suffixes:
gif: GIF, BIGGIF
grd: GSAG, GSBG, GS7BG, NWT_GRD
hdr: COASP, MFF, SNODAS
img: HFA, SRP
nc: GMT, netCDF
ter: Leveller, Terragen
Without suffix: SAR_CEOS, CEOS, JAXAPALSAR, ELAS, AIG, GRASSASCIIGrid, MEM, BSB, DIMAP, AirSAR, RS2, SAFE,
HDF4Image, ISIS3, ISIS2, PDS, VICAR, TIL, ERS, L1B, FIT, INGR, COSAR, TSX, MAP, KMLSUPEROVERLAY, SENTINEL2, MRF,
DOQ1, DOQ2, GenBin, PAux, MFF2, FujiBAS, GSC, FAST, LAN, CPG, IDA, NDF, EIR, DIPEx, LOSLAS, CTable2, ROI_PAC, ENVI,
EHdr, ISCE, ARG, BAG, HDF5Image, OZI, CTG, DB2ODBC, NUMPY
:param filename:
:param driver_short_name:
:return:
"""
if driver_short_name:
driver = gdal.GetDriverByName(driver_short_name)
if not driver:
raise ValueError('Could not find driver for short name {}'.format(driver_short_name))
elif not filename:
driver = gdal.GetDriverByName('MEM')
else:
try:
driver = gdal.IdentifyDriver(filename)
except RuntimeError:
driver = None
if not driver:
drivers_dict = driver_dictionary()
try:
driver_short_name = drivers_dict[filename.split('.')[-1]]
if len(driver_short_name) == 1:
driver = gdal.GetDriverByName(driver_short_name[0])
else:
raise ValueError('Ambiguous file name {} with possible drivers: {}'.format(
filename, ', '.join(driver_short_name)))
except KeyError:
raise ValueError('Could not find driver for file {}'.format(filename))
return driver
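# Example usage (illustrative; file names are placeholders):
#   get_driver('dem.tif')                  # unambiguous suffix -> GTiff driver
#   get_driver(driver_short_name='GTiff')  # explicit short name -> GTiff driver
#   get_driver('grid.grd')                 # ambiguous suffix -> raises ValueError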
# =============================================================================
class RasterFilename(object):
def __init__(self, filename):
self.filename = filename
def get_filename(self):
return self.filename
def get_member(self):
return self.filename
def is_compressed(self):
return False
class RasterGzipFilename(RasterFilename):
def __init__(self, filename):
super(RasterGzipFilename, self).__init__(filename)
def get_member(self):
return self.filename[:-3]
def is_compressed(self):
return True
class RasterZipFilename(RasterFilename):
def __init__(self, filename, member):
super(RasterZipFilename, self).__init__(filename)
if not member:
with zipfile.ZipFile(filename) as zf:
members = zf.namelist()
assert len(members) == 1
member = members[0]
self.member = member
def get_member(self):
return self.member
def is_compressed(self):
return True
# =============================================================================
# Raster
# =============================================================================
class Raster(object):
def __init__(self, filename, member=None):
"""
:param filename:
:param member:
"""
self.dataset = None
if filename is None:
filename = ''
fnl = filename.lower()
if fnl.endswith('.zip'):
self.filename = RasterZipFilename(filename, member)
elif fnl.endswith('.gz'):
self.filename = RasterGzipFilename(filename)
else:
self.filename = RasterFilename(filename)
def __repr__(self):
return self.get_filename() + ' ' + self.get_parameters().__repr__()
def info(self, **kwargs):
"""Return information on a dataset
::
return gdal.Info(self.dataset, **kwargs)
:param kwargs: see gdal.Info
:return: information on a dataset
:rtype: str
"""
return gdal.Info(self.dataset, **kwargs)
def show(self, band_number=1, mask=True, scale=False, plot=True):
"""
:param band_number:
:param mask:
:param scale:
:return:
"""
array = self.get_array(band_number, mask=mask, scale=scale)
if np.ma.all(array) is np.ma.masked:
print('Empty array: nothing to show')
else:
plt.imshow(array)
if plot:
plt.show()
def plot(self, axes_dict=None, mask=True, scale=False, plot=True):
"""
:param axes_dict: dictionary with ax as key and list of bands as plot
:param mask:
:param scale:
:param plot:
:return:
"""
for ax, band_number in list(axes_dict.items()):
if isinstance(band_number, int):
array = self.get_array(band_number, mask=mask, scale=scale)
else:
array = self.get_arrays(band_number, mask=mask, scale=scale)
ax.imshow(array)
if plot:
plt.show()
def is_compressed(self):
"""Return True is the file is compressed
Valid compressions are gzip (.gz) and zip (.zip)
:return: True is the file is compressed
"""
return self.filename.is_compressed()
def get_filename(self):
"""Return the file name
:return: file name
:rtype: str
"""
return self.filename.get_filename()
def get_rastername(self):
"""Return the raster name
Raster name may be different of file name in case of zipped files
:return: file name
:rtype: str
"""
return self.filename.get_member()
def get_parameters(self):
"""Return the raster parameters defined in the dataset
:return: raster parameters
:rtype: RasterParameters
"""
return parameter.get_parameters(self.dataset)
def get_raster_size(self):
"""Return (number of columns, number of rows)
Return the x- and y-sizes as tuple (number of columns, number of rows)
:return: number of columns and rows
:rtype: (int, int)
"""
return self.dataset.RasterXSize, self.dataset.RasterYSize
def get_nodata(self, band_number=1):
"""Return nodata or list of nodata
If band_number is 'all' or a list of band numbers, return a list with one nodata for each band.
If band_number is a number, return the nodata value
:param band_number: 'all', band number or list of band numbers
:type band_number: int or list of int
:return: nodata or list of nodata
:rtype: raster type or list of raster types
"""
if band_number == 'all':
band_number = list(range(1, self.get_band_count() + 1))
try:
return [self.dataset.GetRasterBand(i).GetNoDataValue() for i in band_number]
except TypeError:
return self.dataset.GetRasterBand(band_number).GetNoDataValue()
def get_extent(self, scale=0.0):
"""Return the raster extent in world coordinates.
:return: (x_min, x_max, y_min, y_max)
:rtype:
"""
xmin, xmax, ymin, ymax = extent_pixel_to_world(self.get_geotransform(),
self.dataset.RasterXSize, self.dataset.RasterYSize)
if scale and scale != 0.0:
dx, dy = xmax - xmin, ymax - ymin
xmin -= dx * scale
xmax += dx * scale
ymin -= dy * scale
ymax += dy * scale
return xmin, xmax, ymin, ymax
def copy(self, name='', driver_short_name=None):
"""Return a copy of this raster
Creates a new RasterUpdate instance using the parameters of this raster
:param name:
:param driver_short_name:
:return:
"""
return copy(self, name, driver_short_name)
def get_band_count(self):
"""Return the number of bands
:return: number of bands
:rtype: int
"""
return self.dataset.RasterCount
def get_band_data_type(self, band_number=1):
"""Return data type or list of data types
If band_number is 'all' or a list of band numbers, return a list with one data type for each band.
If band_number is a number, return the data type
:param band_number: 'all', band number or list of band numbers
:type band_number: int or list of int
:return: data type or list of data types
:rtype: int or list of int
"""
if band_number == 'all':
band_number = list(range(1, len(band_number) + 1))
try:
return [self.dataset.GetRasterBand(i).DataType for i in band_number]
except TypeError:
return self.dataset.GetRasterBand(band_number).DataType
def get_band(self, band_number=1):
"""Return a raster band
:param band_number: band number. Default band_number=1
:return: raster band
:rtype: gdal.Band
"""
return self.dataset.GetRasterBand(band_number)
def array_block(self, band_number=1):
"""Loop through an array and yields i_row, i_col, n_rows, n_cols, array block
It is 5x slower than get_array
:return: i_row, i_col, n_rows, n_cols, block
"""
band = self.dataset.GetRasterBand(band_number)
row_size, col_size = band.YSize, band.XSize
block_sizes = band.GetBlockSize()
row_block_size, col_block_size = block_sizes[1], block_sizes[0]
col_range = list(range(0, col_size, col_block_size))
for i_row in range(0, row_size, row_block_size):
n_rows = row_block_size if i_row + row_block_size < row_size else row_size - i_row
for i_col in col_range:
n_cols = col_block_size if i_col + col_block_size < col_size else col_size - i_col
yield i_row, i_col, n_rows, n_cols, band.ReadAsArray(i_col, i_row, n_cols, n_rows)
def get_array(self, band_number=1, col0=0, row0=0, n_cols=None, n_rows=None, mask=False, scale=False):
"""Return the raster band array
If band number is a number, return a 2D-array, else if band number is a list/tuple of numbers, return a 3D-array
If mask is True, returned a numpy masked array
If scale is True, scale the raster: (array - min)/(max - min)
:param band_number: band number. Default band_number=1
:param col0: starting x pixel
:type col0: int
:param row0: starting y pixel
:type row0: int
:param n_cols: number of x-pixels
:type n_cols: int
:param n_rows: number of y-pixels
:type n_rows: int
:return: 2D- or 3D-array
:rtype: numpy.ndarray
"""
if not n_cols:
n_cols = self.dataset.RasterXSize
if not n_rows:
n_rows = self.dataset.RasterYSize
if band_number == 'all':
band_number = list(range(1, self.get_band_count() + 1))
try:
arrays = np.empty((len(band_number), n_rows, n_cols))
for i, bn in enumerate(band_number):
arrays[i] = self.get_array(bn, col0=col0, row0=row0, n_cols=n_cols, n_rows=n_rows,
mask=mask, scale=scale)
return arrays
except TypeError:
array = self.dataset.GetRasterBand(band_number).ReadAsArray(col0, row0, n_cols, n_rows)
if mask:
nodata = self.get_nodata(band_number=band_number)
array = np.ma.array(array, mask=(array == nodata))
if scale:
array = scale_array(array)
return array
def get_array_full(self, value=0, **kwargs):
"""
:param value:
:type value: int or list
:param kwargs:
:key dtype: numpy dtype, default raster type converted into dtype
:return: array
"""
n_cols, n_rows = self.get_raster_size()
dtype = kwargs.pop('dtype', gdal_array.GDALTypeCodeToNumericTypeCode(self.get_band_data_type()))
if isinstance(value, (list, tuple)):
n_bands = len(value)
if value.count(0) == len(value):
return np.zeros((n_bands, n_rows, n_cols), dtype)
elif value.count(None) == len(value):
return np.empty((n_bands, n_rows, n_cols), dtype)
else:
array = np.empty((n_bands, n_rows, n_cols), dtype)
for i in range(n_bands):
array[i] = np.full((n_rows, n_cols), value[i], dtype)
return array
else:
if value == 0:
return np.zeros((n_rows, n_cols), dtype)
elif value is None:
return np.empty((n_rows, n_cols), dtype)
else:
return np.full((n_rows, n_cols), value, dtype)
def get_geotransform(self):
"""Return geo transform.
:return:
"""
return self.dataset.GetGeoTransform()
def transform(self, **kwargs):
"""Return a raster in the given coordinate system
Create an instance of RasterUpdate in 'MEM':
- If filename and driver are unset
- If driver is 'MEM'
:param kwargs:
:key epsg: (str)
:key proj4: (str)
:key wkt: (str)
:key output_raster: full path file name, any string if drivername='mem', or None
:key drivername: short driver name
:return:
"""
output_raster = kwargs.pop('output_raster', 'MEM')
drivername = kwargs.pop('drivername', 'Memory')
if output_raster:
target = output_raster
else:
target = drivername
srs = kwargs.pop('wkt', kwargs.pop('proj4', None))
if not srs:
srs = 'epsg:{}'.format(kwargs.pop('epsg', None))
return RasterUpdate(gdal.Warp(destNameOrDestDS=target, srcDSOrSrcDSTab=self.dataset, dstSRS=srs, **kwargs))
def get_coordinate_system(self):
"""Return the coordinate system
:return:
"""
return self.dataset.GetProjection()
def get_pixel_size(self):
"""Return pixel sizes (x, y): (column width, row height)
:return: pixel sizes (x, y): (column width, row height)
:rtype: tuple
"""
return pixel_size(self.get_geotransform())
def world_to_pixel(self, x, y):
"""Transform world to pixel coordinates
:param x:
:param y:
:return: pixel coordinates of (x, y)
:rtype: list of int
"""
return world_to_pixel(self.get_geotransform(), x, y)
def extent_world_to_pixel(self, min_x, max_x, min_y, max_y):
"""Return extent in pixel coordinates
:param min_x: minimum x (minimum longitude)
:type min_x: float
:param max_x: maximum x (maximum longitude)
:type max_x: float
:param min_y: minimum x (minimum latitude)
:type min_y: float
:param max_y: maximum x (maximum latitude)
:type max_y: float
:return: (u_min, u_max, v_min, v_max)
:rtype: tuple
"""
return extent_world_to_pixel(self.get_geotransform(), min_x, max_x, min_y, max_y)
def pixel_to_world(self, x, y):
"""Return the top-left world coordinate of the pixel
:param x:
:param y:
:return:
"""
return pixel_to_world(self.get_geotransform(), x, y)
def get_pixel_centroid_coordinates(self):
"""
:return:
"""
dx, dy = self.get_pixel_size()
dy = -dy
nc, nr = self.get_raster_size()
tr = self.get_geotransform()
arr = np.concatenate([x.reshape(nr, nc, 1) for x in np.indices((nr, nc))][::-1], 2).astype(np.float)
arr[:][:] *= np.array([dx, dy])
arr[:][:] += np.array([tr[0], tr[3]])
arr[:][:] += np.array([dx / 2.0, dy / 2.0])
return arr
def get_centroid_world_coordinates(self):
"""Return the raster centroid in world coordinates
:return:
"""
x_size, y_size = self.get_pixel_size()
return get_centroid_world_coordinates(self.get_geotransform(),
self.dataset.RasterXSize, self.dataset.RasterYSize,
x_size, y_size)
def resample(self, pixel_sizes, resample_alg=gdal.GRA_NearestNeighbour, **kwargs):
"""Resample the raster
:param pixel_sizes:
:param resample_alg:
:param kwargs:
:return:
"""
from girs.rast.proc import resample
return resample(self, pixel_sizes, resample_alg, **kwargs)
def strip(self):
from girs.rast.proc import strip
return strip(self)
# =============================================================================
# RasterReader
# =============================================================================
class RasterReader(Raster):
def __init__(self, filename, member=''):
"""Filename, also als .zip or .gz. In case of zip-files, a member name
can be also be given in case there are more then one raster files in the
zip file
:param filename:
:param member:
"""
super(RasterReader, self).__init__(filename, member=member)
fnl = filename.lower()
if fnl.endswith('.zip'): # /vsizip/path/to/the/file.zip/path/inside/the/zip/file
filename = '/vsizip/' + filename + '/' + member
elif fnl.endswith('.gz'): # /vsigzip/path/to/the/file.gz
filename = '/vsigzip/' + filename
self.dataset = gdal.Open(filename, GA_ReadOnly)
class RasterEditor(Raster):
def __init__(self, filename):
super(RasterEditor, self).__init__(filename)
def set_nodata(self, nodata, band_number=1):
"""Set nodata
:param nodata:
:param band_number:
:return:
"""
if isinstance(nodata, basestring):
nodata = [nodata]
else:
try:
len(nodata)
except TypeError:
nodata = [nodata]
try:
band_number_nodata = {bn: nodata[i] for i, bn in enumerate(band_number)}
except TypeError:
band_number = [band_number]
band_number_nodata = {bn: nodata[i] for i, bn in enumerate(band_number)}
for bn in band_number:
self.dataset.GetRasterBand(bn).SetNoDataValue(band_number_nodata[bn])
self.dataset.FlushCache()
def set_array(self, array, band_number=1):
"""Set array
:param array:
:param band_number:
:return:
"""
result = self.dataset.GetRasterBand(band_number).WriteArray(array)
self.dataset.FlushCache()
return result
def set_projection(self, srs):
"""Set projection
:param srs:
:return:
"""
result = self.dataset.SetProjection(srs)
self.dataset.FlushCache()
return result
def set_geotransform(self, x_min=0.0, x_pixel_size=1.0, x_rot=0.0, y_max=0.0, y_rot=0.0, y_pixel_size=1.0):
"""
        :param x_min: x location of the north-west corner of the raster
        :param x_pixel_size: pixel width
        :param x_rot: x pixel rotation, usually zero
        :param y_max: y location of the north-west corner of the raster
        :param y_rot: y pixel rotation, usually zero
        :param y_pixel_size: negative value of pixel height
        :return: True if setting was successful else gdal.CE_Failure
        """
        result = self.dataset.SetGeoTransform([x_min, x_pixel_size, x_rot, y_max, y_rot, y_pixel_size])
self.dataset.FlushCache()
return True if result == gdal.CE_None else gdal.CE_Failure
class RasterUpdate(RasterEditor):
def __init__(self, source, drivername=None):
"""
If source is another Raster or a raster dataset, create a copy of the dataset in 'MEM'
If source is a filename, open the file in update modus
:param source: raster filename, Raster, or raster dataset
"""
try:
super(RasterUpdate, self).__init__(source)
# No support for zip-files
if source.lower().endswith('.gz'): # /vsigzip/path/to/the/file.gz
source = '/vsigzip/' + source
            if drivername and drivername != 'MEM':
                raise ValueError('No filename defined')
self.dataset = gdal.Open(source, GA_Update)
except AttributeError:
super(RasterUpdate, self).__init__('')
try:
self.dataset = gdal.GetDriverByName('MEM').CreateCopy('', source.dataset)
except AttributeError:
self.dataset = gdal.GetDriverByName('MEM').CreateCopy('', source)
self.filename = ''
class RasterWriter(RasterEditor):
def __init__(self, raster_parameters, source=None, drivername=None):
"""Create an instance of RasterWriter in 'MEM':
- If source and driver are not given
- If drivername is 'MEM'
:param raster_parameters:
:param source:
:param drivername: gdal driver short name or an instance of gdal.Driver
"""
super(RasterWriter, self).__init__(source)
drv = None
if not source:
if not drivername or drivername == 'MEM':
drv = gdal.GetDriverByName('MEM')
else:
raise ValueError('No filename defined')
elif source:
if not drivername:
drv = get_driver(source)
else:
try:
drv = gdal.GetDriverByName(drivername)
except TypeError as e:
if not isinstance(drivername, gdal.Driver):
raise e
drv = drivername
n_bands = raster_parameters.number_of_bands
try:
dt = raster_parameters.data_types[0]
except TypeError:
dt = raster_parameters.data_types
try:
filename = self.get_filename()
self.dataset = drv.Create(filename, raster_parameters.RasterXSize, raster_parameters.RasterYSize,
n_bands, dt)
except RuntimeError as e:
            msg = '{}; raster {} may be in use (locked)'.format(e, self.get_filename())
raise RuntimeError(msg)
self.dataset.SetGeoTransform(raster_parameters.geo_trans)
self.dataset.SetProjection(raster_parameters.srs)
raster_parameters.set_nodata(raster_parameters.nodata)
for i in range(n_bands):
if raster_parameters.nodata[i] is not None:
rb_out = self.dataset.GetRasterBand(i+1)
rb_out.SetNoDataValue(raster_parameters.nodata[i])
rb_out.FlushCache()
# =============================================================================
# Functions
# =============================================================================
def info(raster, **kwargs):
"""Return raster information
:param raster:
:param kwargs:
:return:
"""
try:
raster = RasterReader(raster)
dataset = raster.dataset
except AttributeError:
try:
dataset = raster.dataset
except AttributeError:
dataset = raster
return gdal.Info(dataset, **kwargs)
def create_gifs(output_filename, *args, **kwargs):
"""
:param self:
:param output_filename:
:param args:
:param kwargs:
:key mask: default True
:key scale: default False
:key band_number:
:key nodata:
    :key cmap_name: name of a matplotlib colormap; default 'plasma_r'
    :key cmap: matplotlib colormap object; default is the colormap named by cmap_name
:key resize: default 1
:return:
"""
from PIL import Image
import matplotlib as mpl
resize = kwargs.pop('resize', 1)
cm = kwargs.pop('cmap', mpl.cm.get_cmap(kwargs.pop('cmap_name', 'plasma_r')))
images = list()
a_max, a_min = None, None
for i, arg in enumerate(args):
try:
r = RasterReader(arg)
except AttributeError:
r = arg
array = r.get_array(mask=True, scale=False)
a_min = array.min() if a_min is None else min(a_min, array.min())
a_max = array.max() if a_max is None else max(a_max, array.max())
for i, arg in enumerate(args):
try:
r = RasterReader(arg)
except AttributeError:
r = arg
array = r.get_array(mask=True, scale=False)
array = old_div((array - a_min), (a_max - a_min))
array = cm(array)
img = Image.fromarray((array * 255).astype('uint8'))
img = img.resize((img.size[0] * resize, img.size[1] * resize))
images.append(img)
images[0].save(output_filename, 'GIF', duration=1000, save_all=True, optimize=False, append_images=images[1:])
# def create_gifs(output_filename, *args, **kwargs):
# """
#
# :param self:
# :param output_filename:
# :param args:
# :param kwargs:
# :key mask: default True
# :key scale: default False
# :key band_number:
# :return:
# """
# from PIL import Image
# import matplotlib as mpl
# cm_hot = mpl.cm.get_cmap('hot')
# images = list()
# for i, arg in enumerate(args):
# try:
# r = RasterReader(arg)
# except AttributeError:
# r = arg
#
# p = r.get_parameters()
# nodata = p.nodata
# array = r.get_array(mask=False, scale=False)
# array[array == nodata] = 0
# array *= 255.0 / array.max()
# array = array.astype(np.uint8)
# array = cm_hot(array)
# array *= 255
# array = array.astype('uint8')
# print array
# # data = img.getdata()
# # max_d = max(data) * 1.2
# # img.putdata([item if item != nodata else 0 for item in data])
# img = Image.fromarray(array)
# img = img.resize((img.size[0] * 50, img.size[1] * 50))
# images.append(img)
# images[0].save(output_filename, 'GIF', duration=2000, save_all=True, optimize=False, append_images=images[1:])
# def create_gifs(output_filename, *args, **kwargs):
# """
#
# :param self:
# :param output_filename:
# :param args:
# :param kwargs:
# :key mask: default True
# :key scale: default False
# :key band_number:
# :return:
# """
# from PIL import Image
# import imageio as io
# images = list()
#
# for i, arg in enumerate(args):
# try:
# r = RasterReader(arg)
# except AttributeError:
# r = arg
# array = r.get_array(mask=False, scale=False)
# images = [io.imread(os.path.join(input_dir, f1)) for f1 in filenames]
# io.mimsave(output_gif, jpeg_images, duration=0.5)
def get_parameters(raster):
"""Return the raster parameters defined in this raster
:param raster: dataset or filename
:type raster: gdal.Dataset
:return: raster parameters
:rtype: RasterParameters
"""
return parameter.get_parameters(raster)
def copy(raster, dst_filename='', driver_short_name=None):
"""Return a copy of given raster
Creates a new RasterUpdate instance using the parameters of this raster
:param raster:
:param dst_filename:
:param driver_short_name:
:return: copy of this raster
:rtype: RasterUpdate
"""
try:
raster = RasterReader(raster)
except AttributeError:
raster = raster
drv = get_driver(dst_filename, driver_short_name)
dataset = drv.CreateCopy(dst_filename, raster.dataset)
return RasterUpdate(dataset)
def scale_array(array):
def scale(a):
array_min = np.amin(a)
array_max = np.amax(a)
return old_div((a - array_min), (array_max - array_min))
if len(array.shape) > 2: # ndim does not work for masked arrays
for i in range(len(array)):
array[i, :, :] = scale(array[i, :, :])
else:
array = scale(array)
return array
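# Illustrative example: scale_array(np.array([2.0, 4.0, 6.0])) returns
# array([0. , 0.5, 1. ]), each value mapped to (v - min) / (max - min).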
def pixel_size(geo_trans):
"""Return pixel sizes
:param geo_trans: geo transformation
:type geo_trans: tuple with six values
:return: pixel sizes (x, y): (column width, row height)
:rtype: tuple
"""
return geo_trans[1], -geo_trans[5]
def world_to_pixel(geo_trans, x, y, np_func=np.trunc):
"""Transform world into pixel coordinates
:param geo_trans: geo transformation
:type geo_trans: tuple with six values
:param x:
:param y:
:param np_func:
:return: pixel coordinates of (x, y)
:rtype: list of int
"""
# print geo_trans, x, y
# xOffset = int((x - geo_trans[0]) / geo_trans[1])
# yOffset = int((y - geo_trans[3]) / geo_trans[5])
# print xOffset, yOffset,
xOffset = np_func(np.divide(x - geo_trans[0], geo_trans[1])).astype(np.int)
yOffset = np_func(np.divide(y - geo_trans[3], geo_trans[5])).astype(np.int)
# print xOffset, yOffset
return xOffset, yOffset
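# Illustrative example for a north-up raster with origin (100.0, 200.0) and 10 x 10 pixels:
#   geo_trans = (100.0, 10.0, 0.0, 200.0, 0.0, -10.0)
#   world_to_pixel(geo_trans, 125.0, 175.0)  # -> (2, 2)
#   pixel_to_world(geo_trans, 2, 2)          # -> (120.0, 180.0), top-left corner of that pixel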
def pixel_to_world(geo_trans, x, y):
"""Return the top-left world coordinate of the pixel
:param geo_trans: geo transformation
:type geo_trans: tuple with six values
:param x:
:param y:
:return:
"""
return geo_trans[0] + (x * geo_trans[1]), geo_trans[3] + (y * geo_trans[5])
def extent_pixel_to_world(geo_trans, raster_x_size, raster_y_size):
"""Return extent in world coordinates.
Transform the given pixel coordinates `raster_x_size` (number of columns) and `raster_y_size` (number of rows) into
world coordinates.
:param geo_trans: geo transformation
:type geo_trans: tuple with six values
:param raster_x_size: number of columns
:type raster_x_size: int
:param raster_y_size: number of rows
:return: (x_min, x_max, y_min, y_max)
:rtype: tuple
"""
x_min0, y_max0 = pixel_to_world(geo_trans, 0, 0)
x_max0, y_min0 = pixel_to_world(geo_trans, raster_x_size, raster_y_size)
return x_min0, x_max0, y_min0, y_max0
def extent_world_to_pixel(geo_trans, min_x, max_x, min_y, max_y):
"""Return extent in pixel coordinates
:param geo_trans: geo transformation
:type geo_trans: tuple with six values
:param min_x: minimum x (minimum longitude)
:type min_x: float
:param max_x: maximum x (maximum longitude)
:type max_x: float
:param min_y: minimum x (minimum latitude)
:type min_y: float
:param max_y: maximum x (maximum latitude)
:type max_y: float
:return: (u_min, u_max, v_min, v_max)
:rtype: tuple
"""
geo_trans = list(geo_trans)
u_min, v_min = world_to_pixel(geo_trans, min_x, max_y)
u_max, v_max = world_to_pixel(geo_trans, max_x, min_y)
geo_trans[0], geo_trans[3] = pixel_to_world(geo_trans, u_min, v_min)
return (u_min, u_max, v_min, v_max), geo_trans
def get_centroid_world_coordinates(geo_trans, raster_x_size, raster_y_size, x_pixel_size, y_pixel_size):
"""Return the raster centroid in world coordinates
:param geo_trans: geo transformation
:type geo_trans: tuple with six values
:param raster_x_size: number of columns
:type raster_x_size: int
:param raster_y_size: number of rows
:param x_pixel_size: pixel size in x direction
:type: x_pixel_size: float
:param y_pixel_size: pixel size in y direction
:type y_pixel_size: float
:return:
"""
x0, y0 = pixel_to_world(geo_trans, 0, 0)
x1, y1 = pixel_to_world(geo_trans, raster_x_size-1, raster_y_size-1)
x1 += x_pixel_size
y1 -= y_pixel_size
return (x0 + x1) * 0.5, (y0 + y1) * 0.5
def get_default_values(number_of_bands, values):
"""Return values for bands
Utility function to get values (e.g., nodata) for bands.
For n = number_of_bands:
- If values is a single value, transform it into a list with n elements
    - If values is a list with size lower than n, extend the list to size n by repeating the last value (n=4, values=[1, 2], result=[1, 2, 2, 2])
- If values is a list with size greater than n, slice values to values[:n]
:param number_of_bands: number of bands
:type number_of_bands: int
:param values: value or list of values
:type values: same as raster type
:return: values
:rtype: same as input values
"""
try:
if number_of_bands < len(values):
values = values[:number_of_bands]
elif number_of_bands > len(values):
            values = list(values) + [values[-1]] * (number_of_bands - len(values))
except TypeError:
values = [values] * number_of_bands
except:
raise
return values
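# Illustrative examples of the band-value broadcasting rules:
#   get_default_values(4, [1, 2])     # -> [1, 2, 2, 2] (last value repeated)
#   get_default_values(2, [1, 2, 3])  # -> [1, 2] (sliced to the number of bands)
#   get_default_values(3, -9999)      # -> [-9999, -9999, -9999] (scalar broadcast)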
def rasters_get_extent(rasters, extent_type='intersection'):
"""Return the extent of a list of rasters.
Return the extent of the union or intersection of a list of rasters
:param rasters: list of rasters or raster filenames (also mixed)
:param extent_type: intersection or union
:return: (xmin, xmax, ymin, ymax) in world coordinates
"""
# Get get common extent
# Get the rasters
rasters = [RasterReader(ir) if isinstance(ir, basestring) else ir for ir in rasters]
return merge_envelopes([r.get_extent() for r in rasters])
def rasters_get_pixel_size(rasters, minmax='max'):
"""Return union or intersection of pixel sizes
- If minmax='min', return the intersection of the pixel sizes defined in the list of rasters. This corresponds to the smallest pixel size among all rasters.
- If minmax='max', return the union of the pixel sizes defined in the list of rasters. This corresponds to the largest pixel size among all rasters.
:param rasters: list of rasters
:type rasters: list of raster file names or list of Raster instances, also both types in the same list
:param minmax: 'min' for intersection and 'max' for union
:type minmax: str
:return: pixel sizes (x, y): (number of columns, number of rows)
:rtype: tuple
"""
rasters = [RasterReader(ir) if isinstance(ir, basestring) else ir for ir in rasters]
xs, ys = rasters[0].get_pixel_size()
if minmax == 'max':
for r in rasters:
xs0, ys0 = r.get_pixel_size()
xs = max(xs, xs0)
ys = max(ys, ys0)
elif minmax == 'min':
for r in rasters:
xs0, ys0 = r.get_pixel_size()
xs = min(xs, xs0)
ys = min(ys, ys0)
return xs, ys
| {
"content_hash": "6ddc50f722c1ac0f7bc8bd707e9df65e",
"timestamp": "",
"source": "github",
"line_count": 1090,
"max_line_length": 164,
"avg_line_length": 34.15321100917431,
"alnum_prop": 0.5718967416122707,
"repo_name": "JRoehrig/GIRS",
"id": "c501406da022256c6e274e5a821773aca7cdbe7c",
"size": "37227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "girs/rast/raster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "258726"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0203_realm_message_content_allowed_in_email_notifications"),
]
operations = [
migrations.RemoveField(
model_name="realm",
name="has_seat_based_plan",
),
migrations.RemoveField(
model_name="realm",
name="seat_limit",
),
]
| {
"content_hash": "c75f890a3db353632c176e9fe77a96df",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 80,
"avg_line_length": 22.63157894736842,
"alnum_prop": 0.5627906976744186,
"repo_name": "andersk/zulip",
"id": "98da63f4dbfdb70868228745440e98405faeab43",
"size": "481",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "zerver/migrations/0204_remove_realm_billing_fields.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "490256"
},
{
"name": "Dockerfile",
"bytes": "4025"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "749848"
},
{
"name": "Handlebars",
"bytes": "377098"
},
{
"name": "JavaScript",
"bytes": "4006373"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112128"
},
{
"name": "Python",
"bytes": "10168530"
},
{
"name": "Ruby",
"bytes": "3459"
},
{
"name": "Shell",
"bytes": "146797"
},
{
"name": "TypeScript",
"bytes": "284837"
}
],
"symlink_target": ""
} |
'''
This is the Python version of the Random Search algorithm
presented in
"Clever Algorithms: Nature-Inspired Programming Recipes".
Find the book and the Ruby source code on GitHub:
https://github.com/jbrownlee/CleverAlgorithms
'''
import random
def objective_function(vector):
return sum([x ** 2.0 for x in vector])
def random_vector(minmax):
return [minmax[i][0] + ((minmax[i][1] - minmax[i][0]) * random.random()) for i in range(len(minmax))]
def search(search_space, max_iter):
best = None
for i in range(max_iter):
candidate = {}
candidate["pos"] = random_vector(search_space)
candidate["cost"] = objective_function(candidate["pos"])
if best is None or candidate["cost"] < best["cost"]: best = candidate
print " > iteration %i, best=%.4g" % (i, best["cost"])
return best
if __name__ == "__main__":
search_space = [[-5,5],[-5,5]]
problem_size = 2
max_iter = 100
best = search(search_space, max_iter)
print "Done! Best solution: f = %.4g, v =" % best["cost"], best["pos"]
| {
"content_hash": "5e766210da1ac7e92817e9330d8f6518",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 105,
"avg_line_length": 29.16216216216216,
"alnum_prop": 0.623725671918443,
"repo_name": "mkrapp/semic",
"id": "b9bb45fa7cfff26a08c55d2309ac56dd0316eb87",
"size": "1079",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "optimize/optimization_algorithms/random_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "55403"
},
{
"name": "Makefile",
"bytes": "4803"
},
{
"name": "Python",
"bytes": "46472"
}
],
"symlink_target": ""
} |
__name__ = 'jsonrpc2'
__author__ = 'Max <[email protected]>'
__version__ = 1, 0
__detail__ = 'Based on https://github.com/subutux/json-rpc2php'
import logging
logger = logging.getLogger(__name__)
import json
import requests
import pprint
class jsonrpc2(object):
'''jsonrpc2 client'''
host = ''
default_options = {
'username': '',
'password': '',
}
currId = 0
apiMethods = []
headers = {'Content-Type': 'application/json',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0'}
cookies = {}
def __init__(self, api_url, options=None):
self.host = api_url
if options is not None:
for i in options:
self.default_options[i] = options[i]
returned = self.rpc_list_available_commands()
self.apiMethods = returned['services']
def rpc_list_available_commands(self):
response = requests.get(self.host, auth=(self.default_options['username'],
self.default_options['password']),
headers=self.headers, data='')
self.cookies = response.cookies
return json.loads(response.content)
def rpc_call(self, method, params=None, notification=False):
"""main function to call the rpc api"""
request = {
'jsonrpc': '2.0',
'method': method,
'params': '',
}
if notification is False:
self.currId += 1
request['id'] = self.currId
if isinstance(params, str):
request['params'] = [params]
elif isinstance(params, dict):
request['params'] = params
elif isinstance(params, list):
request['params'] = params
jsonrequest = json.dumps(request)
response = requests.post(self.host, auth=(self.default_options['username'],
self.default_options['password']),
headers=self.headers,
data=jsonrequest)
print 'RPC CALL:', method, pprint.pformat(params)
if notification is False:
f_obj = json.loads(response.content)
if 'error' in f_obj.keys():
raise rpcException(f_obj['error'])
else:
print ' RPC RESPONSE:', pprint.pformat(f_obj)
return f_obj
def __getattr__(self, method):
"""Magic!"""
arg = ['', False]
if method in self.apiMethods:
def function(*args):
# Get the method arguments. If there are none provided, use the default.
try:
arg[0] = args[0]
except IndexError:
pass
# check if notification param is set. If not, use default (False)
try:
arg[1] = args[1]
except IndexError:
pass
return self.rpc_call(method, arg[0], arg[1])
return function
else:
raise rpcException("Unknown method: %s" % method)
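# Usage sketch (illustrative; URL, credentials and method name are placeholders):
#   api = jsonrpc2('http://example.org/api.php', {'username': 'user', 'password': 'secret'})
#   result = api.someAdvertisedMethod({'param': 'value'})  # dispatched through __getattr__ -> rpc_call
# Only methods listed by rpc_list_available_commands() are callable this way;
# anything else raises rpcException.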
class rpcException(Exception):
def __init__(self, jsonrpc2Error):
if type(jsonrpc2Error) is not str:
print jsonrpc2Error
message = str(jsonrpc2Error["code"]) + " :: " + jsonrpc2Error["message"]
self.errorCode = jsonrpc2Error["code"]
self.message = jsonrpc2Error["message"]
self.fullMessage = jsonrpc2Error['data']
else:
message = jsonrpc2Error
Exception.__init__(self, message)
| {
"content_hash": "d238a56f6058f2ec8eabd11029c06a45",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 108,
"avg_line_length": 32.78761061946903,
"alnum_prop": 0.5201079622132254,
"repo_name": "vesellov/callfeed.net",
"id": "0c5333c0e38620e4f72120bacf361c05af511235",
"size": "3705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mainapp/utils/jsonrpc2.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "143"
},
{
"name": "CSS",
"bytes": "349981"
},
{
"name": "HTML",
"bytes": "289531"
},
{
"name": "JavaScript",
"bytes": "767378"
},
{
"name": "Python",
"bytes": "477690"
},
{
"name": "Shell",
"bytes": "5323"
}
],
"symlink_target": ""
} |
import tests.expsmooth.expsmooth_dataset_test as exps
exps.analyze_dataset("frexport.csv" , 2);
exps.analyze_dataset("frexport.csv" , 4);
exps.analyze_dataset("frexport.csv" , 8);
exps.analyze_dataset("frexport.csv" , 12);
| {
"content_hash": "23b40d0919d22219d242f73e61063f61",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 53,
"avg_line_length": 22.8,
"alnum_prop": 0.7324561403508771,
"repo_name": "antoinecarme/pyaf",
"id": "5ed92c5c4a30c0144b5e10e5cd4cc90eabd0ed81",
"size": "228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/expsmooth/expsmooth_dataset_frexport.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/weapon/shared_wpn_medium_blaster.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "6ceabc68575c056119ac32d16203ee2f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 24.23076923076923,
"alnum_prop": 0.6984126984126984,
"repo_name": "obi-two/Rebelion",
"id": "04674734c0d9a1fe1cc042cb52bc21a3b8eb2a58",
"size": "460",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/space/weapon/shared_wpn_medium_blaster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
__author__ = 'avathar'
class Chromosome(object):
"""
Chromosome represents an individual
"""
def __init__(self, genes):
self.genes = genes
self.fitness = 0
def __str__(self):
"""
String representation of a chromosome, with fitness and genes
"""
string = "%3.3i:\t[ " % self.fitness
for gen in self.genes:
string += "%s " % str(gen)
return string + "]"
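# Illustrative example: a Chromosome with fitness 42 and genes [1, 0, 1]
# renders as "042:\t[ 1 0 1 ]" because "%3.3i" zero-pads the fitness to three digits.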
| {
"content_hash": "b850ee15e551a7054049c0a271bb588c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 73,
"avg_line_length": 23.94736842105263,
"alnum_prop": 0.512087912087912,
"repo_name": "avathardev/ppolom",
"id": "a35f20e72936a1824ac3aac22e1a45969ecfca04",
"size": "455",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "darwin/chromosome.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10670"
}
],
"symlink_target": ""
} |
"""Assists with installing symlinks for all dotfiles/configs under this dir."""
import os
import sys
import errno
FILE_MAP = {
'bashrc': '~/.bashrc',
'colordiffrc': '~/.colordiffrc',
'gitconfig': '~/.gitconfig',
'pystartup': '~/.pystartup',
'tmux.conf': '~/.tmux.conf',
'vimrc': '~/.vimrc',
'alacritty.yml': '~/.config/alacritty/alacritty.yml',
# TODO(joshcb): This should be OSX only.
'slate': '~/.slate',
}
DIR_MAP = {
'sublime': '~/.config/sublime-text-3/Packages/User'
}
def install(src, dst):
ensure_dir_exists(dst)
os.symlink(src, dst)
print "Created %s" % dst
def ensure_dir_exists(path):
dirname = os.path.dirname(os.path.expanduser(path))
try:
os.makedirs(dirname)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
def ensure_installed(src, dst):
rel = os.path.relpath(dst, os.path.expanduser('~'))
if os.path.islink(dst):
print "%s is already a symlink, assuming we've already installed it." % rel
elif not os.path.exists(dst):
result = raw_input("%s doesn't exist. Install symlink? [y/n] " % rel)
if result.upper() == 'Y':
install(src, dst)
else:
print "WARNING: %s already exists and is not a symlink, skipping." % rel
def main():
# TODO(joshcb): Improve multi-platform support.
if sys.platform == 'darwin':
del DIR_MAP['sublime']
config_root = os.path.dirname(__file__)
full_map = {}
for src_dir, dst_dir in DIR_MAP.iteritems():
for file in os.listdir(os.path.join(config_root, src_dir)):
full_map[os.path.join(config_root, src_dir, file)] = os.path.join(
dst_dir, file)
for src_file, dst_file in FILE_MAP.iteritems():
full_map[os.path.join(config_root, src_file)] = dst_file
for src in sorted(full_map.keys()):
ensure_installed(os.path.abspath(src), os.path.expanduser(full_map[src]))
if __name__ == '__main__':
main()
| {
"content_hash": "9c0e532886033a2ad0611d87b6e2ab18",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 26.027397260273972,
"alnum_prop": 0.6368421052631579,
"repo_name": "minism/config",
"id": "b64d9e841c215ed201da924f8c944cecf1884457",
"size": "1923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "install.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "PowerShell",
"bytes": "4145"
},
{
"name": "Python",
"bytes": "5485"
},
{
"name": "Shell",
"bytes": "18011"
},
{
"name": "Vim Script",
"bytes": "347"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('statmaps', '0002_auto_20141001_2036'),
]
operations = [
migrations.AddField(
model_name='collection',
name='journal_name',
field=models.CharField(default=None, max_length=200, null=True, blank=True),
preserve_default=True,
),
]
| {
"content_hash": "95eb1526fee6cc40e5348e24a936e810",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 88,
"avg_line_length": 24.210526315789473,
"alnum_prop": 0.6065217391304348,
"repo_name": "erramuzpe/NeuroVault",
"id": "8ed78489a2424a7e1507a00e951e217343bec449",
"size": "484",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neurovault/apps/statmaps/migrations/0003_collection_journal_name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37250"
},
{
"name": "HTML",
"bytes": "178224"
},
{
"name": "JavaScript",
"bytes": "26595"
},
{
"name": "Nginx",
"bytes": "3944"
},
{
"name": "Perl",
"bytes": "1374"
},
{
"name": "Python",
"bytes": "548104"
},
{
"name": "Shell",
"bytes": "4437"
}
],
"symlink_target": ""
} |
from oslo.serialization import jsonutils
from nova import db
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
# TODO(berrange): Remove NovaObjectDictCompat
class PciDevice(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
"""Object to represent a PCI device on a compute node.
PCI devices are managed by the compute resource tracker, which discovers
the devices from the hardware platform, claims, allocates and frees
devices for instances.
The PCI device information is permanently maintained in a database.
This makes it convenient to get PCI device information, like physical
function for a VF device, adjacent switch IP address for a NIC,
hypervisor identification for a PCI device, etc. It also provides a
convenient way to check device allocation information for administrator
purposes.
A device can be in available/claimed/allocated/deleted/removed state.
    A device is available when it is discovered.
A device is claimed prior to being allocated to an instance. Normally the
transition from claimed to allocated is quick. However, during a resize
operation the transition can take longer, because devices are claimed in
prep_resize and allocated in finish_resize.
A device becomes removed when hot removed from a node (i.e. not found in
the next auto-discover) but not yet synced with the DB. A removed device
should not be allocated to any instance, and once deleted from the DB,
the device object is changed to deleted state and no longer synced with
the DB.
    Field notes::
| 'dev_id':
| Hypervisor's identification for the device, the string format
| is hypervisor specific
| 'extra_info':
| Device-specific properties like PF address, switch ip address etc.
"""
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
# Version 1.2: added request_id field
VERSION = '1.2'
fields = {
'id': fields.IntegerField(),
# Note(yjiang5): the compute_node_id may be None because the pci
# device objects are created before the compute node is created in DB
'compute_node_id': fields.IntegerField(nullable=True),
'address': fields.StringField(),
'vendor_id': fields.StringField(),
'product_id': fields.StringField(),
'dev_type': fields.StringField(),
'status': fields.StringField(),
'dev_id': fields.StringField(nullable=True),
'label': fields.StringField(nullable=True),
'instance_uuid': fields.StringField(nullable=True),
'request_id': fields.StringField(nullable=True),
'extra_info': fields.DictOfStringsField(),
}
def obj_make_compatible(self, primitive, target_version):
target_version = utils.convert_version_to_tuple(target_version)
if target_version < (1, 2) and 'request_id' in primitive:
del primitive['request_id']
def update_device(self, dev_dict):
"""Sync the content from device dictionary to device object.
The resource tracker updates the available devices periodically.
To avoid meaningless syncs with the database, we update the device
object only if a value changed.
"""
# Note(yjiang5): status/instance_uuid should only be updated by
# functions like claim/allocate etc. The id is allocated by
# database. The extra_info is created by the object.
no_changes = ('status', 'instance_uuid', 'id', 'extra_info')
        for key in no_changes:
            dev_dict.pop(key, None)
for k, v in dev_dict.items():
if k in self.fields.keys():
self[k] = v
else:
# Note (yjiang5) extra_info.update does not update
                # obj_what_changed, set it explicitly
extra_info = self.extra_info
extra_info.update({k: v})
self.extra_info = extra_info
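    # Illustrative example (keys are hypothetical): for
    #   dev_dict = {'address': '0000:00:1f.2', 'vendor_id': '8086',
    #               'status': 'claimed', 'phys_function': '0000:00:1f.0'}
    # update_device() copies 'address' and 'vendor_id' into the matching fields,
    # drops 'status' (only claim/allocate may change it) and folds the unknown
    # key 'phys_function' into extra_info.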
def __init__(self, *args, **kwargs):
super(PciDevice, self).__init__(*args, **kwargs)
self.obj_reset_changes()
self.extra_info = {}
@staticmethod
def _from_db_object(context, pci_device, db_dev):
for key in pci_device.fields:
if key != 'extra_info':
pci_device[key] = db_dev[key]
else:
extra_info = db_dev.get("extra_info")
pci_device.extra_info = jsonutils.loads(extra_info)
pci_device._context = context
pci_device.obj_reset_changes()
return pci_device
@base.remotable_classmethod
def get_by_dev_addr(cls, context, compute_node_id, dev_addr):
db_dev = db.pci_device_get_by_addr(
context, compute_node_id, dev_addr)
return cls._from_db_object(context, cls(), db_dev)
@base.remotable_classmethod
def get_by_dev_id(cls, context, id):
db_dev = db.pci_device_get_by_id(context, id)
return cls._from_db_object(context, cls(), db_dev)
@classmethod
def create(cls, dev_dict):
"""Create a PCI device based on hypervisor information.
As the device object is just created and is not synced with db yet
thus we should not reset changes here for fields from dict.
"""
pci_device = cls()
pci_device.update_device(dev_dict)
pci_device.status = 'available'
return pci_device
@base.remotable
def save(self, context):
if self.status == 'removed':
self.status = 'deleted'
db.pci_device_destroy(context, self.compute_node_id, self.address)
elif self.status != 'deleted':
updates = self.obj_get_changes()
if 'extra_info' in updates:
updates['extra_info'] = jsonutils.dumps(updates['extra_info'])
if updates:
db_pci = db.pci_device_update(context, self.compute_node_id,
self.address, updates)
self._from_db_object(context, self, db_pci)
class PciDeviceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# PciDevice <= 1.1
# Version 1.1: PciDevice 1.2
VERSION = '1.1'
fields = {
'objects': fields.ListOfObjectsField('PciDevice'),
}
child_versions = {
'1.0': '1.1',
# NOTE(danms): PciDevice was at 1.1 before we added this
'1.1': '1.2',
}
def __init__(self, *args, **kwargs):
super(PciDeviceList, self).__init__(*args, **kwargs)
self.objects = []
self.obj_reset_changes()
@base.remotable_classmethod
def get_by_compute_node(cls, context, node_id):
db_dev_list = db.pci_device_get_all_by_node(context, node_id)
return base.obj_make_list(context, cls(context), objects.PciDevice,
db_dev_list)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, uuid):
db_dev_list = db.pci_device_get_all_by_instance_uuid(context, uuid)
return base.obj_make_list(context, cls(context), objects.PciDevice,
db_dev_list)
| {
"content_hash": "3821577addf20e79398d4a2c11e5bd9a",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 78,
"avg_line_length": 38.45077720207254,
"alnum_prop": 0.6292952432286754,
"repo_name": "silenceli/nova",
"id": "53e0a840cdd47badc43846e7fb0dc3710782a689",
"size": "8054",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/objects/pci_device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15235414"
},
{
"name": "Shell",
"bytes": "21613"
}
],
"symlink_target": ""
} |
from arelle import PluginManager
from arelle.ModelValue import qname
from arelle import XbrlConst
try:
import regex as re
except ImportError:
import re
from collections import defaultdict
def compile(list, traceRows):
if traceRows:
# compile so each row can be traced by separate expression (slow)
return [(rowNbr, re.compile("(^|\s)" + pattern + "($|\W+)", re.IGNORECASE))
for rowNbr, pattern in list]
else:
# compile single expression for fast execution
return re.compile("(^|\s)" + # always be sure first word starts at start or after space
"($|\W+)|(^|\s)".join(pattern for rowNbr, pattern in list)
.replace(r" ",r"\W+") + "($|\W+)",
re.IGNORECASE)
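# Illustrative note: with list = [(4, r"increase (\w+ )?decrease"), (6, r"net")] and
# traceRows=False, the single compiled pattern is
#   (^|\s)increase\W+(\w+\W+)?decrease($|\W+)|(^|\s)net($|\W+)
# i.e. the per-row patterns are OR-ed together and literal spaces widened to \W+.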
def setup(val, traceRows=False, *args, **kwargs):
if not val.validateLoggingSemantic: # all checks herein are SEMANTIC
return
    # determination of two way concept label based on pattern
# definitions (from documentation label) are used if present, otherwise standard label for these tests
val.twoWayPriItemDefLabelPattern = compile([
# from http://www.sec.gov/spotlight/xbrl/staff-review-observations-061511.shtml
# Cash Flow
(4, r"increase (\w+ )?decrease"),
(5, r"provided by (\w+ )?used in"),
(7, r"net cash inflow or outflow"),
(6, r"net"),
(8, r"change in"),
(9, r"proceeds from (\w+ )?payments (for|to)"),
# Income statement
(13, r"(gain|profit) loss"),
(16, r"income (expense|loss)"),
(18, r"per share"),
# Statement of Stockholders Equity
(22, r"equity"),
(23, r"retained earnings"),
# removed? r"conversion of units",
], traceRows)
# standard label tests, indicate two-way label
val.twoWayPriItemStdLabelPattern = compile([
# from Eric Cohen
(4, r"Increase \(Decrease\)"),
(5, r"Provided by \(Used in\)"),
(6, r"Net"),
(8, r"Change in"),
(9, r"Proceeds from \(Payments for\)"),
(10, r"Proceeds from \(Payments to\)"),
(11, r"Payments for \(Proceeds from\)"),
(12, r"Proceeds from \(Repayments of\)"),
(13, r"Gain \(Loss\)"),
(14, r"Profit \(Loss\)"),
(15, r"Loss \(Gain\)"),
(16, r"Income \(Loss\)"),
(17, r"Income \(Expense\)"),
(18, r"Per Share"),
(19, r"Per Basic Share"),
(20, r"Per Diluted Share"),
(21, r"Per Basic and Diluted"),
(24, r"Appreciation \(Depreciation\)"),
(25, r"Asset \(Liability\)"),
(26, r"Assets Acquired \(Liabilities Assumed\)"),
(27, r"Benefit \(Expense\)"),
(28, r"Expense \(Benefit\)"),
(29, r"Cost[s] \(Credit[s]\)"),
(30, r"Deductions \(Charges\)"),
(31, r"Discount \(Premium\)"),
(32, r"Due from \(to\)"),
(33, r"Earnings \(Losses\)"),
(34, r"Earnings \(Deficit\)"),
(35, r"Excess \(Shortage\)"),
(36, r"Gains \(Losses\)"),
(37, r"Impairment \(Recovery\)"),
(38, r"Income \(Loss\)"),
(39, r"Liability \(Refund\)"),
(40, r"Loss \(Recovery\)"),
(41, r"Obligation[s] \(Asset[s]\)"),
(42, r"Proceeds from \(Repayments of\)"),
(43, r"Proceeds from \(Repurchase of\)"),
(44, r"Provided by \(Used in\)"),
(45, r"Provisions \(Recoveries\)"),
(46, r"Retained Earnings \(Accumulated Deficit\)"),
(47, r"per (\w+ )+"),
(70, r"Conversion of Units"),
(71, r"Effective (\w+ )?Rate"),
], traceRows)
    # determination of a one-way concept based on documentation or standard label
val.oneWayPriItemDefLabelPattern = compile([
(49, r"dividend (\w+ )*(paid|received)"),
], traceRows)
val.oneWayPriItemStdLabelPattern = compile([
(48, r"Payments of (\w+ )*\((Dividends|Capital)\)"),
(49, r"Dividends (\w+ )*\((Pay(ment)?|Receive|Outstanding)\)"),
(50, r"(Stock|Shares) Issued"),
(51, r"Stock (\w+ )*Repurchased"),
(52, r"(Stock|Shares) (\w+ )*Repurchase[d]?"),
(53, r"Treasury Stock (\w+ )*(Beginning (\w+ )*Balance[s]?|Ending (\w+ )*Balance[s]?)"),
(54, r"Treasury Stock (\w+ )*Acquired"),
(55, r"Treasury Stock (\w+ )*Reissued"),
(56, r"Treasury Stock (\w+ )*Retired"),
(57, r"Accumulated Depreciation (\w+ )*Amortization"),
(58, r"Accumulated Other Than Temporary Impairments"),
(59, r"Allowance (\w+ )*Doubtful Accounts"),
(60, r"Amortization (\w+ )*Pension Costs"),
(61, r"Available for Sale Securities (\w+ )*Continuous Loss Position"),
(62, r"Available for Sale Securities Bross Unrealized Losses"),
(63, r"Accounts"),
], traceRows)
    # determination of a two way fact based on any of the fact's dimension member labels
val.twoWayMemberStdLabelPattern = compile([
# per Eric Cohen
(64, r"Change (in|during) \w+"), # don't match word with change in it like exchange
(65, r"\w+ Elimination \w+"),
(66, r"Adjustment"),
(67, r"Effect\s"),
(68, r"Gain(s)? (\w+ )*Loss(es)?"),
(69, r"Income \(Loss\)"),
(70, r"Net(ting)?"), # don't want to match word with net in it like internet
], traceRows)
val.schedules = {}
val.elrMatches = (("1statement", re.compile(r"-\s+Statement\s+-\s+", re.IGNORECASE)),
("2disclosure", re.compile(r"-\s+Disclosure\s+-\s+", re.IGNORECASE)),
("3schedule", re.compile(r"-\s+Schedule\s+-\s+", re.IGNORECASE)))
def schedules(val, concept):
try:
return val.schedules[concept.qname]
except KeyError:
schedules = defaultdict(int)
for rel in val.modelXbrl.relationshipSet(XbrlConst.parentChild).toModelObject(concept):
for roleType in val.modelXbrl.roleTypes.get(rel.linkrole,()):
for elrType, elrPattern in val.elrMatches:
if elrPattern.search(roleType.definition):
schedules[elrType] += 1
scheduleStr = ""
for elrType, num in sorted(schedules.items()):
scheduleStr += ", {0} {1}{2}".format(num, elrType[1:], "s" if num > 1 else "")
val.schedules[concept.qname] = scheduleStr
return scheduleStr
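# Illustrative note (assumption, not from the original source): for a concept
# appearing in two "Statement" ELRs and one "Disclosure" ELR, schedules()
# returns ", 2 statements, 1 disclosure", which factCheck() appends to its
# log messages via the %(elrTypes)s parameter.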
def factCheck(val, fact, *args, **kwargs):
if not val.validateLoggingSemantic: # all checks herein are SEMANTIC
return
concept = fact.concept
context = fact.context
stdLabel = concept.label(lang="en-US", fallbackToQname=False)
defLabel = concept.label(preferredLabel=XbrlConst.documentationLabel, lang="en-US", fallbackToQname=False)
try:
if fact.isNumeric and not fact.isNil and fact.xValue is not None and fact.xValue < 0:
# is fact an explicit non neg
if ((defLabel is not None and val.oneWayPriItemDefLabelPattern.search(defLabel)) or
(stdLabel is not None and val.oneWayPriItemStdLabelPattern.search(stdLabel))):
if context.qnameDims: # if fact has a member
if any((val.twoWayMemberStdLabelPattern.search(dim.member.label(lang="en-US", fallbackToQname=False))
for dim in context.qnameDims.values()
if dim.isExplicit)): # any two way exception member
val.modelXbrl.log('INFO-SEMANTIC', "secStaffObservation.nonNegativeFact.info.A",
_("Negative fact of an explicit non-negative concept is tagged with a member expected to allow negative values: %(fact)s in context %(contextID)s unit %(unitID)s value %(value)s%(elrTypes)s"),
modelObject=fact, fact=fact.qname, contextID=fact.contextID, unitID=fact.unitID,
value=fact.effectiveValue, elrTypes=schedules(val,concept))
else:
val.modelXbrl.log('WARNING-SEMANTIC', "secStaffObservation.nonNegativeFact.warning.B",
_("Negative fact of an explicit non-negative concept, member may or not justify a negative value: %(fact)s in context %(contextID)s unit %(unitID)s value %(value)s%(elrTypes)s"),
modelObject=fact, fact=fact.qname, contextID=fact.contextID, unitID=fact.unitID,
value=fact.effectiveValue, elrTypes=schedules(val,concept))
else: # no member
val.modelXbrl.log('INCONSISTENCY', "secStaffObservation.nonNegativeFact.inconsistency.C",
_("Negative fact of an explicit non-negative concept: %(fact)s in context %(contextID)s unit %(unitID)s value %(value)s %(elrTypes)s"),
modelObject=fact, fact=fact.qname, contextID=fact.contextID, unitID=fact.unitID,
value=fact.effectiveValue, elrTypes=schedules(val,concept))
# else test if fact meets two way rules
elif ((defLabel is not None and val.twoWayPriItemDefLabelPattern.search(defLabel)) or
(stdLabel is not None and val.twoWayPriItemStdLabelPattern.search(stdLabel))):
val.modelXbrl.log('INFO-SEMANTIC', "secStaffObservation.nonNegativeFact.info.D",
_("Negative fact of concept expected to have positive and negative values: %(fact)s in context %(contextID)s unit %(unitID)s value %(value)s%(elrTypes)s"),
modelObject=fact, fact=fact.qname, contextID=fact.contextID, unitID=fact.unitID,
value=fact.effectiveValue, elrTypes=schedules(val,concept))
else:
if context.qnameDims: # if fact has a member
if any((val.twoWayMemberStdLabelPattern.search(dim.member.label(lang="en-US", fallbackToQname=False))
for dim in context.qnameDims.values()
if dim.isExplicit)): # any two way exception member
val.modelXbrl.log('INFO-SEMANTIC', "secStaffObservation.nonNegativeFact.info.E",
_("Negative fact for typically non-negative concept, but tagged with a member expected to allow negative values: %(fact)s in context %(contextID)s unit %(unitID)s value %(value)s%(elrTypes)s"),
modelObject=fact, fact=fact.qname, contextID=fact.contextID, unitID=fact.unitID,
value=fact.effectiveValue, elrTypes=schedules(val,concept))
else:
val.modelXbrl.log('WARNING-SEMANTIC', "secStaffObservation.nonNegativeFact.warning.F",
_("Negative fact of a typically non-negative concept, member may or not justify a negative value: %(fact)s in context %(contextID)s unit %(unitID)s value %(value)s%(elrTypes)s"),
modelObject=fact, fact=fact.qname, contextID=fact.contextID, unitID=fact.unitID,
value=fact.effectiveValue, elrTypes=schedules(val,concept))
else: # no member
val.modelXbrl.log('INCONSISTENCY', "secStaffObservation.nonNegativeFact.inconsistency.G",
_("Negative fact of a \"presumed by default\" non-negative concept: %(fact)s in context %(contextID)s unit %(unitID)s value %(value)s%(elrTypes)s"),
modelObject=fact, fact=fact.qname, contextID=fact.contextID, unitID=fact.unitID,
value=fact.effectiveValue, elrTypes=schedules(val,concept))
except Exception as ex:
val.modelXbrl.log('WARNING-SEMANTIC', "arelle:nonNegFactTestException",
_("%(fact)s in context %(contextID)s unit %(unitID)s value %(value)s%(elrTypes)s cannot be tested nonnegative"),
modelObject=fact, fact=fact.qname, contextID=fact.contextID, unitID=fact.unitID,
value=fact.effectiveValue, elrTypes=schedules(val,fact))
def final(val, conceptsUsed, *args, **kwargs):
if not val.validateLoggingSemantic: # all checks herein are SEMANTIC
return
    del val.twoWayPriItemDefLabelPattern
    del val.twoWayPriItemStdLabelPattern
    del val.oneWayPriItemDefLabelPattern
    del val.oneWayPriItemStdLabelPattern
    del val.twoWayMemberStdLabelPattern
    del val.elrMatches
    del val.schedules
def saveDtsMatches(dts, secDtsTagMatchesFile):
setup(dts, True)
import sys, csv
if sys.version[0] >= '3':
csvOpenMode = 'w'
csvOpenNewline = ''
else:
csvOpenMode = 'wb' # for 2.7
csvOpenNewline = None
csvFile = open(secDtsTagMatchesFile, csvOpenMode, newline=csvOpenNewline)
csvWriter = csv.writer(csvFile, dialect="excel")
csvWriter.writerow(("Concept", "Rule", "Row", "Pattern", "Label", "Documentation"))
num1wayConcepts = 0
num2wayConcepts = 0
num2wayMembers = 0
for qname, concept in sorted(dts.qnameConcepts.items(), key=lambda item: item[0]):
if concept.isItem and concept.isPrimaryItem: # both pri item and domain members
stdLabel = concept.label(lang="en-US", fallbackToQname=False)
defLabel = concept.label(preferredLabel=XbrlConst.documentationLabel, lang="en-US", fallbackToQname=False)
if concept.type is not None and concept.type.isDomainItemType:
if stdLabel is not None:
for rowNbr, pattern in dts.twoWayMemberStdLabelPattern:
if pattern.search(stdLabel):
csvWriter.writerow((str(qname), "member-2-way", rowNbr, pattern.pattern[6:-7], stdLabel, defLabel))
num2wayMembers += 1
elif concept.isNumeric and not concept.isAbstract: # not dimension domain/member
if defLabel is not None:
for rowNbr, pattern in dts.twoWayPriItemDefLabelPattern:
if pattern.search(defLabel):
csvWriter.writerow((str(qname), "concept-2-way-doc", rowNbr, pattern.pattern[6:-7], stdLabel, defLabel))
num2wayConcepts += 1
for rowNbr, pattern in dts.oneWayPriItemDefLabelPattern:
if pattern.search(defLabel):
csvWriter.writerow((str(qname), "concept-1-way-doc", rowNbr, pattern.pattern[6:-7], stdLabel, defLabel))
num1wayConcepts += 1
if stdLabel is not None:
for rowNbr, pattern in dts.twoWayPriItemStdLabelPattern:
if pattern.search(stdLabel):
csvWriter.writerow((str(qname), "concept-2-way-lbl", rowNbr, pattern.pattern[6:-7], stdLabel, defLabel))
num2wayConcepts += 1
for rowNbr, pattern in dts.oneWayPriItemStdLabelPattern:
if pattern.search(stdLabel):
csvWriter.writerow((str(qname), "concept-1-way-lbl", rowNbr, pattern.pattern[6:-7], stdLabel, defLabel))
num1wayConcepts += 1
csvFile.close()
dts.log('INFO-SEMANTIC', "info:saveSecDtsTagMatches",
_("SecDtsTagMatches entry %(entryFile)s has %(numberOfTwoWayPriItems)s two way primary items, %(numberOfOneWayPriItems)s one way primary items, %(numberOfTwoWayMembers)s two way members in output file %(secDtsTagMatchesFile)s."),
modelObject=dts,
entryFile=dts.uri,
numberOfTwoWayPriItems=num2wayConcepts,
numberOfOneWayPriItems=num1wayConcepts,
numberOfTwoWayMembers=num2wayMembers,
secDtsTagMatchesFile=secDtsTagMatchesFile)
final(dts)
def saveDtsMatchesMenuEntender(cntlr, menu, *args, **kwargs):
# Extend menu with an item for the savedts plugin
menu.add_command(label="Save SEC tag matches",
underline=0,
command=lambda: saveDtsMatchesMenuCommand(cntlr) )
def saveDtsMatchesMenuCommand(cntlr):
# save DTS menu item has been invoked
if cntlr.modelManager is None or cntlr.modelManager.modelXbrl is None:
cntlr.addToLog("No taxonomy loaded.")
return
# get file name into which to save log file while in foreground thread
secDtsTagMatchesFile = cntlr.uiFileDialog("save",
title=_("Save SEC DTS tag matches file"),
filetypes=[(_("DTS tag matches .csv file"), "*.csv")],
defaultextension=".txt")
if not secDtsTagMatchesFile:
return False
try:
saveDtsMatches(cntlr.modelManager.modelXbrl, secDtsTagMatchesFile)
except Exception as ex:
dts = cntlr.modelManager.modelXbrl
dts.error("exception",
_("SEC DTS Tags Matches exception: %(error)s"), error=ex,
modelXbrl=dts,
exc_info=True)
def saveDtsMatchesCommandLineOptionExtender(parser, *args, **kwargs):
# extend command line options with a save DTS option
parser.add_option("--save-sec-tag-dts-matches",
action="store",
dest="secDtsTagMatchesFile",
help=_("Save SEC DTS tag matches CSV file."))
def saveDtsMatchesCommandLineXbrlRun(cntlr, options, modelXbrl, *args, **kwargs):
# extend XBRL-loaded run processing for this option
if getattr(options, "secDtsTagMatchesFile", False):
if cntlr.modelManager is None or cntlr.modelManager.modelXbrl is None:
cntlr.addToLog("No taxonomy loaded.")
return
saveDtsMatches(cntlr.modelManager.modelXbrl, options.secDtsTagMatchesFile)
__pluginInfo__ = {
    # Do not use _( ) in pluginInfo itself (it is applied later, after loading)
'name': 'Validate US SEC Tagging',
'version': '0.9',
'description': '''US SEC Tagging Validation. Includes non-negative rules.''',
'license': 'Apache-2',
'author': 'Ewe S. Gap',
'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'Validate.EFM.Start': setup,
'Validate.EFM.Fact': factCheck,
'Validate.EFM.Finally': final,
'CntlrWinMain.Menu.Tools': saveDtsMatchesMenuEntender,
'CntlrCmdLine.Options': saveDtsMatchesCommandLineOptionExtender,
'CntlrCmdLine.Xbrl.Run': saveDtsMatchesCommandLineXbrlRun,
}
| {
"content_hash": "b8b3421db68b06c99c03e17f7b9a8d83",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 242,
"avg_line_length": 54.882697947214076,
"alnum_prop": 0.591023243387657,
"repo_name": "sternshus/Arelle",
"id": "97ff6efe0baf81402f019f92b53acf70be03b596",
"size": "18715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arelle/plugin/validate/USSecTagging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "31873"
},
{
"name": "C#",
"bytes": "850"
},
{
"name": "HTML",
"bytes": "8640"
},
{
"name": "Java",
"bytes": "4663"
},
{
"name": "Makefile",
"bytes": "5565"
},
{
"name": "NSIS",
"bytes": "9050"
},
{
"name": "PLSQL",
"bytes": "1056360"
},
{
"name": "Python",
"bytes": "5523072"
},
{
"name": "Shell",
"bytes": "13921"
}
],
"symlink_target": ""
} |
from json import loads, dumps
from winload import Winload
from linload import Linload
from network import Network
from sys import platform
class LocalController:
def __init__(self):
self.platform = ''
self.data = {}
self.dataGetter = None
self.networkWorker = None
self.config = {}
def setup(self):
if platform.startswith('linux'):
self.platform = 'linux'
self.dataGetter = Linload()
elif platform.startswith('win'):
self.platform = 'win'
self.dataGetter = Winload()
self.config = self.loadJsonFile('./data/config.json')
self.networkWorker = Network(self.config)
def loadJsonFile(self, filename):
with open(filename) as json_file:
data = loads(json_file.read())
return data
def saveJsonFile(self, filename, variable):
with open(filename, "w") as json_file:
data = json_file.write(dumps(variable, indent=4))
return data
def main(self):
self.setup()
self.networkWorker.startHeartBeat()
self.data = self.dataGetter.main()
self.saveJsonFile('./data/data.json', self.data)
self.networkWorker.sendFile()
if __name__ == '__main__':
LocalController().main()
| {
"content_hash": "523c57836f88ac03a77ed6baedb399c8",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 61,
"avg_line_length": 28.23913043478261,
"alnum_prop": 0.6058506543494996,
"repo_name": "arseniypetrikor/lc-client",
"id": "dc6cf5b9b0376f948b77320a3947555d4401cad7",
"size": "1299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5744"
}
],
"symlink_target": ""
} |
def space_out_camel_case(camel):
"""
Converts a "CamelCasedName" to "Camel Case Name".
"""
chars = []
for char in camel:
if len(chars) >= 2 and chars[-1] != ' ':
if char.isupper() and chars[-1].islower():
chars.append(' ')
elif char.islower() and chars[-1].isupper() and chars[-2].isupper():
chars.insert(len(chars) - 1, ' ')
chars.append(char)
return ''.join(chars)
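# Example usage (illustrative, not part of the original module):
#   space_out_camel_case('CamelCasedName')  ->  'Camel Cased Name'
#   space_out_camel_case('XMLParser')       ->  'XML Parser'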
| {
"content_hash": "6d3fc23e81052c8398a5ead9e2dc547e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 80,
"avg_line_length": 29.125,
"alnum_prop": 0.5128755364806867,
"repo_name": "servee/servee",
"id": "498394b734eeec12aed9463097d99bdd52ab95cd",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "servee/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68505"
},
{
"name": "HTML",
"bytes": "46896"
},
{
"name": "JavaScript",
"bytes": "5602"
},
{
"name": "Python",
"bytes": "56014"
}
],
"symlink_target": ""
} |
import unittest
import os.path
import codecs
import json
from datetime import datetime
from mock import sentinel
from pyhy.api.registry import HyvesRegistry
from pyhy.api.jsonreader.scrapparser import ScrapParser
class Test(unittest.TestCase):
def setUp(self):
self.r = HyvesRegistry(sentinel.connectionobject)
self.p = ScrapParser(self.r)
def testParse(self):
datafile = os.path.join(os.path.split(__file__)[0], 'users.getScraps_homoapi.data')
scrapjson = json.load(codecs.open(datafile))['scrap'][0]
scrap = self.p.parse(scrapjson)
self.assertEqual(scrap, self.r.Scrap(u'cb1040b149d76baa'))
self.assertEqual(scrap.sender, self.r.User(u'6c7ec0b62fca4e5f'))
self.assertEqual(scrap.target, self.r.User(u'6f89a2f516034edc'))
self.assertEqual(scrap.created, datetime(2009, 12, 9, 11, 7, 13))
self.assertEqual(scrap.body[:10], u'I want my ')
| {
"content_hash": "6b1b279af50e1d59e245def4b6bdcdfa",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 91,
"avg_line_length": 34.55555555555556,
"alnum_prop": 0.7020364415862809,
"repo_name": "valhallasw/pyhy",
"id": "23b563d353ce1888a40d4185b6c267d364ad780e",
"size": "933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/api/jsonreader/test_scrapparser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30710"
}
],
"symlink_target": ""
} |
""" Downloading dataset
"""
from world import world, setup_module, teardown_module
import create_source_steps as source_create
import create_dataset_steps as dataset_create
class TestDownloadDataset(object):
def test_scenario1(self):
"""
Scenario: Successfully exporting a dataset:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I download the dataset file to "<local_file>"
Then file "<local_file>" is like file "<data>"
Examples:
| data | time_1 | time_2 | local_file |
| ../data/iris.csv | 30 | 30 | ./tmp/exported_iris.csv |
"""
print self.test_scenario1.__doc__
examples = [
['data/iris.csv', '30', '30', 'tmp/exported_iris.csv']]
for example in examples:
print "\nTesting with:\n", example
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
dataset_create.i_export_a_dataset(self, example[3])
dataset_create.files_equal(self, example[3], example[0])
| {
"content_hash": "21be98009488c13f4d986ffa2d46e8d3",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 81,
"avg_line_length": 42.82857142857143,
"alnum_prop": 0.580386924616411,
"repo_name": "ShaguptaS/python",
"id": "971487b85edaa674d0ede9c87dd2199fb1aeed56",
"size": "2118",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "bigml/tests/test_15_download_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "600936"
}
],
"symlink_target": ""
} |
import sys
import os
import csv
import cv2
from collections import OrderedDict
import argparse
import numpy as np
import pandas as pd
from seqlearn.perceptron import StructuredPerceptron
from seqlearn.evaluation import SequenceKFold
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import GridSearchCV
from sklearn import svm
if __name__ == '__main__':
sys.path.append('../../')
#=============================================
class ImmersionDetector:
"""
    Implements the detector of immersion using a structured perceptron sequence model.
"""
#---------------------------------------------
def __init__(self):
"""
Class constructor.
"""
self._clf = StructuredPerceptron(max_iter=100)
"""
        Structured perceptron sequence model for the detection of the
        immersion level (predicted per frame) from the distance gradient and
        the blink rate of players.
"""
self._states = OrderedDict([
(0, 'not at all'), (1, 'slightly'),
(2, 'moderately'), (3, 'fairly'), (4, 'extremely')
])
"""
Hidden states of the immersion level.
"""
modulePath = os.path.dirname(__file__)
self._modelFile = os.path.abspath('{}/./models/immersion_model.dat' \
.format(modulePath))
"""
Name of the file used to persist the model in the disk.
"""
# Load the model from the disk, if its file exists
if os.path.isfile(self._modelFile):
if not self.load():
print('Could not load the model from file {}' \
.format(self._modelFile))
#---------------------------------------------
def save(self):
"""
Persists the model to the disk.
Returns
-------
ret: bool
Indication on if the saving was succeeded or not.
"""
try:
joblib.dump(self._clf, self._modelFile)
except:
return False
return True
#---------------------------------------------
def load(self):
"""
Restores the model from the disk.
Returns
-------
ret: bool
Indication on if the loading was succeeded or not.
"""
try:
clf = joblib.load(self._modelFile)
except:
return False
self._clf = clf
return True
#---------------------------------------------
def detect(self, features):
"""
Detects the immersion level based on the given features.
Parameters
----------
        features: array-like
            Sequence of per-frame feature vectors (face distance gradient and
            blink rate) for a single subject, in temporal order.
Returns
-------
level: int
Immersion level, as one of the possible values: 0 ("not at all"),
1 ("slightly"), 2 ("moderately"), 3 ("fairly") or 4 ("extremely").
"""
        # Minimal sketch (the original left this as a TODO): label each frame
        # with the trained sequence model and return the most frequent level.
        features = np.asarray(features)
        predictions = self._clf.predict(features, [len(features)])
        levels, counts = np.unique(predictions, return_counts=True)
        return int(levels[np.argmax(counts)])
#---------------------------------------------
def readData(self, annotationPath):
"""
Reads the data used for training or cross-validating the model.
Parameters
----------
annotationPath: str
Path where to find the annotation files to read the data from.
Returns
-------
data: OrderedDict
Dictionary with the face distance gradient, blink rate and
immersion labels of each subject, in individual data frames.
"""
##################################
# Read the data
##################################
print('Reading the data...')
scaler = MinMaxScaler(feature_range=(0, 1))
subjects = [1, 2, 6, 7, 14, 15, 17, 18, 20, 21, 23, 25, 26, 30, 31, 34,
37, 38, 39, 40, 41]
data = OrderedDict()
for subject in subjects:
print('Reading data of subject {}...'.format(subject))
# Read the face data
name = '{}/player_{:03d}-face.csv' \
.format(annotationPath, subject)
face = pd.read_csv(name, index_col=0, usecols=(0, 1, 2, 3, 4, 142))
# Find the frames where the face detection failed
t = (face[[0, 1, 2, 3]] == 0).all(1)
fails = face[t].index[:]
# Drop the rows where face detection failed
face = face.drop(fails)
# Read the blink data
name = '{}/player_{:03d}-blinks.csv' \
.format(annotationPath, subject)
blink = pd.read_csv(name, index_col=0, usecols=(0, 2))
# Drop the rows where face detection failed
blink = blink.drop(fails)
# Read the review responses
name = '{}/player_{:03d}-review.csv' \
.format(annotationPath, subject)
review = pd.read_csv(name, index_col=0, usecols=(0, 2))
# Drop the rows where face detection failed
review = review.drop(fails)
# Join the features and labels in the same data frame
df = pd.DataFrame()
df['gradient'] = face['face.gradient']
df['rate'] = blink['blink.rate']
df['immersion'] = review['involvement']
# Keep only the data of the last 5 minutes of the video
# (with 30 fps, the start frame is: 5 * 60 * 30 = 9000)
df = df.loc[9000:]
# Normalize the data
grad = np.reshape(np.array(df['gradient']), (-1, 1))
rat = np.reshape(np.array(df['rate']), (-1, 1))
grad = scaler.fit_transform(grad)
rat = scaler.fit_transform(rat)
df['gradient'] = grad.squeeze(-1)
df['rate'] = rat.squeeze(-1)
# Store the data frame for the subject
data[subject] = df
return data
#---------------------------------------------
def crossValidate(self, args):
"""
        Performs a cross-validation on the ImmersionDetector model.
Parameters
----------
args: object
Object produced by the package argparse with the command line
arguments.
Returns
-------
errLevel: int
Error level of the execution (i.e. 0 indicates success; any other
value indicates specific failure conditions).
"""
##################################
# Read the training data
##################################
if not os.path.isdir(args.annotationPath):
print('annotation path does not exist: {}' \
.format(args.annotationPath))
return -1
data = self.readData(args.annotationPath)
############################
# Execute the K-Fold cross validation
############################
x = []
y = []
l = []
for subject, df in data.items():
lx = df[['gradient', 'rate']].values.tolist()
#lx = df[['rate']].values.tolist()
ly = np.array(df[['immersion']].values.tolist()).squeeze(-1)
x.extend(lx)
y.extend(ly.tolist())
l.append(len(lx))
x = np.array(x)
y = np.array(y)
print('Executing cross-validation with k = {}...'.format(args.k))
clf = StructuredPerceptron(random_state=2)
scores = []
folds = SequenceKFold(l, n_folds=args.k)
for train_idx, train_len, test_idx, test_len in folds:
xTrain = x[train_idx]
yTrain = y[train_idx]
clf.fit(xTrain, yTrain, train_len)
xTest = x[test_idx]
yTest = y[test_idx]
yPred = clf.predict(xTest, test_len)
scores.append(accuracy_score(yTest, yPred))
scores = np.array(scores)
print(scores)
print('Result of the K-Fold CV: {:3f} (+- {:3f})' \
.format(scores.mean(), 2 * scores.std()))
############################
# Execute the Leave-One-Out cross validation
############################
return 0
#---------------------------------------------
def train(self, args):
"""
        Trains the ImmersionDetector model.
Parameters
----------
args: object
Object produced by the package argparse with the command line
arguments.
Returns
-------
errLevel: int
Error level of the execution (i.e. 0 indicates success; any other
value indicates specific failure conditions).
"""
##################################
# Read the training data
##################################
if not os.path.isdir(args.annotationPath):
print('annotation path does not exist: {}' \
.format(args.annotationPath))
return -1
data = self.readData(args.annotationPath)
x = []
y = []
l = []
for subject, df in data.items():
lx = df[['gradient', 'rate']].values.tolist()
ly = np.array(df[['immersion']].values.tolist()).squeeze(-1)
x.extend(lx)
y.extend(ly.tolist())
l.append(len(lx))
############################
# Execute the training
############################
print('Training the detector...')
self._clf.fit(x, y, l)
if not self.save():
print('Could not persist the trained model to disk (in file {})' \
.format(self._modelFile))
return 0
#---------------------------------------------
def optimize(self, args):
"""
        Optimizes the ImmersionDetector model, trying to find the SVM parameters
that would yield better results.
Parameters
----------
args: object
Object produced by the package argparse with the command line
arguments.
Returns
-------
errLevel: int
Error level of the execution (i.e. 0 indicates success; any other
value indicates specific failure conditions).
"""
############################
# Get the data
############################
# Read the CSV file ignoring the header and the first column (which
# contains the file name of the image used for extracting the data in
# a row)
try:
data = np.genfromtxt(args.featuresFile, delimiter=',',
skip_header=1)
data = data[:, 1:]
except:
print('Could not read CSV file: {}'.format(args.featuresFile))
return -1
x = data[:, :-1]
y = np.squeeze(data[:, -1:])
############################
# Execute the optimization
############################
tunningParams = [
{
'kernel': ['linear'],
'C': [1e-3, 1e-2, 1e-1, 1, 1e+1, 1e+2, 1e+3]
},
{
'kernel': ['rbf'],
'gamma': [1e-3, 1e-2, 1e-1, 1, 1e+1, 1e+2, 1e+3],
'C': [1e-3, 1e-2, 1e-1, 1, 1e+1, 1e+2, 1e+3]
},
]
scores = ['precision', 'recall']
for score in scores:
print('# Tuning hyper-parameters for {}\n'.format(score))
clf = GridSearchCV(svm.SVC(C=1), tunningParams, cv=5,
scoring=format('{}_macro'.format(score)))
clf.fit(x, y)
print('Best parameters set found on development set:\n')
print(clf.best_params_)
print('\nGrid scores on development set:\n')
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print('{:.3f} (+/-{:.3f}) for {}'.format(mean, std * 2, params))
#print('\nDetailed classification report:\n')
#print('The model is trained on the full development set.')
#print('The scores are computed on the full evaluation set.\n')
#y_true, y_pred = y_test, clf.predict(X_test)
#print(classification_report(y_true, y_pred))
#print()
return 0
#---------------------------------------------
# namespace verification for running this script
#---------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Automation of the '
'ImmersionDetector. Allows cross-'
'validating and training the model.')
subparser = parser.add_subparsers(help='Existing sub commands.',
dest='subParser')
cvParser = subparser.add_parser(name='crossValidate',
help='Runs a cross-validation in model '
'with the KFold method.')
cvParser.add_argument('annotationPath',
help='Path with the annotated data.'
)
cvParser.add_argument('-k', metavar='int', type=int, default=5,
help='Number of folds to use in the cross-validation. '
'The default is 5.'
)
trParser = subparser.add_parser(name='trainModel',
help='Trains the model from the annotated '
'data.')
trParser.add_argument('annotationPath',
help='Path with the annotated data.'
)
args = parser.parse_args()
if args.subParser is None:
        parser.error('one subcommand is required')
model = ImmersionDetector()
if args.subParser == 'crossValidate':
if args.k < 5:
parser.error('value of option -k must be at least 5')
sys.exit(model.crossValidate(args))
elif args.subParser == 'trainModel':
        sys.exit(model.train(args))
| {
"content_hash": "1fc62b0c44cd3960660d2cb367b31cbb",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 81,
"avg_line_length": 32.33483146067416,
"alnum_prop": 0.46806588366113006,
"repo_name": "luigivieira/fsdk",
"id": "bcbb5963cc44b6916b160017403c8a0c21842de2",
"size": "15698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fsdk/detectors/immersion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "348877"
}
],
"symlink_target": ""
} |
import pints
import pints.toy
import unittest
import numpy as np
class TestGermanCreditHierarchicalLogPDF(unittest.TestCase):
"""
Tests the logpdf toy distribution from fitting a hierarchical logistic
model to German credit data.
"""
@classmethod
def setUpClass(cls):
""" Set up problem for tests. """
# download data
model = pints.toy.GermanCreditHierarchicalLogPDF(download=True)
x, y, z = model.data()
cls.y = y
cls.x = x
cls.model = model
def test_download(self):
# tests that method can download data from UCI repo
x, y, z = self.model.data()
self.assertEqual(x.shape[0], 1000)
self.assertEqual(x.shape[1], 25)
self.assertEqual(len(y), 1000)
def test_errors(self):
        # tests errors of inappropriate function calls and inits
self.assertRaises(ValueError, pints.toy.GermanCreditHierarchicalLogPDF,
np.zeros((27, 27)), self.y)
self.assertRaises(ValueError, pints.toy.GermanCreditHierarchicalLogPDF,
self.x, np.ones(1000) * 2)
self.assertRaises(ValueError, pints.toy.GermanCreditHierarchicalLogPDF,
self.x, self.y, True)
self.assertRaises(ValueError, pints.toy.GermanCreditHierarchicalLogPDF,
None, self.y)
self.assertRaises(ValueError, pints.toy.GermanCreditHierarchicalLogPDF,
self.x, None)
def test_local(self):
# tests that model can be instantiated using local files
x, y, z = self.model.data()
model = pints.toy.GermanCreditHierarchicalLogPDF(x=x, y=y)
x1, y1, z1 = model.data()
self.assertTrue(np.array_equal(x, x1))
self.assertTrue(np.array_equal(y, y1))
self.assertTrue(np.array_equal(z, z1))
def test_values(self):
# tests calls
self.assertAlmostEqual(self.model(np.ones(326)),
-20174.077700157857,
places=6)
def test_sensitivities(self):
# test sensitivity values vs reference
val, dp = self.model.evaluateS1(np.ones(326))
self.assertEqual(val, self.model(np.ones(326)))
self.assertEqual(len(dp), 326)
self.assertAlmostEqual(dp[0], -1000.02)
self.assertAlmostEqual(dp[1], -700.8386959844057, places=6)
def test_givens(self):
# tests whether boundaries are correct and n_parameters
self.assertEqual(326, self.model.n_parameters())
borders = self.model.suggested_bounds()
self.assertEqual(borders[0][0], -100)
self.assertEqual(borders[1][0], 100)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "d39541f81108f451ef55c5f3579fc09a",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 79,
"avg_line_length": 37.229729729729726,
"alnum_prop": 0.6127041742286752,
"repo_name": "martinjrobins/hobo",
"id": "1cb3e5071351154dad8e1bcc2326d135394eb626",
"size": "3025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pints/tests/test_toy_german_credit_hierarchical_logpdf.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "278656"
},
{
"name": "C++",
"bytes": "86361"
},
{
"name": "CMake",
"bytes": "1710"
},
{
"name": "Cuda",
"bytes": "7890"
},
{
"name": "M",
"bytes": "2347"
},
{
"name": "Matlab",
"bytes": "437018"
},
{
"name": "Python",
"bytes": "1841329"
},
{
"name": "Stan",
"bytes": "8353"
},
{
"name": "TeX",
"bytes": "88007"
},
{
"name": "mupad",
"bytes": "73951"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('workflow', '0032_auto_20190127_1536'),
('tola_management', '0002_indicatorauditlog'),
]
operations = [
migrations.CreateModel(
name='ProgramAdminAuditLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='Modification Date')),
('change_type', models.CharField(max_length=255, verbose_name='Modification Type')),
('previous_entry', models.TextField()),
('new_entry', models.TextField()),
('admin_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='workflow.TolaUser')),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='workflow.Program')),
],
),
]
| {
"content_hash": "1c34e8d3b1b6c4169e4abf696ade0c5a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 137,
"avg_line_length": 41.851851851851855,
"alnum_prop": 0.6132743362831858,
"repo_name": "mercycorps/TolaActivity",
"id": "faab5dd6e20c1594857df62fb2ef70aad60eff03",
"size": "1203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tola_management/migrations/0003_programadminauditlog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "432462"
},
{
"name": "Dockerfile",
"bytes": "109"
},
{
"name": "HTML",
"bytes": "437661"
},
{
"name": "JavaScript",
"bytes": "5654491"
},
{
"name": "Python",
"bytes": "1741812"
},
{
"name": "Shell",
"bytes": "4752"
}
],
"symlink_target": ""
} |
import flask
from werkzeug.contrib.cache import SimpleCache
import config
import odkviewer.connector
app = flask.Flask(__name__)
cache = SimpleCache()
cache.default_timeout = config.cache_timeout
conn = odkviewer.connector.OdkConnector(config.odkurl, config.odkuser, config.odkpass)
@app.after_request
def gnu_terry_pratchett(resp):
resp.headers.add("X-Clacks-Overhead", "GNU Terry Pratchett")
return resp
def checkfid(formid):
    # Make sure the list of forms has been fetched and cached
if not cache.has('forms'):
cache.set('forms', conn.get_forms())
forms = cache.get('forms')
# If the form id is not in the form list, abort
if formid not in forms.keys():
flask.abort(404)
return forms.get(formid)
@app.route('/api/v1/forms')
def listforms():
if cache.has('forms'):
return flask.jsonify(cache.get('forms'))
forms = conn.get_forms()
cache.set('forms', forms)
return flask.jsonify(forms)
@app.route('/api/v1/forms/<formid>')
def getform(formid):
form = checkfid(formid)
return flask.jsonify(form)
@app.route('/api/v1/forms/<formid>/submissions')
def getsubmissions(formid):
form = checkfid(formid)
fdata = conn.get_submissions_from_form(formid, form)
return flask.jsonify(fdata)
@app.route('/')
def main():
return flask.render_template('index.html')
if __name__ == '__main__':
app.run(debug=config.debug)
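# Example requests against the running service (illustrative; host and port
# depend on the Flask and config settings):
#   GET /api/v1/forms                        -> JSON listing of available forms
#   GET /api/v1/forms/<formid>               -> metadata for one form (404 if unknown)
#   GET /api/v1/forms/<formid>/submissions   -> submissions for that form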
| {
"content_hash": "a06554b1aba1b020a2a386a9b75f4e80",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 86,
"avg_line_length": 24.68421052631579,
"alnum_prop": 0.681592039800995,
"repo_name": "Ongawa/odk-viewer",
"id": "af6abed4011af47e43a1fb1e95dc473b1909495d",
"size": "1430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "844"
},
{
"name": "HTML",
"bytes": "1862"
},
{
"name": "JavaScript",
"bytes": "9400"
},
{
"name": "Python",
"bytes": "11595"
}
],
"symlink_target": ""
} |
import numpy as np
from astropy.units import UnitsError, UnitConversionError, Unit
from astropy import log
from .nddata import NDData
from .nduncertainty import NDUncertainty
from .mixins.ndslicing import NDSlicingMixin
from .mixins.ndarithmetic import NDArithmeticMixin
from .mixins.ndio import NDIOMixin
from .flag_collection import FlagCollection
__all__ = ['NDDataArray']
class NDDataArray(NDArithmeticMixin, NDSlicingMixin, NDIOMixin, NDData):
"""
An ``NDData`` object with arithmetic. This class is functionally equivalent
to ``NDData`` in astropy versions prior to 1.0.
The key distinction from raw numpy arrays is the presence of
additional metadata such as uncertainties, a mask, units, flags,
and/or a coordinate system.
See also: http://docs.astropy.org/en/stable/nddata/
Parameters
-----------
data : `~numpy.ndarray` or `NDData`
        The actual data contained in this `NDData` object. Note that this
        will always be copied by *reference*, so you should make a copy of
        the ``data`` before passing it in if that's the desired behavior.
uncertainty : `~astropy.nddata.NDUncertainty`, optional
Uncertainties on the data.
mask : `~numpy.ndarray`-like, optional
Mask for the data, given as a boolean Numpy array or any object that
can be converted to a boolean Numpy array with a shape
matching that of the data. The values must be ``False`` where
the data is *valid* and ``True`` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
``mask`` here will causes the mask from the masked array to be
ignored.
flags : `~numpy.ndarray`-like or `~astropy.nddata.FlagCollection`, optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type (or an object which can be converted
to a Numpy array) with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
wcs : undefined, optional
WCS-object containing the world coordinate system for the data.
.. warning::
This is not yet defined because the discussion of how best to
represent this class's WCS system generically is still under
consideration. For now just leave it as None
meta : `dict`-like object, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object. e.g., creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.UnitBase` instance or str, optional
The units of the data.
Raises
------
ValueError :
If the `uncertainty` or `mask` inputs cannot be broadcast (e.g., match
shape) onto ``data``.
"""
def __init__(self, data, *args, flags=None, **kwargs):
# Initialize with the parent...
super().__init__(data, *args, **kwargs)
# ...then reset uncertainty to force it to go through the
# setter logic below. In base NDData all that is done is to
# set self._uncertainty to whatever uncertainty is passed in.
self.uncertainty = self._uncertainty
# Same thing for mask.
self.mask = self._mask
# Initial flags because it is no longer handled in NDData
# or NDDataBase.
if isinstance(data, NDDataArray):
if flags is None:
flags = data.flags
else:
log.info("Overwriting NDDataArrays's current "
"flags with specified flags")
self.flags = flags
# Implement uncertainty as NDUncertainty to support propagation of
# uncertainties in arithmetic operations
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
class_name = self.__class__.__name__
if not self.unit and value._unit:
# Raise an error if uncertainty has unit and data does not
raise ValueError("Cannot assign an uncertainty with unit "
"to {} without "
"a unit".format(class_name))
self._uncertainty = value
self._uncertainty.parent_nddata = self
else:
raise TypeError("Uncertainty must be an instance of "
"a NDUncertainty object")
else:
self._uncertainty = value
# Override unit so that we can add a setter.
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
from . import conf
try:
if self._unit is not None and conf.warn_setting_unit_directly:
log.info('Setting the unit directly changes the unit without '
'updating the data or uncertainty. Use the '
'.convert_unit_to() method to change the unit and '
'scale values appropriately.')
except AttributeError:
# raised if self._unit has not been set yet, in which case the
# warning is irrelevant
pass
if value is None:
self._unit = None
else:
self._unit = Unit(value)
# Implement mask in a way that converts nicely to a numpy masked array
@property
def mask(self):
if self._mask is np.ma.nomask:
return None
else:
return self._mask
@mask.setter
def mask(self, value):
# Check that value is not either type of null mask.
if (value is not None) and (value is not np.ma.nomask):
mask = np.array(value, dtype=np.bool_, copy=False)
if mask.shape != self.data.shape:
raise ValueError("dimensions of mask do not match data")
else:
self._mask = mask
else:
# internal representation should be one numpy understands
self._mask = np.ma.nomask
@property
def shape(self):
"""
shape tuple of this object's data.
"""
return self.data.shape
@property
def size(self):
"""
integer size of this object's data.
"""
return self.data.size
@property
def dtype(self):
"""
`numpy.dtype` of this object's data.
"""
return self.data.dtype
@property
def ndim(self):
"""
integer dimensions of this object's data
"""
return self.data.ndim
@property
def flags(self):
return self._flags
@flags.setter
def flags(self, value):
if value is not None:
if isinstance(value, FlagCollection):
if value.shape != self.shape:
raise ValueError("dimensions of FlagCollection does not match data")
else:
self._flags = value
else:
flags = np.array(value, copy=False)
if flags.shape != self.shape:
raise ValueError("dimensions of flags do not match data")
else:
self._flags = flags
else:
self._flags = value
def __array__(self):
"""
This allows code that requests a Numpy array to use an NDData
object as a Numpy array.
"""
if self.mask is not None:
return np.ma.masked_array(self.data, self.mask)
else:
return np.array(self.data)
def __array_prepare__(self, array, context=None):
"""
This ensures that a masked array is returned if self is masked.
"""
if self.mask is not None:
return np.ma.masked_array(array, self.mask)
else:
return array
def convert_unit_to(self, unit, equivalencies=[]):
"""
Returns a new `NDData` object whose values have been converted
to a new unit.
Parameters
----------
unit : `astropy.units.UnitBase` instance or str
The unit to convert to.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
Returns
-------
result : `~astropy.nddata.NDData`
The resulting dataset
Raises
------
UnitsError
If units are inconsistent.
"""
if self.unit is None:
raise ValueError("No unit specified on source data")
data = self.unit.to(unit, self.data, equivalencies=equivalencies)
if self.uncertainty is not None:
uncertainty_values = self.unit.to(unit, self.uncertainty.array,
equivalencies=equivalencies)
# should work for any uncertainty class
uncertainty = self.uncertainty.__class__(uncertainty_values)
else:
uncertainty = None
if self.mask is not None:
new_mask = self.mask.copy()
else:
new_mask = None
# Call __class__ in case we are dealing with an inherited type
result = self.__class__(data, uncertainty=uncertainty,
mask=new_mask,
wcs=self.wcs,
meta=self.meta, unit=unit)
return result
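# Minimal usage sketch (illustrative, not taken from the astropy docs);
# assumes astropy.units and StdDevUncertainty are available:
#
#   from astropy import units as u
#   from astropy.nddata import StdDevUncertainty
#   nd = NDDataArray([1.0, 2.0], unit=u.m,
#                    uncertainty=StdDevUncertainty([0.1, 0.2]))
#   nd_cm = nd.convert_unit_to(u.cm)   # data scaled to [100., 200.]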
| {
"content_hash": "b6db4979b1e5d817c39c67ade58e4fec",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 88,
"avg_line_length": 34.52097902097902,
"alnum_prop": 0.582801580066849,
"repo_name": "bsipocz/astropy",
"id": "a230151d918666471f8803f18b339dcbd17703b7",
"size": "10000",
"binary": false,
"copies": "3",
"ref": "refs/heads/hacking",
"path": "astropy/nddata/compat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "442627"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9395160"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
"""
Installation script for Quantum's development virtualenv
"""
import os
import subprocess
import sys
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
VENV_EXISTS = bool(os.path.exists(VENV))
def die(message, *args):
print >> sys.stderr, message % args
sys.exit(1)
def run_command(cmd, redirect_output=True, check_exit_code=True):
"""
Runs a command in an out-of-process shell, returning the
output of that command. Working directory is ROOT.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
raise Exception('Command "%s" failed.\n%s' % (' '.join(cmd), output))
return output
HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'],
check_exit_code=False).strip())
HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'],
check_exit_code=False).strip())
def check_dependencies():
"""Make sure virtualenv is in the path."""
if not HAS_VIRTUALENV:
raise Exception('Virtualenv not found. ' + \
'Try installing python-virtualenv')
print 'done.'
def create_virtualenv(venv=VENV, install_pip=False):
"""Creates the virtual environment and installs PIP only into the
virtual environment
"""
print 'Creating venv...',
install = ['virtualenv', '-q', venv]
run_command(install)
print 'done.'
print 'Installing pip in virtualenv...',
if install_pip and \
not run_command(['tools/with_venv.sh', 'easy_install',
'pip>1.0']):
die("Failed to install pip.")
print 'done.'
def install_dependencies(venv=VENV):
print 'Installing dependencies with pip (this can take a while)...'
run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r',
PIP_REQUIRES], redirect_output=False)
# Tell the virtual env how to "import quantum"
pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
"quantum.pth")
f = open(pthfile, 'w')
f.write("%s\n" % ROOT)
def print_help():
help = """
Quantum development environment setup is complete.
Quantum development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Quantum virtualenv for the extent of your current shell
session you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print help
def main(argv):
check_dependencies()
create_virtualenv()
install_dependencies()
print_help()
if __name__ == '__main__':
main(sys.argv)
| {
"content_hash": "4880c9d9c4a538fae2d1640c4790de48",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 77,
"avg_line_length": 28.205357142857142,
"alnum_prop": 0.6293130737575182,
"repo_name": "rcbops/quantum-buildpackage",
"id": "040adbfe8b99c2de2854a6f922ef7369a8534413",
"size": "3992",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/install_venv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "60525"
},
{
"name": "Python",
"bytes": "872355"
},
{
"name": "Shell",
"bytes": "7516"
}
],
"symlink_target": ""
} |
"""A module containing tests for the library implementation of accessing resources."""
import collections
from decimal import Decimal
import os
import re
import pytest
import iati.constants
import iati.resources
import iati.utilities
import iati.validator
import iati.version
import iati.tests.resources
class TestResourceConstants:
"""A container for tests relating to checks of resource constants."""
@pytest.mark.parametrize('folder_path', [
iati.resources.BASE_PATH_STANDARD,
iati.resources.BASE_PATH_LIB_DATA
])
def test_base_folders_valid_values(self, folder_path):
"""Test that constants that should be folder paths 2-levels deep are paths that are 2-levels deep, rooted at the base folder.
The contents of the second component is tested in test_folder_names_valid_values()
"""
path_components = folder_path.split(os.path.sep)
assert len(path_components) == 2
assert path_components[0] == iati.resources.BASE_PATH
@pytest.mark.parametrize('folder_name', [
iati.resources.BASE_PATH,
iati.resources.BASE_PATH_STANDARD.split(os.path.sep).pop(),
iati.resources.BASE_PATH_LIB_DATA.split(os.path.sep).pop(),
iati.resources.PATH_CODELISTS,
iati.resources.PATH_SCHEMAS,
iati.resources.PATH_RULESETS,
iati.resources.PATH_VERSION_INDEPENDENT
])
def test_folder_names_valid_values(self, folder_name):
"""Test that constants that should be folder names are lower case strings separated by underscores."""
folder_name_regex = re.compile(r'^([a-z]+_)*[a-z]+$')
assert re.match(folder_name_regex, folder_name)
@pytest.mark.parametrize('file_name', [
iati.resources.FILE_CODELIST_MAPPING,
iati.resources.FILE_RULESET_SCHEMA_NAME,
iati.resources.FILE_RULESET_STANDARD_NAME,
iati.resources.FILE_SCHEMA_ACTIVITY_NAME,
iati.resources.FILE_SCHEMA_ORGANISATION_NAME
])
def test_file_names_valid_values(self, file_name):
"""Test that constants that should be file names are lower case strings separated by hyphens or underscores."""
file_name_regex = re.compile(r'^([a-z]+[\-_])*[a-z]+$')
assert re.match(file_name_regex, file_name)
@pytest.mark.parametrize('file_extension', [
iati.resources.FILE_CODELIST_EXTENSION,
iati.resources.FILE_DATA_EXTENSION,
iati.resources.FILE_RULESET_EXTENSION,
iati.resources.FILE_SCHEMA_EXTENSION
])
def test_file_extensions_valid_values(self, file_extension):
"""Test that constants that should be file extensions are a dot followed by a lower case string."""
file_extension_regex = re.compile(r'^\.[a-z]+$')
assert re.match(file_extension_regex, file_extension)
class TestResourceFilesystemPaths:
"""A container for tests relating to specific filesystem paths."""
def test_resource_filesystem_path(self, filename_no_meaning):
"""Check that resource file names are found correctly."""
base_path = iati.resources.resource_filesystem_path('')
full_path = iati.resources.resource_filesystem_path(filename_no_meaning)
assert len(full_path) > len(filename_no_meaning)
assert full_path.startswith(base_path)
assert full_path.endswith(filename_no_meaning)
assert os.path.abspath(full_path) == full_path
def test_resource_filesystem_path_folders(self, folderpath_no_meaning):
"""Check that resource folder names are found correctly."""
base_path = iati.resources.resource_filesystem_path('')
full_path = iati.resources.resource_filesystem_path(folderpath_no_meaning)
assert len(full_path) > len(folderpath_no_meaning)
assert full_path.startswith(base_path)
assert full_path.endswith(folderpath_no_meaning)
assert os.path.abspath(full_path) + os.path.sep == full_path
def test_resource_filesystem_path_empty_path(self, filepath_empty):
"""Check that the base resource folder is located when given an empty filepath."""
full_path = iati.resources.resource_filesystem_path(filepath_empty)
assert full_path != ''
assert os.path.isdir(full_path)
class TestResourceLibData:
"""A container for tests relating to handling paths for pyIATI library-specific data."""
def test_create_lib_data_path(self, filename_no_meaning):
"""Check that library data can be located."""
full_path = iati.resources.create_lib_data_path(filename_no_meaning)
assert iati.resources.BASE_PATH_LIB_DATA in full_path
assert full_path.endswith(filename_no_meaning)
class TestResourceHandlingInvalidPaths:
"""A container for tests relating to handling paths that are invalid and being passed to functions that are version-independent."""
@pytest.fixture(params=[
iati.resources.create_lib_data_path,
iati.resources.resource_filesystem_path
])
def resource_func(self, request):
"""A resource function that takes in file paths as an input."""
return request.param
def test_create_lib_data_path_empty_path(self, filepath_empty):
"""Check that a ValueError is raised when given an empty filepath."""
with pytest.raises(ValueError):
iati.resources.create_lib_data_path(filepath_empty)
def test_create_lib_data_path_valueerr(self, filepath_invalid_value, resource_func):
"""Check that functions cause a value error when given a string that cannot be a filepath."""
with pytest.raises(ValueError):
resource_func(filepath_invalid_value)
def test_create_lib_data_path_typeerr(self, filepath_invalid_type, resource_func):
"""Check that functions cause a type error when given a path of an incorrect type."""
with pytest.raises(TypeError):
resource_func(filepath_invalid_type)
class TestResourcePathComponents:
"""A container for tests relating to generation of component parts of a resource path."""
@pytest.mark.parametrize('version, expected_version_foldername', [
('2.03', '2-03'),
('2.02', '2-02'),
('2.01', '2-01'),
('1.05', '1-05'),
('1.04', '1-04'),
('1.03', '1-03'),
('1.02', '1-02'),
('1.01', '1-01'),
('2.2.0', '2-03'),
('2.1.10', '2-02'),
('2.0.5', '2-01'),
('1.4.4', '1-05'),
('1.3.3', '1-04'),
('1.2.2', '1-03'),
('1.1.1', '1-02'),
('1.1.0', '1-02'),
('1.0.0', '1-01'),
(Decimal('1.05'), '1-05'),
(Decimal('1.04'), '1-04'),
(Decimal('1.03'), '1-03'),
(Decimal('1.02'), '1-02'),
(Decimal('1.01'), '1-01'),
(iati.Version('2.03'), '2-03'),
(iati.Version('2.02'), '2-02'),
(iati.Version('2.01'), '2-01'),
(iati.Version('1.05'), '1-05'),
(iati.Version('1.04'), '1-04'),
(iati.Version('1.03'), '1-03'),
(iati.Version('1.02'), '1-02'),
(iati.Version('1.01'), '1-01'),
('1', '1'),
('2', '2'),
(iati.version.STANDARD_VERSION_ANY, iati.resources.PATH_VERSION_INDEPENDENT)
])
@pytest.mark.latest_version('2.03')
def test_folder_name_for_version_generation_known(self, version, expected_version_foldername):
"""Check that the correct folder name is returned for known version numbers."""
folder_name = iati.resources.folder_name_for_version(version)
assert expected_version_foldername == folder_name
def test_folder_name_for_version_generation_unknown(self, std_ver_all_mixedinst_valid_unknown):
"""Check that a ValueError is raised when trying to create a folder name for an unknown version."""
with pytest.raises(ValueError):
iati.resources.folder_name_for_version(std_ver_all_mixedinst_valid_unknown)
def test_folder_name_for_version_valueerr(self, std_ver_all_uninst_valueerr):
"""Check that a version of the Standard of the correct type, but an incorrect value raises a ValueError."""
with pytest.raises(ValueError):
iati.resources.folder_name_for_version(std_ver_all_uninst_valueerr)
def test_folder_name_for_version_typeerr(self, std_ver_all_uninst_typeerr):
"""Check that a version of the Standard of the correct type, but an incorrect value raises a TypeError."""
with pytest.raises(TypeError):
iati.resources.folder_name_for_version(std_ver_all_uninst_typeerr)
def test_folder_name_for_version_requires_version(self):
"""Check that a version must be specified when requesting a folder name for a version (there is no default)."""
with pytest.raises(TypeError):
iati.resources.folder_name_for_version() # pylint: disable=no-value-for-parameter
class TestResoucePathCreationEntireStandard:
"""A container for tests relating to generating entire filepaths for any part of the Standard."""
def test_folder_path_for_version_known(self, std_ver_any_mixedinst_valid_known):
"""Check that expected components are present within folder paths for data for known versions of the IATI Standard."""
expected_components = ['resources', 'standard', iati.resources.BASE_PATH_STANDARD]
version_folder = iati.resources.folder_name_for_version(std_ver_any_mixedinst_valid_known)
full_path = iati.resources.folder_path_for_version(std_ver_any_mixedinst_valid_known)
assert version_folder in full_path
for component in expected_components:
assert component in full_path
def test_folder_path_for_version_unknown_valueerr(self, std_ver_all_mixedinst_valid_unknown):
"""Check that a ValueError is raised when trying to create a path for an unknown version of the IATI Standard."""
with pytest.raises(ValueError):
iati.resources.folder_path_for_version(std_ver_all_mixedinst_valid_unknown)
def test_folder_path_for_version_typeerr(self, std_ver_all_uninst_typeerr):
"""Check that a TypeError is raised when trying to create a folder path for a value of a type that cannot be a version number."""
with pytest.raises(TypeError):
iati.resources.folder_path_for_version(std_ver_all_uninst_typeerr)
def test_folder_path_for_version_requires_version(self):
"""Check that a version must be specified when requesting a folder path for a version (there is no default)."""
with pytest.raises(TypeError):
iati.resources.folder_path_for_version() # pylint: disable=no-value-for-parameter
def test_path_for_version_known(self, filename_no_meaning, std_ver_any_mixedinst_valid_known):
"""Check that expected components are present within absolute paths for data for known versions of the IATI Standard."""
relative_path = iati.resources.folder_path_for_version(std_ver_any_mixedinst_valid_known)
abs_path = iati.resources.path_for_version(filename_no_meaning, std_ver_any_mixedinst_valid_known)
assert abs_path.startswith(os.path.sep)
assert relative_path in abs_path
assert abs_path.endswith(filename_no_meaning)
def test_path_for_version_empty_path(self, filepath_empty, std_ver_any_mixedinst_valid_known):
"""Check that expected components are present within an absolute path for an empty path within a version folder."""
relative_path = iati.resources.folder_path_for_version(std_ver_any_mixedinst_valid_known)
abs_path = iati.resources.path_for_version(filepath_empty, std_ver_any_mixedinst_valid_known)
assert abs_path.startswith(os.path.sep)
assert relative_path in abs_path
assert abs_path.split(os.path.sep).pop() == filepath_empty
# there are not currently folders for integer resource versions
if isinstance(std_ver_any_mixedinst_valid_known, iati.Version) or std_ver_any_mixedinst_valid_known == iati.version.STANDARD_VERSION_ANY:
assert os.path.isdir(abs_path)
def test_path_for_version_unknown_ver_valueerr(self, filename_no_meaning_single, std_ver_all_mixedinst_valid_unknown):
"""Check that a ValueError is raised when trying to create a path for an unknown version of the IATI Standard."""
with pytest.raises(ValueError):
iati.resources.path_for_version(filename_no_meaning_single, std_ver_all_mixedinst_valid_unknown)
def test_path_for_version_unknown_ver_typeerr(self, filename_no_meaning_single, std_ver_all_uninst_typeerr):
"""Check that a TypeError is raised when trying to create a folder path for a value of a type that cannot be a version number."""
with pytest.raises(TypeError):
iati.resources.path_for_version(filename_no_meaning_single, std_ver_all_uninst_typeerr)
def test_path_for_version_requires_version(self, filename_no_meaning_single):
"""Check that a version must be specified when requesting a path for a version (there is no default)."""
with pytest.raises(TypeError):
iati.resources.path_for_version(filename_no_meaning_single) # pylint: disable=no-value-for-parameter
def test_path_for_version_path_valueerr(self, filepath_invalid_value, std_ver_minor_inst_valid_single):
"""Check that a ValueError is raised when trying to create a path from a string that cannot be a filepath."""
with pytest.raises(ValueError):
iati.resources.path_for_version(filepath_invalid_value, std_ver_minor_inst_valid_single)
def test_path_for_version_path_typeerr(self, filepath_invalid_type, std_ver_minor_inst_valid_single):
"""Check that a TypeError is raised when trying to create an absolute path from a path of an incorrect type."""
with pytest.raises(TypeError):
iati.resources.path_for_version(filepath_invalid_type, std_ver_minor_inst_valid_single)
class TestResourcePathCreationCodelistMapping:
"""A container for tests relating to creating Codelist Mapping File paths."""
def test_create_codelist_mapping_path_minor(self, std_ver_minor_mixedinst_valid_known):
"""Check that there is a single Codelist Mapping File for minor versions."""
version_folder = iati.resources.folder_name_for_version(std_ver_minor_mixedinst_valid_known)
path = iati.resources.create_codelist_mapping_path(std_ver_minor_mixedinst_valid_known)
assert isinstance(path, str)
assert path.endswith(iati.resources.FILE_CODELIST_MAPPING + iati.resources.FILE_CODELIST_EXTENSION)
assert version_folder in path
def test_create_codelist_mapping_path_major(self, std_ver_major_uninst_valid_known):
"""Check that requesting a Codelist Mapping File for a major version returns the same path as for the last minor within the major."""
minor_version = max(iati.version.versions_for_integer(std_ver_major_uninst_valid_known))
path_major = iati.resources.create_codelist_mapping_path(std_ver_major_uninst_valid_known)
path_minor = iati.resources.create_codelist_mapping_path(minor_version)
assert path_major == path_minor
def test_create_codelist_mapping_path_version_independent(self):
"""Check that a ValueError is raised when requesting a version-independent Codelist Mapping File."""
with pytest.raises(ValueError):
iati.resources.create_codelist_mapping_path(iati.version.STANDARD_VERSION_ANY)
def test_create_codelist_mapping_path_unknown(self, std_ver_all_mixedinst_valid_unknown):
"""Check that a ValueError is raised when requesting a Codelist Mapping file for an unknown version of the Standard."""
with pytest.raises(ValueError):
iati.resources.create_codelist_mapping_path(std_ver_all_mixedinst_valid_unknown)
def test_create_codelist_mapping_path_no_version(self):
"""Check that specifying a version of the Standard to create a Codelist Mapping path for is required."""
with pytest.raises(TypeError):
iati.resources.create_codelist_mapping_path() # pylint: disable=no-value-for-parameter
def test_create_codelist_mapping_path_typerr(self, std_ver_all_uninst_typeerr):
"""Check that a TypeError is raised when using a generation function to create a Codelist Mapping path from a version of an incorrect type."""
with pytest.raises(TypeError):
iati.resources.create_codelist_mapping_path(std_ver_all_uninst_typeerr)
class TestResourcePathCreationCoreComponents:
"""A container for tests relating to path creation for core components in the IATI Standard.
Core components include Codelists, Rulesets and Schemas.
Each of these should act equivalently across different version and path inputs since their parameters are the same.
Schemas are available at more versions than Rulesets, though this is not an issue since the create_x_path() functions do not check whether a path actually exists.
"""
@pytest.fixture(params=[
iati.resources.create_codelist_path,
iati.resources.create_ruleset_path,
iati.resources.create_schema_path
])
def func_to_test(self, request):
"""Return a function to test."""
return request.param
@pytest.fixture(params=[
iati.resources.create_ruleset_path,
iati.resources.create_schema_path
])
def func_to_test_decimalised_integers(self, request):
"""Return a function to test that treats integers as the latest minor within the major."""
return request.param
@pytest.fixture(params=[
(iati.resources.create_codelist_path, iati.resources.FILE_CODELIST_EXTENSION, iati.resources.PATH_CODELISTS),
(iati.resources.create_ruleset_path, iati.resources.FILE_RULESET_EXTENSION, iati.resources.PATH_RULESETS),
(iati.resources.create_schema_path, iati.resources.FILE_SCHEMA_EXTENSION, iati.resources.PATH_SCHEMAS)
])
def func_plus_expected_data(self, request):
"""Return a tuple containing a function to test, plus the extension and a component that should be present in the returned path."""
output = collections.namedtuple('output', 'func_to_test expected_extension expected_component')
return output(func_to_test=request.param[0], expected_extension=request.param[1], expected_component=request.param[2])
def test_create_path_minor_known(self, filename_no_meaning, std_ver_minor_independent_mixedinst_valid_known, func_plus_expected_data):
"""Check that the expected components are present in a path from a generation function at a known minor or independent version of the Standard."""
version_folder = iati.resources.folder_name_for_version(std_ver_minor_independent_mixedinst_valid_known)
full_path = func_plus_expected_data.func_to_test(filename_no_meaning, std_ver_minor_independent_mixedinst_valid_known)
assert isinstance(full_path, str)
assert full_path.endswith(filename_no_meaning + func_plus_expected_data.expected_extension)
assert version_folder in full_path
assert func_plus_expected_data.expected_component in full_path
def test_create_path_major_known_codelists(self, filename_no_meaning_single, std_ver_major_uninst_valid_known):
"""Check that a generation function returns a value for a major version.
This is relevant to Codelists, but not other components. These are tested separately.
"""
version_folder = iati.resources.folder_name_for_version(std_ver_major_uninst_valid_known)
full_path = iati.resources.create_codelist_path(filename_no_meaning_single, std_ver_major_uninst_valid_known)
assert isinstance(full_path, str)
assert full_path.endswith(filename_no_meaning_single + iati.resources.FILE_CODELIST_EXTENSION)
assert os.path.sep + version_folder + os.path.sep in full_path
assert iati.resources.PATH_CODELISTS in full_path
def test_create_path_major_known_decimalised_integers(self, filename_no_meaning_single, std_ver_major_uninst_valid_known, func_to_test_decimalised_integers):
"""Check that a generation function returns the same value for a major version as the last minor within the major.
This is relevant to some Standard components, though not all. As such, it uses a different fixture to other functions in this class.
"""
minor_version = max(iati.version.versions_for_integer(std_ver_major_uninst_valid_known))
major_path = func_to_test_decimalised_integers(filename_no_meaning_single, std_ver_major_uninst_valid_known)
minor_path = func_to_test_decimalised_integers(filename_no_meaning_single, minor_version)
assert major_path == minor_path
def test_create_path_no_version(self, filename_no_meaning_single, func_to_test):
"""Check that specifying a version of the Standard to create a path for is required."""
with pytest.raises(TypeError):
func_to_test(filename_no_meaning_single)
def test_create_path_unknown(self, filename_no_meaning_single, std_ver_all_mixedinst_valid_unknown, func_to_test):
"""Check that a ValueError is raised when using a generation function to create a path for a at an unknown version of the Standard."""
with pytest.raises(ValueError):
func_to_test(filename_no_meaning_single, std_ver_all_mixedinst_valid_unknown)
def test_create_path_ver_typerr(self, filename_no_meaning_single, std_ver_all_uninst_typeerr, func_to_test):
"""Check that a TypeError is raised when using a generation function to create a path from a version of an incorrect type."""
with pytest.raises(TypeError):
func_to_test(filename_no_meaning_single, std_ver_all_uninst_typeerr)
def test_create_path_path_valueerr(self, filepath_invalid_value, std_ver_minor_inst_valid_single, func_to_test):
"""Check that a ValueError is raised when providing a generation function a path to work from that is a string that cannot be a filepath."""
with pytest.raises(ValueError):
func_to_test(filepath_invalid_value, std_ver_minor_inst_valid_single)
def test_create_path_path_typeerr(self, filepath_invalid_type, std_ver_minor_inst_valid_single, func_to_test):
"""Check that a TypeError is raised when providing a generation function a path to work from that is of a type that cannot be a filepath."""
with pytest.raises(TypeError):
func_to_test(filepath_invalid_type, std_ver_minor_inst_valid_single)
class TestResourceGetCodelistPaths:
"""A container for get_codelist_paths() tests."""
def test_find_codelist_paths(self, codelist_lengths_by_version):
"""Check that all codelist paths are being found.
This covers major, minor and version-independent.
"""
decimalised_version = iati.version._decimalise_integer(codelist_lengths_by_version.version) # pylint: disable=protected-access
expected_root = iati.resources.path_for_version(iati.resources.PATH_CODELISTS, decimalised_version)
paths = iati.resources.get_codelist_paths(codelist_lengths_by_version.version)
assert len(paths) == len(set(paths))
assert len(paths) == codelist_lengths_by_version.expected_length
for path in paths:
assert path[-4:] == iati.resources.FILE_CODELIST_EXTENSION
assert expected_root in path
assert os.path.isfile(path)
    def test_get_codelist_paths_independent(self):
"""Test getting a list of version-independent Codelist files.
Todo:
Look to better determine how to access the different categories of Codelist.
"""
result = iati.resources.get_codelist_paths(iati.version.STANDARD_VERSION_ANY)
assert result == []
def test_get_codelist_paths_minor_partsupport(self, std_ver_minor_mixedinst_valid_partsupport):
"""Test getting a list of Codelist paths. The requested version is partially supported by pyIATI."""
result = iati.resources.get_codelist_paths(std_ver_minor_mixedinst_valid_partsupport)
assert result == []
def test_get_codelist_paths_minor_unknown(self, std_ver_all_mixedinst_valid_unknown):
"""Test getting a list of Codelist paths. The requested version is not known by pyIATI."""
result = iati.resources.get_codelist_paths(std_ver_all_mixedinst_valid_unknown)
assert result == []
class TestResourceGetCodelistMappingPaths:
"""A container for get_codelist_mapping_paths() tests.
Note:
This class contains very similar tests to the equivalent for Rulesets. They are different because the Ruleset creation function takes two arguments, not one.
"""
def test_get_codelist_mapping_paths_minor_fullsupport(self, std_ver_minor_mixedinst_valid_fullsupport):
"""Test getting a list of Codelist Mapping paths. The requested version is fully supported by pyIATI."""
result = iati.resources.get_codelist_mapping_paths(std_ver_minor_mixedinst_valid_fullsupport)
assert len(result) == 1
assert result[0] == iati.resources.create_codelist_mapping_path(std_ver_minor_mixedinst_valid_fullsupport)
assert os.path.isfile(result[0])
def test_get_codelist_mapping_paths_independent(self):
"""Test getting a list of version-independent Codelist Mapping files."""
result = iati.resources.get_codelist_mapping_paths(iati.version.STANDARD_VERSION_ANY)
assert result == []
def test_get_codelist_mapping_paths_minor_partsupport(self, std_ver_minor_mixedinst_valid_partsupport):
"""Test getting a list of Codelist Mapping paths. The requested version is partially supported by pyIATI."""
result = iati.resources.get_codelist_mapping_paths(std_ver_minor_mixedinst_valid_partsupport)
assert result == []
def test_get_codelist_mapping_paths_minor_unknown(self, std_ver_all_mixedinst_valid_unknown):
"""Test getting a list of Codelist Mapping paths. The requested version is not known by pyIATI."""
result = iati.resources.get_codelist_mapping_paths(std_ver_all_mixedinst_valid_unknown)
assert result == []
def test_get_codelist_mapping_paths_major_known(self, std_ver_major_uninst_valid_known):
"""Test getting a list of Codelist Mapping paths. The requested version is a known integer version. The list should contain paths for each supported minor within the major."""
supported_versions_at_major = [version for version in iati.version.versions_for_integer(std_ver_major_uninst_valid_known) if version in iati.version.STANDARD_VERSIONS_SUPPORTED]
expected_path_count = len(supported_versions_at_major)
result = iati.resources.get_codelist_mapping_paths(std_ver_major_uninst_valid_known)
assert len(result) == expected_path_count
for version in supported_versions_at_major:
assert iati.resources.create_codelist_mapping_path(version) in result
class TestResourceGetRulesetPaths:
"""A container for get_ruleset_paths() tests."""
def test_get_ruleset_paths_minor_fullsupport(self, std_ver_minor_mixedinst_valid_fullsupport):
"""Test getting a list of Ruleset paths. The requested version is fully supported by pyIATI."""
result = iati.resources.get_ruleset_paths(std_ver_minor_mixedinst_valid_fullsupport)
assert len(result) == 1
assert result[0] == iati.resources.create_ruleset_path(iati.resources.FILE_RULESET_STANDARD_NAME, std_ver_minor_mixedinst_valid_fullsupport)
assert os.path.isfile(result[0])
def test_get_ruleset_paths_independent(self):
"""Test getting a list of version-independent standard Rulesets."""
result = iati.resources.get_ruleset_paths(iati.version.STANDARD_VERSION_ANY)
assert result == []
def test_get_ruleset_paths_minor_partsupport(self, std_ver_minor_mixedinst_valid_partsupport):
"""Test getting a list of Ruleset paths. The requested version is partially supported by pyIATI."""
result = iati.resources.get_ruleset_paths(std_ver_minor_mixedinst_valid_partsupport)
assert result == []
def test_get_ruleset_paths_minor_unknown(self, std_ver_all_mixedinst_valid_unknown):
"""Test getting a list of Ruleset paths. The requested version is not known by pyIATI."""
result = iati.resources.get_ruleset_paths(std_ver_all_mixedinst_valid_unknown)
assert result == []
def test_get_ruleset_paths_major_known(self, std_ver_major_uninst_valid_known):
"""Test getting a list of Ruleset paths. The requested version is a known integer version. The list should contain paths for each supported minor within the major."""
supported_versions_at_major = [version for version in iati.version.versions_for_integer(std_ver_major_uninst_valid_known) if version in iati.version.STANDARD_VERSIONS_SUPPORTED]
expected_path_count = len(supported_versions_at_major)
result = iati.resources.get_ruleset_paths(std_ver_major_uninst_valid_known)
assert len(result) == expected_path_count
for version in supported_versions_at_major:
assert iati.resources.create_ruleset_path(iati.resources.FILE_RULESET_STANDARD_NAME, version) in result
class TestResourceGetSchemaPaths:
"""A container for get_x_schema_paths() tests."""
@pytest.fixture(params=[
(iati.resources.get_activity_schema_paths, iati.resources.FILE_SCHEMA_ACTIVITY_NAME),
(iati.resources.get_organisation_schema_paths, iati.resources.FILE_SCHEMA_ORGANISATION_NAME)
])
def func_and_name(self, request):
"""Return a named tuple containing a function to generate the paths for a type of Schema, plus the name of the Schema."""
output = collections.namedtuple('output', 'func schema_name')
return output(func=request.param[0], schema_name=request.param[1])
@pytest.fixture(params=[
iati.resources.get_all_schema_paths,
iati.resources.get_activity_schema_paths,
iati.resources.get_organisation_schema_paths
])
def schema_path_func_all(self, request):
"""Return a function that returns a list of paths for Schema resources."""
return request.param
def test_get_schema_paths_minor_known(self, std_ver_minor_mixedinst_valid_known, func_and_name):
"""Test getting a list of Org or Activity Schema paths. The requested version is known by pyIATI."""
result = func_and_name.func(std_ver_minor_mixedinst_valid_known)
assert len(result) == 1
assert result[0] == iati.resources.create_schema_path(func_and_name.schema_name, std_ver_minor_mixedinst_valid_known)
assert os.path.isfile(result[0])
def test_get_schema_paths_minor_unknown(self, std_ver_all_mixedinst_valid_unknown, schema_path_func_all):
"""Test getting a list of Org or Activity Schema paths. The requested version is not known by pyIATI."""
result = schema_path_func_all(std_ver_all_mixedinst_valid_unknown)
assert result == []
def test_get_schema_paths_independent(self, schema_path_func_all):
"""Test getting a list of version-independent Org or Activity Schemas."""
result = schema_path_func_all(iati.version.STANDARD_VERSION_ANY)
assert result == []
def test_get_schema_paths_major_known(self, std_ver_major_uninst_valid_known, func_and_name):
"""Test getting a list of Org or Activity Schema paths. The requested version is a known integer version. The list should contain paths for each supported minor within the major."""
versions_at_major = [version for version in iati.version.versions_for_integer(std_ver_major_uninst_valid_known)]
expected_path_count = len(versions_at_major)
result = func_and_name.func(std_ver_major_uninst_valid_known)
assert len(result) == expected_path_count
for version in versions_at_major:
assert iati.resources.create_schema_path(func_and_name.schema_name, version) in result
def test_get_all_schema_paths_minor_known(self, std_ver_minor_mixedinst_valid_known):
"""Test getting a list of all Schema paths. The requested version is known by pyIATI."""
activity_path = iati.resources.get_activity_schema_paths(std_ver_minor_mixedinst_valid_known)[0]
org_path = iati.resources.get_organisation_schema_paths(std_ver_minor_mixedinst_valid_known)[0]
result = iati.resources.get_all_schema_paths(std_ver_minor_mixedinst_valid_known)
assert len(result) == 2
assert activity_path in result
assert org_path in result
def test_get_all_schema_paths_major_known(self, std_ver_major_uninst_valid_known):
"""Test getting a list of all Schema paths. The requested version is a known integer version. The list should contain paths for each supported minor within the major."""
versions_at_major = [version for version in iati.version.versions_for_integer(std_ver_major_uninst_valid_known)]
expected_path_count = len(versions_at_major) * 2
activity_paths = iati.resources.get_activity_schema_paths(std_ver_major_uninst_valid_known)
org_paths = iati.resources.get_organisation_schema_paths(std_ver_major_uninst_valid_known)
result = iati.resources.get_all_schema_paths(std_ver_major_uninst_valid_known)
assert len(result) == expected_path_count
for path in activity_paths:
assert path in result
for path in org_paths:
assert path in result
class TestResourceGetPathsNotAVersion:
"""A container for get_*_paths() tests where the function is provided a value that cannot represent a version."""
@pytest.fixture(params=[
iati.resources.get_codelist_paths,
iati.resources.get_codelist_mapping_paths,
iati.resources.get_ruleset_paths,
iati.resources.get_all_schema_paths,
iati.resources.get_activity_schema_paths,
iati.resources.get_organisation_schema_paths
])
def func_to_test(self, request):
"""Return a function to test the behavior of. The function takes a single argument, which takes a value that can represent a version number."""
return request.param
def test_get_x_path_valueerr(self, std_ver_all_uninst_valueerr, func_to_test):
"""Check that a ValueError is raised when requesting paths for an value that cannot be a version of the Standard."""
with pytest.raises(ValueError):
func_to_test(std_ver_all_uninst_valueerr)
def test_get_x_path_no_version(self, func_to_test):
"""Check that a TypeError is raised when requesting paths without specifying a version."""
with pytest.raises(TypeError):
func_to_test()
def test_get_x_path_typerr(self, std_ver_all_uninst_typeerr, func_to_test):
"""Check that a TypeError is raised when requesting paths for a version of an incorrect type."""
with pytest.raises(TypeError):
func_to_test(std_ver_all_uninst_typeerr)
class TestResourceTestDataFolders:
"""A container for tests relating to resource folders."""
@pytest.mark.parametrize('version, expected_num_paths', [
('2.03', 323),
('2.02', 237),
('2.01', 217),
('1.05', 17),
('1.04', 17),
('1.03', 17),
('1.02', 17),
('1.01', 16),
('1', 0),
('2', 0),
(iati.version.STANDARD_VERSION_ANY, 0)
])
@pytest.mark.latest_version('2.03')
def test_get_test_data_paths_in_folder(self, version, expected_num_paths):
"""Check that test data is being found in specified subfolders.
Look for the number of paths in the `ssot-activity-xml-fail` folder.
"""
paths = iati.tests.resources.get_test_data_paths_in_folder('ssot-activity-xml-fail', version)
assert len(paths) == expected_num_paths
| {
"content_hash": "5697c6aad0478b8cbaf4d0ba30309004",
"timestamp": "",
"source": "github",
"line_count": 687,
"max_line_length": 189,
"avg_line_length": 52.41048034934498,
"alnum_prop": 0.6968005332444592,
"repo_name": "IATI/iati.core",
"id": "36ee80c695f37c99b96335d3d3427961a6eae8fa",
"size": "36006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iati/tests/test_resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "861"
},
{
"name": "Python",
"bytes": "126390"
},
{
"name": "Shell",
"bytes": "538"
}
],
"symlink_target": ""
} |
def func_1(apple, my_list):
if apple<10:
# Do something
my_list.append(apple)
return my_list[1:]
def func_2(spongebob, squarepants):
"""A less messy function"""
for char in spongebob:
if char in squarepants:
return char
unused=1
return None
| {
"content_hash": "f00177f73ab6b3b3555738296fc8c0f4",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 35,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.5927152317880795,
"repo_name": "edx/diff-cover",
"id": "08991bc7eede1d7399bda54bfa86a0b4adce1ea6",
"size": "302",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "diff_cover/tests/fixtures/violations_test_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "355"
},
{
"name": "Python",
"bytes": "178159"
}
],
"symlink_target": ""
} |
import math
from collections import defaultdict
from olymap.utilities import get_oid, get_name, get_subkind, to_oid, loop_here2, get_ship_damage
from olypy.db import loop_here
from olymap.utilities import calc_ship_pct_loaded
from olymap.storm import build_basic_storm_dict
from olymap.char import build_basic_char_dict
def get_complete(v):
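    # Completion percentage from the 'SL' sub-record: effort given ('eg') over
    # effort required ('er'); a ship needing no effort at all counts as 100%.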
effort_given = int(v.get('SL', {}).get('eg', [0])[0])
effort_required = int(v.get('SL', {}).get('er', [0])[0])
if effort_required > 0:
complete = (effort_given / effort_required) * 100
elif effort_required == 0 and effort_given == 0:
complete = 100
else:
complete = 0
return complete
def get_load(k, v, data):
return calc_ship_pct_loaded(data, k, v)
def get_defense(v):
return v.get('SL', {}).get('de', [0])
def build_loc_dict(v, data):
loc_id = v['LI']['wh'][0]
loc_rec = data[loc_id]
loc_dict = {'id': loc_id,
'oid': get_oid(loc_id),
'name': get_name(loc_rec),
'subkind': get_subkind(loc_rec, data)}
return loc_dict
def get_owner(v):
owner_id = v.get('LI', {}).get('hl', [None])[0]
if owner_id is not None:
return owner_id
return None
def build_owner_dict(v, data):
if get_owner(v) is not None:
owner_id = get_owner(v)
owner_rec = data[owner_id]
owner_dict = {'id': owner_id,
'oid': get_oid(owner_id),
'name': get_name(owner_rec)}
else:
owner_dict = None
return owner_dict
def get_bound_storm(v):
return v.get('SL', {}).get('bs', [None])[0]
def build_storm_dict(v, data):
if get_bound_storm(v) is not None:
storm_id = get_bound_storm(v)
storm_rec = data[storm_id]
storm_dict = build_basic_storm_dict(storm_id, storm_rec, data)
else:
storm_dict = None
return storm_dict
def build_seenhere_dict(k, v, data, instance, pledge_chain, prisoner_chain):
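    # Characters present on this ship: loop_here2() yields (unit id, nesting
    # level) pairs, each rendered as a basic char dict with its level attached.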
    stack_list = loop_here2(data, k)
# print (stack_list)
seen_here = []
# here_list = v.get('LI', {}).get('hl', [None])
if len(stack_list) > 0:
for characters in stack_list:
char_rec = data[characters[0]]
seen_entry = build_basic_char_dict(characters[0], char_rec, data, True)
seen_entry.update({'level': characters[1]})
seen_here.append(seen_entry)
return seen_here
def build_non_prominent_items_dict(k, v, data):
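    # Non-prominent items carried by units here: items flagged prominent
    # ('IT'/'pr' == '1') are skipped, as is anything with zero quantity or
    # zero total weight.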
npi_list = []
seen_here_list = loop_here(data, k, False, True)
list_length = len(seen_here_list)
if list_length > 1:
for un in seen_here_list:
unit_rec = data[un]
if 'il' in unit_rec:
item_list = unit_rec['il']
for items in range(0, len(item_list), 2):
item_rec = data[item_list[items]]
if 'IT' in item_rec and 'pr' in item_rec['IT'] and item_rec['IT']['pr'][0] == '1':
pass
else:
if int(item_list[items + 1]) > 0:
weight = 0
qty = int(item_list[items + 1])
if 'wt' in item_rec['IT']:
weight = int(item_rec['IT']['wt'][0])
total_weight = int(qty * weight)
if total_weight > 0:
npi_entry = {'possessor_oid': to_oid(un),
'possessor_name': unit_rec['na'][0],
'item_oid': to_oid(item_list[items]),
'item_name': item_rec['na'][0],
'qty': qty,
'weight': total_weight}
npi_list.append(npi_entry)
return npi_list
def build_basic_ship_dict(k, v, data):
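    # Summary view of a ship (identity, completion, load, defense, damage,
    # owner, bound storm, location). The 'complete' variant below adds the
    # characters seen here and their non-prominent items.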
ship_dict = {'oid': get_oid(k),
'name': get_name(v),
'subkind': get_subkind(v, data),
'kind': 'ship',
'complete': get_complete(v),
'load': get_load(k, v, data),
'defense': get_defense(v)[0],
'damage': get_ship_damage(v),
'owner': build_owner_dict(v, data),
'storm': build_storm_dict(v, data),
'loc': build_loc_dict(v, data)}
return ship_dict
def build_complete_ship_dict(k, v, data, instance, pledge_chain, prisoner_chain):
ship_dict = {'oid': get_oid(k),
'name': get_name(v),
'subkind': get_subkind(v, data),
                 'kind': 'ship',
'complete': get_complete(v),
'load': get_load(k, v, data),
'defense': get_defense(v)[0],
'damage': get_ship_damage(v),
'owner': build_owner_dict(v, data),
'storm': build_storm_dict(v, data),
'seen_here': build_seenhere_dict(k, v, data, instance, pledge_chain, prisoner_chain),
'non_prominent_items': build_non_prominent_items_dict(k, v, data),
'loc': build_loc_dict(v, data)}
return ship_dict
| {
"content_hash": "54dda084964c49525c8e20bb58fe3a5a",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 102,
"avg_line_length": 35.630872483221474,
"alnum_prop": 0.4925598041062347,
"repo_name": "olympiag3/olypy",
"id": "7a5d33e37b83803e86f99f09330935b477f10b87",
"size": "5327",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "olymap/ship.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "776"
},
{
"name": "HTML",
"bytes": "75745"
},
{
"name": "JavaScript",
"bytes": "17372"
},
{
"name": "Makefile",
"bytes": "2045"
},
{
"name": "Python",
"bytes": "486509"
},
{
"name": "Shell",
"bytes": "1630"
}
],
"symlink_target": ""
} |
import sys
import os
import re
import gzip
#@lint-avoid-python-3-compatibility-imports
# Make directory for output if it doesn't exist
try:
os.mkdir(sys.argv[2] + "/" + sys.argv[1].split("/")[-2])
except OSError:
pass
# Strip off .gz ending
end = "/".join(sys.argv[1].split("/")[-2:])[:-len(".xml.gz")] + ".txt"
out = open(sys.argv[2] + end, "w")
# Parse and print titles and articles
NONE, HEAD, NEXT, TEXT = 0, 1, 2, 3
MODE = NONE
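# Simple state machine over the markup: NONE until a <HEADLINE> tag, HEAD for
# the headline parse line, NEXT until the first <P>, TEXT while inside a
# paragraph; on </P> the accumulated record is written out and the mode resets.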
title_parse = ""
article_parse = []
# FIX: Some parses are mis-parenthesized.
def fix_paren(parse):
if len(parse) < 2:
return parse
if parse[0] == "(" and parse[1] == " ":
return parse[2:-1]
return parse
def get_words(parse):
words = []
for w in parse.split():
if w[-1] == ')':
words.append(w.strip(")"))
if words[-1] == ".":
break
return words
def remove_digits(parse):
return re.sub(r'\d', '#', parse)
for l in gzip.open(sys.argv[1]):
if MODE == HEAD:
title_parse = remove_digits(fix_paren(l.strip()))
MODE = NEXT
if MODE == TEXT:
article_parse.append(remove_digits(fix_paren(l.strip())))
if MODE == NONE and l.strip() == "<HEADLINE>":
MODE = HEAD
if MODE == NEXT and l.strip() == "<P>":
MODE = TEXT
if MODE == TEXT and l.strip() == "</P>":
articles = []
# Annotated gigaword has a poor sentence segmenter.
# Ensure there is a least a period.
for i in range(len(article_parse)):
articles.append(article_parse[i])
if "(. .)" in article_parse[i]:
break
article_parse = "(TOP " + " ".join(articles) + ")"
# title_parse \t article_parse \t title \t article
print >>out, "\t".join([title_parse, article_parse,
" ".join(get_words(title_parse)),
" ".join(get_words(article_parse))])
article_parse = []
MODE = NONE
| {
"content_hash": "459b281d539548bf19a49095d7b3a0b2",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 70,
"avg_line_length": 26.236842105263158,
"alnum_prop": 0.5336008024072216,
"repo_name": "W4ngatang/NAMAS",
"id": "e7ac88b65da255d821b3d5cdcf4c9354adde4973",
"size": "2446",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "dataset/process_agiga.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Lua",
"bytes": "43927"
},
{
"name": "Python",
"bytes": "15180"
},
{
"name": "Shell",
"bytes": "5698"
}
],
"symlink_target": ""
} |
import sys
import os
import numpy
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import FormatStrFormatter
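# Usage (inferred from the argument handling below):
#   python plotter.py <datafile>.dat [lastsecs <seconds>]
# Plots the last `lastsecs` seconds of "<time> <value>" rows from the .dat file
# and writes a PNG of the same name alongside it.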
def getArg(param, default=""):
if (sys.argv.count(param) == 0): return default
i = sys.argv.index(param)
return sys.argv[i + 1]
lastsecs = int(getArg("lastsecs", 240))
fname = sys.argv[1]
try:
tdata = numpy.loadtxt(fname, delimiter=" ")
except:
exit(0)
if len(tdata.shape) < 2 or tdata.shape[0] < 2 or tdata.shape[1] < 2:
print "Too small data - do not try to plot yet."
exit(0)
times = tdata[:, 0]
values = tdata[:, 1]
lastt = max(times)
#majorFormatter = FormatStrFormatter('%.2f')
fig = plt.figure(figsize=(3.5, 2.0))
plt.plot(times[times > lastt - lastsecs], values[times > lastt - lastsecs])
plt.gca().xaxis.set_major_locator( MaxNLocator(nbins = 7, prune = 'lower') )
plt.xlim([max(0, lastt - lastsecs), lastt])
#plt.ylim([lastt - lastsecs, lastt])
plt.gca().yaxis.set_major_locator( MaxNLocator(nbins = 7, prune = 'lower') )
#plt.gca().yaxis.set_major_formatter(majorFormatter)
plt.savefig(fname.replace(".dat", ".png"), format="png", bbox_inches='tight')
| {
"content_hash": "833477a5752b4bcd02f478a635f568e4",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 77,
"avg_line_length": 24.32,
"alnum_prop": 0.6916118421052632,
"repo_name": "antonyms/AntonymPipeline",
"id": "6533ebd6ffefefed3e7df35379937b7a46bd0f6e",
"size": "1235",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bptf/conf/adminhtml/plots/plotter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "58295"
},
{
"name": "C++",
"bytes": "3232670"
},
{
"name": "CSS",
"bytes": "3060"
},
{
"name": "Java",
"bytes": "134710"
},
{
"name": "Makefile",
"bytes": "1451"
},
{
"name": "Objective-C++",
"bytes": "60341699"
},
{
"name": "Python",
"bytes": "28435"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "867"
}
],
"symlink_target": ""
} |
""" unit tests for the model and descriptor packager """
from rdkit import RDConfig
from rdkit.ML.Data import DataUtils
import unittest,os,sys
import io
from rdkit.six.moves import cPickle
from rdkit.ML.ModelPackage import Packager
from rdkit import Chem
import random
def feq(a,b,tol=1e-4):
return abs(a-b)<=tol
class TestCase(unittest.TestCase):
def setUp(self):
self.dataDir =os.path.join(RDConfig.RDCodeDir,'ML/ModelPackage/test_data')
self.testD = [
# NOTE: the confidences here can be twitchy due to changes in descriptors:
('Fc1ccc(NC(=O)c2cccnc2Oc3cccc(c3)C(F)(F)F)c(F)c1',0,0.8 ),
#(r'CN/1(=C\C=C(/C=C1)\C\2=C\C=N(C)(Cl)\C=C2)Cl',0,0.70),
(r'NS(=O)(=O)c1cc(ccc1Cl)C2(O)NC(=O)c3ccccc32',1,0.70),
]
def _verify(self,pkg,testD):
for smi,pred,conf in testD:
try:
m = Chem.MolFromSmiles(smi)
except:
sys.stderr.write('SMILES: %s failed\n'%(smi))
else:
p,c = pkg.Classify(m)
assert p==pred,'bad prediction (%d) for smiles %s'%(p,smi)
assert feq(c,conf),'bad confidence (%f) for smiles %s'%(c,smi)
def _verify2(self,pkg,testD):
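    # Same as _verify(), but classifies each molecule twice -- presumably to
    # confirm that repeated classification through the packager is consistent.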
for smi,pred,conf in testD:
try:
m = Chem.MolFromSmiles(smi)
except:
sys.stderr.write('SMILES: %s failed\n'%(smi))
else:
p,c = pkg.Classify(m)
assert p==pred,'bad prediction (%d) for smiles %s'%(p,smi)
assert feq(c,conf),'bad confidence (%f) for smiles %s'%(c,smi)
p,c = pkg.Classify(m)
assert p==pred,'bad prediction (%d) for smiles %s'%(p,smi)
assert feq(c,conf),'bad confidence (%f) for smiles %s'%(c,smi)
def testBuild(self):
""" tests building and screening a packager """
with open(os.path.join(self.dataDir,'Jan9_build3_calc.dsc'),'r') as calcTF:
buf = calcTF.read().replace('\r\n', '\n').encode('utf-8')
calcTF.close()
with io.BytesIO(buf) as calcF:
calc = cPickle.load(calcF)
with open(os.path.join(self.dataDir,'Jan9_build3_model.pkl'),'rb') as modelF:
model = cPickle.load(modelF)
pkg = Packager.ModelPackage(descCalc=calc,model=model)
self._verify(pkg,self.testD)
def testLoad(self):
""" tests loading and screening a packager """
with open(os.path.join(self.dataDir,'Jan9_build3_pkg.pkl'),'r') as pkgTF:
buf = pkgTF.read().replace('\r\n', '\n').encode('utf-8')
pkgTF.close()
with io.BytesIO(buf) as pkgF:
pkg = cPickle.load(pkgF)
self._verify(pkg,self.testD)
def testLoad2(self):
""" tests loading and screening a packager 2 """
with open(os.path.join(self.dataDir,'Jan9_build3_pkg.pkl'),'r') as pkgTF:
buf = pkgTF.read().replace('\r\n', '\n').encode('utf-8')
pkgTF.close()
with io.BytesIO(buf) as pkgF:
pkg = cPickle.load(pkgF)
self._verify2(pkg,self.testD)
def testPerm1(self):
""" tests the descriptor remapping stuff in a packager """
from rdkit.Chem import Descriptors
with open(os.path.join(self.dataDir,'Jan9_build3_pkg.pkl'),'r') as pkgTF:
buf = pkgTF.read().replace('\r\n', '\n').encode('utf-8')
pkgTF.close()
with io.BytesIO(buf) as pkgF:
pkg = cPickle.load(pkgF)
calc = pkg.GetCalculator()
names = calc.GetDescriptorNames()
ref = {}
DataUtils.InitRandomNumbers((23,42))
for smi,pred,conf in self.testD:
for desc in names:
fn = getattr(Descriptors,desc,lambda x:777)
m = Chem.MolFromSmiles(smi)
ref[desc] = fn(m)
for i in range(5):
perm = list(names)
random.shuffle(perm,random=random.random)
m = Chem.MolFromSmiles(smi)
for desc in perm:
fn = getattr(Descriptors,desc,lambda x:777)
val = fn(m)
assert feq(val,ref[desc],1e-4),'%s: %s(%s): %f!=%f'%(str(perm),
smi,
desc,
val,
ref[desc])
def testPerm2(self):
""" tests the descriptor remapping stuff in a packager """
with open(os.path.join(self.dataDir,'Jan9_build3_pkg.pkl'),'r') as pkgTF:
buf = pkgTF.read().replace('\r\n', '\n').encode('utf-8')
pkgTF.close()
with io.BytesIO(buf) as pkgF:
pkg = cPickle.load(pkgF)
calc = pkg.GetCalculator()
names = calc.GetDescriptorNames()
DataUtils.InitRandomNumbers((23,42))
perm = list(names)
random.shuffle(perm,random=random.random)
calc.simpleList = perm
calc.descriptorNames = perm
pkg.Init()
self._verify(pkg,self.testD)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "a862268180ef3e9f03e5a1801a2dba3e",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 81,
"avg_line_length": 35.62406015037594,
"alnum_prop": 0.5844238075137189,
"repo_name": "strets123/rdkit",
"id": "b4dce747630637d9b7597360f47705409dc59925",
"size": "4877",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rdkit/ML/ModelPackage/UnitTestPackage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "385"
},
{
"name": "C",
"bytes": "203078"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "7068170"
},
{
"name": "CMake",
"bytes": "584702"
},
{
"name": "CSS",
"bytes": "4742"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "HTML",
"bytes": "65468"
},
{
"name": "Java",
"bytes": "248620"
},
{
"name": "JavaScript",
"bytes": "11595"
},
{
"name": "LLVM",
"bytes": "27271"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "15431"
},
{
"name": "Objective-C",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3033212"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "Shell",
"bytes": "8899"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "49170"
}
],
"symlink_target": ""
} |
import mock
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes
from neutron import context
import neutron.db.api as db
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.cisco.common import cisco_constants as c_const
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.db import n1kv_db_v2
from neutron.plugins.cisco.db import n1kv_models_v2
from neutron.plugins.cisco.db import network_db_v2 as cdb
from neutron.plugins.cisco import extensions
from neutron.plugins.cisco.extensions import n1kv
from neutron.plugins.cisco.extensions import network_profile
from neutron.plugins.cisco.n1kv import n1kv_client
from neutron.plugins.cisco.n1kv import n1kv_neutron_plugin
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.cisco.n1kv import fake_client
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_db_plugin as test_plugin
from neutron.tests.unit import test_l3_plugin
from neutron.tests.unit import test_l3_schedulers
PHYS_NET = 'some-phys-net'
VLAN_MIN = 100
VLAN_MAX = 110
class FakeResponse(object):
"""
    This object is returned by the mocked requests lib in place of a real response.
Initialize it with the status code, header and buffer contents you wish to
return.
"""
def __init__(self, status, response_text, headers):
self.buffer = response_text
self.status_code = status
self.headers = headers
def json(self, *args, **kwargs):
return self.buffer
def _fake_setup_vsm(self):
"""Fake establish Communication with Cisco Nexus1000V VSM."""
self.agent_vsm = True
self._populate_policy_profiles()
class NetworkProfileTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
network_profile.RESOURCE_ATTRIBUTE_MAP)
return network_profile.Network_profile.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class N1kvPluginTestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = ('neutron.plugins.cisco.n1kv.'
'n1kv_neutron_plugin.N1kvNeutronPluginV2')
tenant_id = "some_tenant"
DEFAULT_RESP_BODY = ""
DEFAULT_RESP_CODE = 200
DEFAULT_CONTENT_TYPE = ""
fmt = "json"
def _make_test_policy_profile(self, name='service_profile'):
"""
        Create a policy profile record for testing purposes.
:param name: string representing the name of the policy profile to
create. Default argument value chosen to correspond to the
default name specified in config.py file.
"""
uuid = test_api_v2._uuid()
profile = {'id': uuid,
'name': name}
return n1kv_db_v2.create_policy_profile(profile)
def _make_test_profile(self,
name='default_network_profile',
segment_type=c_const.NETWORK_TYPE_VLAN,
segment_range='386-400'):
"""
Create a profile record for testing purposes.
:param name: string representing the name of the network profile to
create. Default argument value chosen to correspond to the
default name specified in config.py file.
:param segment_type: string representing the type of network segment.
:param segment_range: string representing the segment range for network
profile.
"""
db_session = db.get_session()
profile = {'name': name,
'segment_type': segment_type,
'tenant_id': self.tenant_id,
'segment_range': segment_range}
if segment_type == c_const.NETWORK_TYPE_OVERLAY:
profile['sub_type'] = 'unicast'
profile['multicast_ip_range'] = '0.0.0.0'
net_p = n1kv_db_v2.create_network_profile(db_session, profile)
n1kv_db_v2.sync_vxlan_allocations(db_session, net_p)
elif segment_type == c_const.NETWORK_TYPE_VLAN:
profile['physical_network'] = PHYS_NET
net_p = n1kv_db_v2.create_network_profile(db_session, profile)
n1kv_db_v2.sync_vlan_allocations(db_session, net_p)
return net_p
def setUp(self):
"""
Setup method for n1kv plugin tests.
First step is to define an acceptable response from the VSM to
our requests. This needs to be done BEFORE the setUp() function
of the super-class is called.
        This default here works for many cases. If you need something
        extra, define your own setUp() function in your test class and
        set your own DEFAULT_RESP_BODY value BEFORE calling the setUp()
        of this super-class. If you have set a value already, it will
        not be overwritten by this code.
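        A minimal sketch of such a subclass (the class name and profile data
        here are illustrative only):
            class MyVSMResponseTest(N1kvPluginTestCase):
                def setUp(self):
                    self.DEFAULT_RESP_BODY = {
                        "my_pp": {"properties": {"name": "my_pp",
                                                 "id": "some-uuid"}}}
                    super(MyVSMResponseTest, self).setUp()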
"""
if not self.DEFAULT_RESP_BODY:
self.DEFAULT_RESP_BODY = {
"icehouse-pp": {"properties": {"name": "icehouse-pp",
"id": "some-uuid-1"}},
"havana_pp": {"properties": {"name": "havana_pp",
"id": "some-uuid-2"}},
"dhcp_pp": {"properties": {"name": "dhcp_pp",
"id": "some-uuid-3"}},
}
# Creating a mock HTTP connection object for requests lib. The N1KV
# client interacts with the VSM via HTTP. Since we don't have a VSM
# running in the unit tests, we need to 'fake' it by patching the HTTP
# library itself. We install a patch for a fake HTTP connection class.
# Using __name__ to avoid having to enter the full module path.
http_patcher = mock.patch(n1kv_client.requests.__name__ + ".request")
FakeHttpConnection = http_patcher.start()
# Now define the return values for a few functions that may be called
# on any instance of the fake HTTP connection class.
self.resp_headers = {"content-type": "application/json"}
FakeHttpConnection.return_value = (FakeResponse(
self.DEFAULT_RESP_CODE,
self.DEFAULT_RESP_BODY,
self.resp_headers))
# Patch some internal functions in a few other parts of the system.
# These help us move along, without having to mock up even more systems
# in the background.
# Return a dummy VSM IP address
mock.patch(n1kv_client.__name__ + ".Client._get_vsm_hosts",
new=lambda self: "127.0.0.1").start()
# Return dummy user profiles
mock.patch(cdb.__name__ + ".get_credential_name",
new=lambda self: {"user_name": "admin",
"password": "admin_password"}).start()
n1kv_neutron_plugin.N1kvNeutronPluginV2._setup_vsm = _fake_setup_vsm
neutron_extensions.append_api_extensions_path(extensions.__path__)
ext_mgr = NetworkProfileTestExtensionManager()
# Save the original RESOURCE_ATTRIBUTE_MAP
self.saved_attr_map = {}
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items():
self.saved_attr_map[resource] = attrs.copy()
# Update the RESOURCE_ATTRIBUTE_MAP with n1kv specific extended attrs.
attributes.RESOURCE_ATTRIBUTE_MAP["networks"].update(
n1kv.EXTENDED_ATTRIBUTES_2_0["networks"])
attributes.RESOURCE_ATTRIBUTE_MAP["ports"].update(
n1kv.EXTENDED_ATTRIBUTES_2_0["ports"])
self.addCleanup(self.restore_resource_attribute_map)
self.addCleanup(db.clear_db)
super(N1kvPluginTestCase, self).setUp(self._plugin_name,
ext_mgr=ext_mgr)
# Create some of the database entries that we require.
self._make_test_profile()
self._make_test_policy_profile()
def restore_resource_attribute_map(self):
# Restore the original RESOURCE_ATTRIBUTE_MAP
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
def test_plugin(self):
self._make_network('json',
'some_net',
True,
tenant_id=self.tenant_id,
set_context=True)
req = self.new_list_request('networks', params="fields=tenant_id")
req.environ['neutron.context'] = context.Context('', self.tenant_id)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
body = self.deserialize('json', res)
self.assertIn('tenant_id', body['networks'][0])
class TestN1kvNetworkProfiles(N1kvPluginTestCase):
def _prepare_net_profile_data(self,
segment_type,
sub_type=None,
segment_range=None,
mcast_ip_range=None):
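        """Build a network_profile request body for the given segment type,
        filling in per-type defaults for range and sub-type fields."""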
netp = {'name': 'netp1',
'segment_type': segment_type,
'tenant_id': self.tenant_id}
if segment_type == c_const.NETWORK_TYPE_VLAN:
netp['segment_range'] = segment_range or '100-110'
netp['physical_network'] = PHYS_NET
elif segment_type == c_const.NETWORK_TYPE_OVERLAY:
netp['segment_range'] = segment_range or '10000-10010'
netp['sub_type'] = sub_type or 'enhanced'
netp['multicast_ip_range'] = (mcast_ip_range or
"224.1.1.1-224.1.1.10")
elif segment_type == c_const.NETWORK_TYPE_TRUNK:
netp['sub_type'] = c_const.NETWORK_TYPE_VLAN
data = {"network_profile": netp}
return data
def test_create_network_profile_vlan(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_VLAN)
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
def test_create_network_profile_overlay(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY)
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
def test_create_network_profile_trunk(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_TRUNK)
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
def test_create_network_profile_trunk_missing_subtype(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_TRUNK)
data['network_profile'].pop('sub_type')
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_create_network_profile_overlay_unreasonable_seg_range(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY,
segment_range='10000-1000000001')
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_update_network_profile_plugin(self):
net_p_dict = (self.
_prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY))
net_p_req = self.new_create_request('network_profiles', net_p_dict)
net_p = self.deserialize(self.fmt,
net_p_req.get_response(self.ext_api))
data = {'network_profile': {'name': 'netp2'}}
update_req = self.new_update_request('network_profiles',
data,
net_p['network_profile']['id'])
update_res = update_req.get_response(self.ext_api)
self.assertEqual(update_res.status_int, 200)
def test_update_network_profile_physical_network_fail(self):
net_p = self._make_test_profile(name='netp1')
data = {'network_profile': {'physical_network': PHYS_NET}}
net_p_req = self.new_update_request('network_profiles',
data,
net_p['id'])
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_update_network_profile_segment_type_fail(self):
net_p = self._make_test_profile(name='netp1')
data = {'network_profile': {
'segment_type': c_const.NETWORK_TYPE_OVERLAY}}
net_p_req = self.new_update_request('network_profiles',
data,
net_p['id'])
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_update_network_profile_sub_type_fail(self):
net_p_dict = (self.
_prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY))
net_p_req = self.new_create_request('network_profiles', net_p_dict)
net_p = self.deserialize(self.fmt,
net_p_req.get_response(self.ext_api))
data = {'network_profile': {'sub_type': c_const.NETWORK_TYPE_VLAN}}
update_req = self.new_update_request('network_profiles',
data,
net_p['network_profile']['id'])
update_res = update_req.get_response(self.ext_api)
self.assertEqual(update_res.status_int, 400)
def test_update_network_profiles_with_networks_fail(self):
net_p = self._make_test_profile(name='netp1')
data = {'network_profile': {'segment_range': '200-210'}}
update_req = self.new_update_request('network_profiles',
data,
net_p['id'])
update_res = update_req.get_response(self.ext_api)
self.assertEqual(update_res.status_int, 200)
net_data = {'network': {'name': 'net1',
n1kv.PROFILE_ID: net_p['id'],
'tenant_id': 'some_tenant'}}
network_req = self.new_create_request('networks', net_data)
network_res = network_req.get_response(self.api)
self.assertEqual(network_res.status_int, 201)
data = {'network_profile': {'segment_range': '300-310'}}
update_req = self.new_update_request('network_profiles',
data,
net_p['id'])
update_res = update_req.get_response(self.ext_api)
self.assertEqual(update_res.status_int, 409)
def test_create_overlay_network_profile_invalid_multicast_fail(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY,
sub_type=(c_const.
NETWORK_SUBTYPE_NATIVE_VXLAN),
mcast_ip_range='1.1.1.1')
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_create_overlay_network_profile_no_multicast_fail(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY,
sub_type=(c_const.
NETWORK_SUBTYPE_NATIVE_VXLAN))
data['network_profile']['multicast_ip_range'] = ''
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_create_overlay_network_profile_wrong_split_multicast_fail(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY,
sub_type=(c_const.
NETWORK_SUBTYPE_NATIVE_VXLAN),
mcast_ip_range=
'224.1.1.1.224.1.1.3')
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_create_overlay_network_profile_invalid_minip_multicast_fail(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY,
sub_type=(c_const.
NETWORK_SUBTYPE_NATIVE_VXLAN),
mcast_ip_range=
'10.0.0.1-224.1.1.3')
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_create_overlay_network_profile_invalid_maxip_multicast_fail(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY,
sub_type=(c_const.
NETWORK_SUBTYPE_NATIVE_VXLAN),
mcast_ip_range=
'224.1.1.1-20.0.0.1')
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_create_overlay_network_profile_correct_multicast_pass(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY)
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
def test_update_overlay_network_profile_correct_multicast_pass(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY)
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
net_p = self.deserialize(self.fmt, res)
data = {'network_profile': {'multicast_ip_range':
'224.0.1.0-224.0.1.100'}}
update_req = self.new_update_request('network_profiles',
data,
net_p['network_profile']['id'])
update_res = update_req.get_response(self.ext_api)
self.assertEqual(update_res.status_int, 200)
def test_create_overlay_network_profile_reservedip_multicast_fail(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY,
sub_type=(c_const.
NETWORK_SUBTYPE_NATIVE_VXLAN),
mcast_ip_range=
'224.0.0.100-224.0.1.100')
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 400)
def test_update_overlay_network_profile_reservedip_multicast_fail(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_OVERLAY)
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
net_p = self.deserialize(self.fmt, res)
data = {'network_profile': {'multicast_ip_range':
'224.0.0.11-224.0.0.111'}}
update_req = self.new_update_request('network_profiles',
data,
net_p['network_profile']['id'])
update_res = update_req.get_response(self.ext_api)
self.assertEqual(update_res.status_int, 400)
def test_update_vlan_network_profile_multicast_fail(self):
net_p = self._make_test_profile(name='netp1')
data = {'network_profile': {'multicast_ip_range':
'224.0.1.0-224.0.1.100'}}
update_req = self.new_update_request('network_profiles',
data,
net_p['id'])
update_res = update_req.get_response(self.ext_api)
self.assertEqual(update_res.status_int, 400)
def test_update_trunk_network_profile_segment_range_fail(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_TRUNK)
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
net_p = self.deserialize(self.fmt, res)
data = {'network_profile': {'segment_range':
'100-200'}}
update_req = self.new_update_request('network_profiles',
data,
net_p['network_profile']['id'])
update_res = update_req.get_response(self.ext_api)
self.assertEqual(update_res.status_int, 400)
def test_update_trunk_network_profile_multicast_fail(self):
data = self._prepare_net_profile_data(c_const.NETWORK_TYPE_TRUNK)
net_p_req = self.new_create_request('network_profiles', data)
res = net_p_req.get_response(self.ext_api)
self.assertEqual(res.status_int, 201)
net_p = self.deserialize(self.fmt, res)
data = {'network_profile': {'multicast_ip_range':
'224.0.1.0-224.0.1.100'}}
update_req = self.new_update_request('network_profiles',
data,
net_p['network_profile']['id'])
update_res = update_req.get_response(self.ext_api)
self.assertEqual(update_res.status_int, 400)
def test_create_network_profile_populate_vlan_segment_pool(self):
db_session = db.get_session()
net_p_dict = self._prepare_net_profile_data(c_const.NETWORK_TYPE_VLAN)
net_p_req = self.new_create_request('network_profiles', net_p_dict)
self.deserialize(self.fmt,
net_p_req.get_response(self.ext_api))
for vlan in range(VLAN_MIN, VLAN_MAX + 1):
self.assertIsNotNone(n1kv_db_v2.get_vlan_allocation(db_session,
PHYS_NET,
vlan))
self.assertFalse(n1kv_db_v2.get_vlan_allocation(db_session,
PHYS_NET,
vlan).allocated)
self.assertRaises(c_exc.VlanIDNotFound,
n1kv_db_v2.get_vlan_allocation,
db_session,
PHYS_NET,
VLAN_MIN - 1)
self.assertRaises(c_exc.VlanIDNotFound,
n1kv_db_v2.get_vlan_allocation,
db_session,
PHYS_NET,
VLAN_MAX + 1)
def test_delete_network_profile_with_network_fail(self):
net_p = self._make_test_profile(name='netp1')
net_data = {'network': {'name': 'net1',
n1kv.PROFILE_ID: net_p['id'],
'tenant_id': 'some_tenant'}}
network_req = self.new_create_request('networks', net_data)
network_res = network_req.get_response(self.api)
self.assertEqual(network_res.status_int, 201)
self._delete('network_profiles', net_p['id'],
expected_code=409)
def test_delete_network_profile_deallocate_vlan_segment_pool(self):
db_session = db.get_session()
net_p_dict = self._prepare_net_profile_data(c_const.NETWORK_TYPE_VLAN)
net_p_req = self.new_create_request('network_profiles', net_p_dict)
net_p = self.deserialize(self.fmt,
net_p_req.get_response(self.ext_api))
self.assertIsNotNone(n1kv_db_v2.get_vlan_allocation(db_session,
PHYS_NET,
VLAN_MIN))
self._delete('network_profiles', net_p['network_profile']['id'])
for vlan in range(VLAN_MIN, VLAN_MAX + 1):
self.assertRaises(c_exc.VlanIDNotFound,
n1kv_db_v2.get_vlan_allocation,
db_session,
PHYS_NET,
vlan)
def test_create_network_profile_rollback_profile_binding(self):
"""Test rollback of profile binding if network profile create fails."""
db_session = db.get_session()
client_patch = mock.patch(n1kv_client.__name__ + ".Client",
new=fake_client.TestClientInvalidResponse)
client_patch.start()
net_p_dict = self._prepare_net_profile_data(c_const.NETWORK_TYPE_VLAN)
self.new_create_request('network_profiles', net_p_dict)
bindings = (db_session.query(n1kv_models_v2.ProfileBinding).filter_by(
profile_type="network"))
self.assertEqual(bindings.count(), 0)
class TestN1kvBasicGet(test_plugin.TestBasicGet,
N1kvPluginTestCase):
pass
class TestN1kvHTTPResponse(test_plugin.TestV2HTTPResponse,
N1kvPluginTestCase):
pass
class TestN1kvPorts(test_plugin.TestPortsV2,
N1kvPluginTestCase,
test_bindings.PortBindingsTestCase):
VIF_TYPE = portbindings.VIF_TYPE_OVS
HAS_PORT_FILTER = False
def test_create_port_with_default_n1kv_policy_profile_id(self):
"""Test port create without passing policy profile id."""
with self.port() as port:
db_session = db.get_session()
pp = n1kv_db_v2.get_policy_profile(
db_session, port['port'][n1kv.PROFILE_ID])
self.assertEqual(pp['name'], 'service_profile')
def test_create_port_with_n1kv_policy_profile_id(self):
"""Test port create with policy profile id."""
profile_obj = self._make_test_policy_profile(name='test_profile')
with self.network() as network:
data = {'port': {n1kv.PROFILE_ID: profile_obj.id,
'tenant_id': self.tenant_id,
'network_id': network['network']['id']}}
port_req = self.new_create_request('ports', data)
port = self.deserialize(self.fmt,
port_req.get_response(self.api))
self.assertEqual(port['port'][n1kv.PROFILE_ID],
profile_obj.id)
self._delete('ports', port['port']['id'])
def test_update_port_with_n1kv_policy_profile_id(self):
"""Test port update failure while updating policy profile id."""
with self.port() as port:
data = {'port': {n1kv.PROFILE_ID: 'some-profile-uuid'}}
port_req = self.new_update_request('ports',
data,
port['port']['id'])
res = port_req.get_response(self.api)
# Port update should fail to update policy profile id.
self.assertEqual(res.status_int, 400)
def test_create_first_port_invalid_parameters_fail(self):
"""Test parameters for first port create sent to the VSM."""
profile_obj = self._make_test_policy_profile(name='test_profile')
with self.network() as network:
client_patch = mock.patch(n1kv_client.__name__ + ".Client",
new=fake_client.TestClientInvalidRequest)
client_patch.start()
data = {'port': {n1kv.PROFILE_ID: profile_obj.id,
'tenant_id': self.tenant_id,
'network_id': network['network']['id'],
}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(res.status_int, 500)
client_patch.stop()
def test_create_next_port_invalid_parameters_fail(self):
"""Test parameters for subsequent port create sent to the VSM."""
with self.port() as port:
client_patch = mock.patch(n1kv_client.__name__ + ".Client",
new=fake_client.TestClientInvalidRequest)
client_patch.start()
data = {'port': {n1kv.PROFILE_ID: port['port']['n1kv:profile_id'],
'tenant_id': port['port']['tenant_id'],
'network_id': port['port']['network_id']}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(res.status_int, 500)
client_patch.stop()
def test_create_first_port_rollback_vmnetwork(self):
"""Test whether VMNetwork is cleaned up if port create fails on VSM."""
db_session = db.get_session()
profile_obj = self._make_test_policy_profile(name='test_profile')
with self.network() as network:
client_patch = mock.patch(n1kv_client.__name__ + ".Client",
new=fake_client.
TestClientInvalidResponse)
client_patch.start()
data = {'port': {n1kv.PROFILE_ID: profile_obj.id,
'tenant_id': self.tenant_id,
'network_id': network['network']['id'],
}}
self.new_create_request('ports', data)
self.assertRaises(c_exc.VMNetworkNotFound,
n1kv_db_v2.get_vm_network,
db_session,
profile_obj.id,
network['network']['id'])
# Explicit stop of failure response mock from controller required
# for network object clean up to succeed.
client_patch.stop()
def test_create_next_port_rollback_vmnetwork_count(self):
"""Test whether VMNetwork count if port create fails on VSM."""
db_session = db.get_session()
with self.port() as port:
pt = port['port']
old_vmn = n1kv_db_v2.get_vm_network(db_session,
pt['n1kv:profile_id'],
pt['network_id'])
client_patch = mock.patch(n1kv_client.__name__ + ".Client",
new=fake_client.
TestClientInvalidResponse)
client_patch.start()
data = {'port': {n1kv.PROFILE_ID: pt['n1kv:profile_id'],
'tenant_id': pt['tenant_id'],
'network_id': pt['network_id']}}
self.new_create_request('ports', data)
new_vmn = n1kv_db_v2.get_vm_network(db_session,
pt['n1kv:profile_id'],
pt['network_id'])
self.assertEqual(old_vmn.port_count, new_vmn.port_count)
# Explicit stop of failure response mock from controller required
# for network object clean up to succeed.
client_patch.stop()
class TestN1kvPolicyProfiles(N1kvPluginTestCase):
def test_populate_policy_profile(self):
client_patch = mock.patch(n1kv_client.__name__ + ".Client",
new=fake_client.TestClient)
client_patch.start()
instance = n1kv_neutron_plugin.N1kvNeutronPluginV2()
instance._populate_policy_profiles()
db_session = db.get_session()
profile = n1kv_db_v2.get_policy_profile(
db_session, '00000000-0000-0000-0000-000000000001')
self.assertEqual('pp-1', profile['name'])
client_patch.stop()
def test_populate_policy_profile_delete(self):
# Patch the Client class with the TestClient class
with mock.patch(n1kv_client.__name__ + ".Client",
new=fake_client.TestClient):
# Patch the _get_total_profiles() method to return a custom value
with mock.patch(fake_client.__name__ +
'.TestClient._get_total_profiles') as obj_inst:
# Return 3 policy profiles
obj_inst.return_value = 3
plugin = manager.NeutronManager.get_plugin()
plugin._populate_policy_profiles()
db_session = db.get_session()
profile = n1kv_db_v2.get_policy_profile(
db_session, '00000000-0000-0000-0000-000000000001')
# Verify that DB contains only 3 policy profiles
self.assertEqual('pp-1', profile['name'])
profile = n1kv_db_v2.get_policy_profile(
db_session, '00000000-0000-0000-0000-000000000002')
self.assertEqual('pp-2', profile['name'])
profile = n1kv_db_v2.get_policy_profile(
db_session, '00000000-0000-0000-0000-000000000003')
self.assertEqual('pp-3', profile['name'])
self.assertRaises(c_exc.PolicyProfileIdNotFound,
n1kv_db_v2.get_policy_profile,
db_session,
'00000000-0000-0000-0000-000000000004')
# Return 2 policy profiles
obj_inst.return_value = 2
plugin._populate_policy_profiles()
# Verify that the third policy profile is deleted
self.assertRaises(c_exc.PolicyProfileIdNotFound,
n1kv_db_v2.get_policy_profile,
db_session,
'00000000-0000-0000-0000-000000000003')
class TestN1kvNetworks(test_plugin.TestNetworksV2,
N1kvPluginTestCase):
def _prepare_net_data(self, net_profile_id):
return {'network': {'name': 'net1',
n1kv.PROFILE_ID: net_profile_id,
'tenant_id': self.tenant_id}}
def test_create_network_with_default_n1kv_network_profile_id(self):
"""Test network create without passing network profile id."""
with self.network() as network:
db_session = db.get_session()
np = n1kv_db_v2.get_network_profile(
db_session, network['network'][n1kv.PROFILE_ID])
self.assertEqual(np['name'], 'default_network_profile')
def test_create_network_with_n1kv_network_profile_id(self):
"""Test network create with network profile id."""
profile_obj = self._make_test_profile(name='test_profile')
data = self._prepare_net_data(profile_obj.id)
network_req = self.new_create_request('networks', data)
network = self.deserialize(self.fmt,
network_req.get_response(self.api))
self.assertEqual(network['network'][n1kv.PROFILE_ID],
profile_obj.id)
def test_update_network_with_n1kv_network_profile_id(self):
"""Test network update failure while updating network profile id."""
with self.network() as network:
data = {'network': {n1kv.PROFILE_ID: 'some-profile-uuid'}}
network_req = self.new_update_request('networks',
data,
network['network']['id'])
res = network_req.get_response(self.api)
# Network update should fail to update network profile id.
self.assertEqual(res.status_int, 400)
def test_create_network_rollback_deallocate_vlan_segment(self):
"""Test vlan segment deallocation on network create failure."""
profile_obj = self._make_test_profile(name='test_profile',
segment_range='20-23')
data = self._prepare_net_data(profile_obj.id)
client_patch = mock.patch(n1kv_client.__name__ + ".Client",
new=fake_client.TestClientInvalidResponse)
client_patch.start()
self.new_create_request('networks', data)
db_session = db.get_session()
self.assertFalse(n1kv_db_v2.get_vlan_allocation(db_session,
PHYS_NET,
20).allocated)
def test_create_network_rollback_deallocate_overlay_segment(self):
"""Test overlay segment deallocation on network create failure."""
profile_obj = self._make_test_profile('test_np',
c_const.NETWORK_TYPE_OVERLAY,
'10000-10001')
data = self._prepare_net_data(profile_obj.id)
client_patch = mock.patch(n1kv_client.__name__ + ".Client",
new=fake_client.TestClientInvalidResponse)
client_patch.start()
self.new_create_request('networks', data)
db_session = db.get_session()
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(db_session,
10000).allocated)
class TestN1kvSubnets(test_plugin.TestSubnetsV2,
N1kvPluginTestCase):
def setUp(self):
super(TestN1kvSubnets, self).setUp()
def test_create_subnet_with_invalid_parameters(self):
"""Test subnet creation with invalid parameters sent to the VSM"""
with self.network() as network:
client_patch = mock.patch(n1kv_client.__name__ + ".Client",
new=fake_client.TestClientInvalidRequest)
client_patch.start()
data = {'subnet': {'network_id': network['network']['id'],
'cidr': "10.0.0.0/24"}}
subnet_req = self.new_create_request('subnets', data)
subnet_resp = subnet_req.get_response(self.api)
# Subnet creation should fail due to invalid network name
self.assertEqual(subnet_resp.status_int, 400)
class TestN1kvL3Test(test_l3_plugin.L3NatExtensionTestCase):
pass
class TestN1kvL3SchedulersTest(test_l3_schedulers.L3SchedulerTestCase):
pass
| {
"content_hash": "4c6d21cedf6735c75d620161bc02b7cb",
"timestamp": "",
"source": "github",
"line_count": 808,
"max_line_length": 79,
"avg_line_length": 49.14108910891089,
"alnum_prop": 0.5516546617639652,
"repo_name": "virtualopensystems/neutron",
"id": "0d71bf4ab99e6493507132d9e815e9efb76440d5",
"size": "40462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/cisco/n1kv/test_n1kv_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60527"
},
{
"name": "Python",
"bytes": "9873662"
},
{
"name": "Shell",
"bytes": "9202"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
import argparse
import os
from scheduled_bots.utils import make_deletion_templates, create_rfd
from wikidataintegrator import wdi_core, wdi_helpers
try:
from scheduled_bots.local import WDUSER, WDPASS
except ImportError:
if "WDUSER" in os.environ and "WDPASS" in os.environ:
WDUSER = os.environ['WDUSER']
WDPASS = os.environ['WDPASS']
else:
raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
def get_deprecated_items(releases):
    # releases is a list of QIDs; items whose "stated in" reference points to
    # one of these release items should be deleted.
    # The commented-out query below targeted the old-style refs, which should
    # already have been deleted.
'''
query = """SELECT ?item ?itemLabel ?ipr WHERE {
?item p:P2926 ?s .
?s ps:P2926 ?ipr .
?s prov:wasDerivedFrom ?ref .
?ref pr:P348 "58.0" .
#SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}"""
bindings = wdi_core.WDItemEngine.execute_sparql_query(query)['results']['bindings']
qids = {x['item']['value'].rsplit("/")[-1] for x in bindings}
'''
query = """
SELECT ?item ?itemLabel ?iprurl WHERE {
?item p:P2926 ?s .
?s ps:P2926 ?ipr .
?s prov:wasDerivedFrom ?ref .
?ref pr:P248 ?release .
values ?release **releases_str** .
BIND(IRI(REPLACE(?ipr, '^(.+)$', ?formatterurl)) AS ?iprurl).
wd:P2926 wdt:P1630 ?formatterurl .
}"""
releases_str = '{' + " ".join(['wd:' + x for x in releases]) + '}'
query = query.replace("**releases_str**", releases_str)
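    # For illustration (hypothetical QIDs): releases = ['Q111', 'Q222'] yields
    # releases_str '{wd:Q111 wd:Q222}', so the VALUES clause above expands to
    # "values ?release {wd:Q111 wd:Q222} ."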
print(query)
bindings = wdi_core.WDItemEngine.execute_sparql_query(query)['results']['bindings']
qids2 = {x['item']['value'].rsplit("/")[-1] for x in bindings}
items = qids2
return items
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("current_release", help="The current release. (e.g.: '64.0')")
parser.add_argument('--title', help='deletion request title', type=str, default="Delete deprecated Interpro Items")
parser.add_argument('--reason', help='deletion request reason', type=str,
default="These items are deprecated")
parser.add_argument('--force', help='force run if deleting a large number of genes', action='store_true')
parser.add_argument('--dummy', help='dont actually create the deletion request', action='store_true')
args = parser.parse_args()
current_release = args.current_release
release_qid = wdi_helpers.id_mapper('P393', (('P629', "Q3047275"),)) # interpro releases
to_remove = {v for k, v in release_qid.items() if k != current_release}
print(to_remove)
qids = get_deprecated_items(to_remove)
print("|".join(qids))
print(len(qids))
if len(qids) > 200 and not args.force:
raise ValueError(
"Trying to delete {} items. If you really want to do this, re run with --force".format(len(qids)))
if len(qids) > 0:
s = make_deletion_templates(qids, args.title, args.reason)
if not args.dummy:
create_rfd(s, WDUSER, WDPASS)
log_path = "deletion_log.txt"
with open(log_path, 'w') as f:
f.write("\n".join(qids))
| {
"content_hash": "f08707a635b3f7451d8d4e25fbc46853",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 119,
"avg_line_length": 38.71084337349398,
"alnum_prop": 0.6308745720510427,
"repo_name": "SuLab/scheduled-bots",
"id": "f2c0ae848262c8778c9d6e227c6658ad132d6865",
"size": "3213",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scheduled_bots/interpro/DeleteBot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1296"
},
{
"name": "Jupyter Notebook",
"bytes": "1049300"
},
{
"name": "Python",
"bytes": "709603"
},
{
"name": "Shell",
"bytes": "5313"
}
],
"symlink_target": ""
} |
'''
Clipboard Gtk3: an implementation of the Clipboard using Gtk3.
'''
__all__ = ('ClipboardGtk3',)
from kivy.utils import platform
from kivy.support import install_gobject_iteration
from kivy.core.clipboard import ClipboardBase
if platform != 'linux':
raise SystemError('unsupported platform for gtk3 clipboard')
from gi.repository import Gtk, Gdk
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
class ClipboardGtk3(ClipboardBase):
_is_init = False
def init(self):
if self._is_init:
return
install_gobject_iteration()
self._is_init = True
def get(self, mimetype='text/plain;charset=utf-8'):
self.init()
if mimetype == 'text/plain;charset=utf-8':
contents = clipboard.wait_for_text()
if contents:
return contents
return ''
def put(self, data, mimetype='text/plain;charset=utf-8'):
self.init()
if mimetype == 'text/plain;charset=utf-8':
text = data.decode(self._encoding)
clipboard.set_text(text, -1)
clipboard.store()
def get_types(self):
self.init()
return ['text/plain;charset=utf-8']
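# Illustrative usage (a sketch; assumes Kivy has selected this provider, which
# happens elsewhere in kivy.core.clipboard on a Linux/Gtk3 setup):
#   from kivy.core.clipboard import Clipboard
#   Clipboard.copy('hello')    # routes to put() with the utf-8 text mimetype
#   text = Clipboard.paste()   # routes to get()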
| {
"content_hash": "4107dbb2447fb6e25252963072b0c7e7",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 64,
"avg_line_length": 26.6,
"alnum_prop": 0.6265664160401002,
"repo_name": "el-ethan/kivy",
"id": "ce53a3d1b974bc971adda77832c3a46728650d4b",
"size": "1197",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "kivy/core/clipboard/clipboard_gtk3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "340566"
},
{
"name": "Emacs Lisp",
"bytes": "9695"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "19384"
},
{
"name": "Makefile",
"bytes": "4202"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "3679127"
},
{
"name": "Vim script",
"bytes": "1123"
}
],
"symlink_target": ""
} |
"""
Maximum cardinality matching in general graphs.
This module implements the general matching algorithm given in "The General
Maximum Matching Algorithm of Micali and Vazirani" by Paul A. Peterson and
Michael C. Loui, Algorithmica, 1988.
Many terms used in the code comments are explained in the paper by Peterson
and Loui. The paper could prove necessary in making sense of this code.
:filename matching.py
"""
# Authorship information
__author__ = "Alexander Soloviev"
__email__ = "[email protected]"
__date__ = "04/04/2015"
__all__ = [ 'max_cardinality_matching' ]
# Necessary imports
import structures
def max_cardinality_matching( G ):
"""Compute a maximum cardinality matching in a general graph G.
A matching is a subset of edges in which no node occurs more than once.
The cardinality of a matching is the number of matched edges.
The maximum matching is a matching of maximum cardinality.
:param G - the NetworkX graph given
Undirected graph.
:return mate - dictionary
The matching is returned as a dictionary such that
mate[v] == w if node v is matched to node w. Unmatched
nodes do not occur as a key in mate.
:notes
This function takes time O(sqrt(number_of_nodes) * number_of_edges).
This method is based on the "blossom" method for finding augmenting
paths.
:references
..[1] "The General Maximum Matching Algorithm of Micali and Vazirani",
Paul A. Peterson and Michael C. Loui, Algorithmica, 1988
"""
# Global variables for initializing node attributes
INFINITY = len( G ) + 1 # Odd and even level attribute value
UNERASED = False # Erase attribute value
ERASED = True
UNVISITED = False # Visit attribute value
VISITED = True
LEFT = -1 # Left and right attribute value
UNMARKED = 0
RIGHT = 1
# Global variables for initializing edge attributes
UNUSED = False
USED = True
class Bloom:
""" Representation of a bloom (a generalization of a blossom).
A blossom is a circuit of odd length, say 2k+1, that has k matched
edges. This class stores only the peak vertices and the base vertex
of the bloom.
"""
__slots__ = [ 'peaks', 'base' ]
class DfsInfo:
""" The information needed by the left and right depth first searches.
In calling leftDfs and rightDfs, the vertices could get updated or
modified. This class stores all of the parameters that could be
altered.
"""
def __init__(self, s, t, vL, vR, dcv, barrier):
self.s = s
self.t = t
self.vL = vL
self.vR = vR
self.dcv = dcv
self.barrier = barrier
# Get a list of vertices
gnodes = G.nodes()
if not gnodes:
return { } # Ignore empty graphs
# Initialize the top-level data structures for node attributes.
# Each of these is a dictionary indexed by the node.
nodeEvenLevel = { }
nodeOddLevel = { }
nodeBloom = { }
nodePredecessors = { }
nodeSuccessors = { }
nodeAnomalies = { }
nodeCount = { }
nodeErase = { }
nodeVisit = { }
nodeMark = { }
nodeParent = { }
# Path compression:
#nodeBaseStar = { }
# Initialize the top-level data structure for nodes marked
# left or right during the current call to augmentBlossom. If a
# bloom is found, these nodes will be a part of the bloom.
bloomNodes = [ ]
# Initialize the top-level data structure for candidates.
# Candidates is constructed so that candidates[i] contains all of the
# vertices to search at the current level i.
candidates = { }
# Initialize the top-level data structure for bridges.
# Bridges is constructed so that bridges[i] contains all bridges at
# level i. A bridge is an edge whose removal leaves a disconnected graph.
bridges = { }
# If v is a matched vertex, mate[v] is its partner vertex.
# If v is a single vertex, v does not occur as a key in mate.
# Initially all vertices are single and are updated during augmentation.
mate = { }
def search():
""" The search subroutine.
Find all augmenting paths of minimal length and increase the current
matching along these paths. Call the augmentBlossom function with
each bridge found.
:return augmented - True if matching was augmented, False otherwise
"""
i = 0 # Counter for the current level
# Insert each exposed vertex into candidates
for v in G.nodes_iter():
if v not in mate:
nodeEvenLevel[v] = 0
candidates[0].append( v )
# Perform a breadth-first search through each of the vertices.
# Continue iteration while candidates is not empty and no augmentation
# occurred at level i-1.
augmented = False
while (i < len( gnodes ) + 1) and not augmented:
if i % 2 == 0: # If level i is even
for v in candidates[i]:
# For each unerased and unmatched neighbor u of node v,
# determine whether the edge (u, v) is a bridge.
for u in G.neighbors_iter( v ):
if u == v: continue # Ignore self-loops
if mate.get(v) != u and nodeErase[u] == UNERASED:
assert mate.get(u) != v
if nodeEvenLevel[u] < INFINITY:
j = (nodeEvenLevel[u] + nodeEvenLevel[v]) / 2
bridges[j].add( tuple( sorted( [u, v] ) ) )
else:
if nodeOddLevel[u] == INFINITY:
nodeOddLevel[u] = i + 1
if nodeOddLevel[u] == i + 1:
nodeCount[u] += 1
nodePredecessors[u].append( v )
nodeSuccessors[v].append( u )
candidates[i + 1].append( u )
elif nodeOddLevel[u] < i:
nodeAnomalies[u].append( v )
else: # If level i is odd
for v in candidates[i]:
# For each node v in candidates such that v belongs to no bloom,
# determine whether the edge (u, v) is a bridge, where u is the
# mate of v.
if nodeBloom[v] == None:
u = mate[v]
if nodeOddLevel[u] < INFINITY:
j = (nodeOddLevel[u] + nodeOddLevel[v]) / 2
bridges[j].add( tuple( sorted( [u, v] ) ) )
elif nodeEvenLevel[u] == INFINITY:
nodePredecessors[u] = [v]
nodeSuccessors[v] = [u]
nodeCount[u] = 1
nodeEvenLevel[u] = i + 1
candidates[i + 1].append( u )
# Call augmentBlossom for each edge in bridges
for s, t in bridges[i]:
if nodeErase[s] == UNERASED and nodeErase[t] == UNERASED:
augmented = augmentBlossom(s, t, i)
i += 1 # Increment the level counter
return augmented
def augmentBlossom(s, t, i):
""" The augmentBlossom subroutine, or blossAug.
Either define a new blossom, discover that s and t are in the same
blossom, or find an augmenting path by using a double depth-first
search. Use the functions leftDfs, rightDfs, and erasePath. Upon
return, augmented is True if an augmenting path was found.
:param s - first node of a bridge
:param t - second node of a bridge
:param i - the current level
:return augmented - True if augmenting path was found, False otherwise
"""
# Boolean flags for whether a bloom was found or augmentation occurred
foundBloom = False
augmented = False
vL = baseStar(s) if nodeBloom[s] else s
vR = baseStar(t) if nodeBloom[t] else t
if vL == vR:
return False # Exit if s and t belong to same compressed bloom
# Set the parent nodes accordingly
if nodeBloom[s]:
nodeParent[vL] = s
if nodeBloom[t]:
nodeParent[vR] = t
# Mark vL left and vR right
nodeMark[vL] = LEFT
nodeMark[vR] = RIGHT
bloomNodes.append( vL )
bloomNodes.append( vR )
# DfsInfo stores information about s, t, vL, vR, dcv, and barrier vertices
dfsInfo = DfsInfo(s, t, vL, vR, None, vR)
# While a bloom has not been found and no augmentation has occurred,
# perform the double depth-first search.
while not foundBloom and not augmented:
# Get the levels of both vL and vR
if dfsInfo.vL == None or dfsInfo.vR == None: return False
level_vL = min(nodeEvenLevel[dfsInfo.vL], nodeOddLevel[dfsInfo.vL])
level_vR = min(nodeEvenLevel[dfsInfo.vR], nodeOddLevel[dfsInfo.vR])
# Increase the matching if vL and vR are both exposed
if dfsInfo.vL not in mate and dfsInfo.vR not in mate:
pathL = findPath(dfsInfo.s, dfsInfo.vL, None)
pathR = findPath(dfsInfo.t, dfsInfo.vR, None)
path = connectPath(pathL, pathR, dfsInfo.s, dfsInfo.t)
augmentMatching(dfsInfo.vL, dfsInfo.vR)
erasePath(path)
augmented = True
break
elif level_vL >= level_vR:
foundBloom = leftDfs( dfsInfo ) # Call leftDfs
else:
foundBloom = rightDfs( dfsInfo ) # Call rightDfs
# Create a new bloom if a bloom is found by the depth-first search.
if foundBloom and dfsInfo.dcv != None:
nodeMark[dfsInfo.dcv] = UNMARKED # Vertex dcv cannot be in the bloom
b = Bloom() # Create a new bloom
b.peaks = (dfsInfo.s, dfsInfo.t) # Assign it the peak vertices
b.base = dfsInfo.dcv # Assign it a base vertex
# Path compression
#baseStardcv = baseStar( dfsInfo.dcv )
#assert baseStardcv != None
# Put each vertex marked left or right during this call in the
# new bloom
for v in bloomNodes:
if nodeMark[v] == UNMARKED or nodeBloom[v] != None: continue # If no mark or bloom already defined, skip it
# Set the bloom attribute of the vertex
nodeBloom[v] = b
# Path compression
# Set the base* attribute of the vertex
#nodeBaseStar[v] = baseStardcv
level_v = min(nodeEvenLevel[v], nodeOddLevel[v])
if level_v % 2 == 0: # Check if v is outer
nodeOddLevel[v] = 2*i + 1 - nodeEvenLevel[v]
else: # Else v is inner
nodeEvenLevel[v] = 2*i + 1 - nodeOddLevel[v]
candidates[ nodeEvenLevel[v] ].append( v )
for z in nodeAnomalies[v]:
j = (nodeEvenLevel[v] + nodeEvenLevel[z]) / 2
bridges[j].add( tuple( sorted( [v, z] ) ) )
G[v][z]['use'] = USED
# Clear the bloomNodes list
del bloomNodes[:]
return augmented
def connectPath(pathL, pathR, s, t):
""" Connect two paths into a single path.
:param pathL - the left path given as a list
:param pathR - the right path given as a list
:param s - first node of a bridge
:param t - second node of a bridge
:return path - the combination of both paths
"""
reverseL = True if s == pathL[0] else False
reverseR = True if t == pathR[-1] else False
# Reverse the parent pointers of pathL
if reverseL:
nodeParent[ pathL[0] ] = None
prevv = None
currentv = pathL[-1]
nextv = None
while currentv != None:
nextv = nodeParent[currentv]
nodeParent[currentv] = prevv
prevv = currentv
currentv = nextv
# Reverse the list pathL
pathL.reverse()
# Reverse the parent pointers of pathR
if reverseR:
nodeParent[ pathR[0] ] = None
prevv = None
currentv = pathR[-1]
nextv = None
while currentv != None:
nextv = nodeParent[currentv]
nodeParent[currentv] = prevv
prevv = currentv
currentv = nextv
# Reverse the list pathR
pathR.reverse()
# Initialize the combined path
path = [ ]
path.extend( pathL )
path.extend( pathR )
# Connect the parent pointers of the path nodes
nodeParent[ pathR[0] ] = pathL[-1]
return path
def augmentMatching(lv, rv):
""" Augment the matching by the path from vertex lv to vertex rv.
:param lv - the left vertex
:param rv - the right vertex
"""
# Iterate through the path by following the parent pointers
firstv = rv
secondv = None
while firstv != lv:
# Get the parent node of firstv
secondv = nodeParent[firstv]
if mate.get(secondv) != firstv:
assert mate.get(firstv) != secondv
# Add the vertices to mate
mate[firstv] = secondv
mate[secondv] = firstv
firstv = secondv
def leftDfs(dfsInfo):
""" The leftDfs subroutine.
One step of the left depth-first search process. This step either
advances vL to a predecessor, backtracks, or signals the discovery
of a bloom.
:param dfsInfo - the information stored for the depth-first searches
:return bool - True if a bloom was found, False otherwise
"""
# Search through all unused and unerased predecessor edges of vL
for uL in nodePredecessors[dfsInfo.vL]:
# Skip the edge (vL, uL) if it is used or erased
if G[dfsInfo.vL][uL]['use'] == USED or nodeErase[uL] == ERASED:
continue
# Mark the edge (vL, uL) as used
G[dfsInfo.vL][uL]['use'] = USED
# If uL belongs to a bloom, set the bloombase of uL
if nodeBloom[uL]:
uL = baseStar(uL)
# If uL is unmarked, set its mark and exit
if nodeMark[uL] == UNMARKED:
nodeMark[uL] = LEFT
nodeParent[uL] = dfsInfo.vL
dfsInfo.vL = uL
bloomNodes.append( uL )
return False
# Otherwise if u is equal to vR, set the dcv equal to u
elif uL == dfsInfo.vR:
dfsInfo.dcv = uL
# If u has a mark, then leftDfs is backtracking
if dfsInfo.vL == dfsInfo.s:
return True # Signal discovery of a bloom
elif nodeParent[dfsInfo.vL] != None:
dfsInfo.vL = nodeParent[dfsInfo.vL] # Keep backtracking
return False
def rightDfs(dfsInfo):
""" The rightDfs subroutine.
One step of the right depth-first search process. This step either
        advances vR to a predecessor, backtracks, or signals the discovery
of a bloom.
:param dfsInfo - the information stored for the depth-first searches
:return bool - True if a bloom was found, False otherwise
"""
# Search through all unused and unerased predecessor edges of vR
for uR in nodePredecessors[dfsInfo.vR]:
# Skip the edge (vR, uR) if it is used or erased
if G[dfsInfo.vR][uR]['use'] == USED or nodeErase[uR] == ERASED:
continue
# Mark the edge (vR, uR) as used
G[dfsInfo.vR][uR]['use'] = USED
# If uR belongs to a bloom, set the bloombase of uR
if nodeBloom[uR]:
uR = baseStar(uR)
# If u is unmarked, set its mark and exit
if nodeMark[uR] == UNMARKED:
nodeMark[uR] = RIGHT
nodeParent[uR] = dfsInfo.vR
dfsInfo.vR = uR
bloomNodes.append( uR )
return False
# Otherwise if u is equal to vL, set the dcv equal to u
elif uR == dfsInfo.vL:
dfsInfo.dcv = uR
# The vertex vR has no more unused predecessor edges
if dfsInfo.vR == dfsInfo.barrier:
dfsInfo.vR = dfsInfo.dcv
dfsInfo.barrier = dfsInfo.dcv
nodeMark[dfsInfo.vR] = RIGHT
if nodeParent[dfsInfo.vL] != None:
dfsInfo.vL = nodeParent[dfsInfo.vL] # Force leftDfs to backtrack from vL = dcv
elif nodeParent[dfsInfo.vR] != None:
dfsInfo.vR = nodeParent[dfsInfo.vR] # Keep backtracking
return False
def erasePath(path):
""" The erasePath subroutine (erase).
Set the erase attribute for all vertices in the input path to
erased. Once all predecessors of a vertex have been erased, the
vertex itself is erased too.
:param path - the list of vertices to be erased
"""
# While there are vertices left in the path
while path:
# Get a vertex from the path
y = path.pop()
nodeErase[y] = ERASED
# Iterate through each of its successors
for z in nodeSuccessors[y]:
if nodeErase[z] == UNERASED:
nodeCount[z] -= 1
# If the successor is unerased, add it to the path
if nodeCount[z] == 0:
path.append( z )
def findPath(high, low, b):
""" The findPath subroutine.
Find an alternating path from vertex high to vertex low through
the predecessor vertices. Note that the level of high is greater
or equal to the level of low. Call openBloom to find paths through
blooms other than bloom b.
:param high - the high vertex
:param low - the low vertex
:param b - the bloom given
:return path - the alternating path found
"""
# Determine the level of the vertices high and low
level_high = min(nodeEvenLevel[high], nodeOddLevel[high])
level_low = min(nodeEvenLevel[low], nodeOddLevel[low])
assert level_high >= level_low
# If the vertices are equivalent, return a single node path
if high == low:
return [high]
# Initialize the alternating path
path = [ ]
# Perform a depth-first search to find the vertex low from the vertex high
v = high
u = high
while u != low:
# Check whether v has unvisited predecessor edges
hasUnvisitedPredecessor = False
for p in nodePredecessors[v]:
# Break if the edge (p, v) is unvisited
if G[p][v]['visit'] == UNVISITED:
hasUnvisitedPredecessor = True
# Check whether vertex v belongs to a bloom, set u accordingly
if nodeBloom[v] == None or nodeBloom[v] == b:
G[p][v]['visit'] = VISITED
u = p
else:
u = nodeBloom[v].base
break
# There are no unvisited predecessor edges, so backtrack
if not hasUnvisitedPredecessor:
assert nodeParent[v] != None
v = nodeParent[v]
else:
# Get the level of node u
level_u = min(nodeEvenLevel[u], nodeOddLevel[u])
# Mark u visited and set the parent pointers
if nodeErase[u] == UNERASED and level_u >= level_low \
and ( u == low or ( nodeVisit[u] == UNVISITED \
and ( nodeMark[u] == nodeMark[high] != UNMARKED \
or ( nodeBloom[u] != None and nodeBloom[u] != b ) ) ) ):
nodeVisit[u] = VISITED
nodeParent[u] = v
v = u
# Compute the path
while u != high:
path.append(u)
u = nodeParent[u]
path.append( u )
path.reverse()
# The path has been found, except for blooms other than bloom b
# These blooms must be opened using openBloom
j = 0
while j < len(path) - 1:
xj = path[j]
# Replace the part of the path by the output of openBloom
if nodeBloom[xj] != None and nodeBloom[xj] != b:
nodeVisit[xj] = UNVISITED
path[j : j + 2], pathLength = openBloom( xj )
nodeParent[ xj ] = path[j - 1] if j > 0 else None
j += pathLength - 1
j += 1
return path
def openBloom(x):
""" The openBloom subroutine (open).
Return an alternating path from vertex x through the bloom of
x to the base of the bloom. Call findPath to get this alternating
path.
:param x - the vertex given
:return path - the alternating path through the bloom
"""
# Get the bloom that vertex x corresponds to
bloom = nodeBloom[x]
base = bloom.base
level_x = min(nodeEvenLevel[x], nodeOddLevel[x])
path = [ ]
if level_x % 2 == 0: # If x is outer
path = findPath(x, base, bloom)
else: # Else x is inner
# Get the peaks of the bloom
(leftPeak, rightPeak) = bloom.peaks
if nodeMark[x] == LEFT: # If x is marked left
pathLeft = findPath(leftPeak, x, bloom)
pathRight = findPath(rightPeak, base, bloom)
path = connectPath(pathLeft, pathRight, leftPeak, rightPeak)
elif nodeMark[x] == RIGHT: # Else x is marked right
pathLeft = findPath(rightPeak, x, bloom)
pathRight = findPath(leftPeak, base, bloom)
path = connectPath(pathLeft, pathRight, rightPeak, leftPeak)
return ( path, len(path) )
def baseStar(v):
""" The base* function.
Return the base* of the vertex v and compress the path
traversed. This has the effect of shrinking a bloom into its
base*.
:param v - the vertex given
:return base - the base* of v
"""
base = v
while nodeBloom[base] != None:
assert nodeBloom[base].base != base
base = nodeBloom[base].base
# Path compression:
#while nodeBaseStar[n] != None:
# n = nodeBaseStar[n]
#while v != n:
# vNext = nodeBaseStar[v]
# nodeBaseStar[v] = n
# v = vNext
return base
# Main loop: continue iteration until no further augmentation is possible.
augmented = True
while augmented:
# Initialize/reset the nodes
for v in G.nodes_iter():
nodeEvenLevel[v] = INFINITY
nodeOddLevel[v] = INFINITY
nodeBloom[v] = None
nodePredecessors[v] = [ ]
nodeSuccessors[v] = [ ]
nodeAnomalies[v] = [ ]
nodeCount[v] = 0
nodeErase[v] = UNERASED
nodeVisit[v] = UNVISITED
nodeMark[v] = UNMARKED
nodeParent[v] = None
# Path compression
#nodeBaseStar[v] = None
# Initialize/reset the edges
for u, v, d in G.edges_iter( data=True ):
if u == v: continue # Ignore self-loops
d['use'] = UNUSED
d['visit'] = UNVISITED
# Initialize/reset the candidates and bridges
for i in range( len( gnodes ) + 1 ):
candidates[i] = [ ]
bridges[i] = structures.OrderedSet()
# Call the search subroutine
augmented = search()
# Paranoia check that the matching is symmetric
for v in mate:
assert mate[ mate[v] ] == v
# Delete edge attributes from graph G
for u, v, d in G.edges_iter( data=True ):
if u == v: continue # Ignore self-loops
del d['use']
del d['visit']
return mate
#end
| {
"content_hash": "bc9bfed16d142bd439f98a724bfcc0d1",
"timestamp": "",
"source": "github",
"line_count": 702,
"max_line_length": 123,
"avg_line_length": 38.13247863247863,
"alnum_prop": 0.5050244686017408,
"repo_name": "AlexanderSoloviev/mv-matching",
"id": "b41e3290386625dea1cf5827618413c1ca75606f",
"size": "26792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matching/matching.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82960"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
def home(request):
    return HttpResponse("Hello world!")
| {
"content_hash": "1c1101cef2585ad5b91a7eaf6dc51825",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 39,
"avg_line_length": 19.4,
"alnum_prop": 0.7628865979381443,
"repo_name": "alex/tracebin",
"id": "1b5680c8b2dd626a52a534a3bb5cdc8a50867e8d",
"size": "97",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/tracebin_server/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "7712"
},
{
"name": "Python",
"bytes": "96821"
}
],
"symlink_target": ""
} |
"""
based on a Java version:
Based on original version written in BCPL by Dr Martin Richards
in 1981 at Cambridge University Computer Laboratory, England
and a C++ version derived from a Smalltalk version written by
L Peter Deutsch.
Java version: Copyright (C) 1995 Sun Microsystems, Inc.
Translation from C++, Mario Wolczko
Outer loop added by Alex Jacoby
"""
import pyperf
# Task IDs
I_IDLE = 1
I_WORK = 2
I_HANDLERA = 3
I_HANDLERB = 4
I_DEVA = 5
I_DEVB = 6
# Packet types
K_DEV = 1000
K_WORK = 1001
# Packet
BUFSIZE = 4
BUFSIZE_RANGE = range(BUFSIZE)
class Packet(object):
def __init__(self, l, i, k):
self.link = l
self.ident = i
self.kind = k
self.datum = 0
self.data = [0] * BUFSIZE
def append_to(self, lst):
self.link = None
if lst is None:
return self
else:
p = lst
next = p.link
while next is not None:
p = next
next = p.link
p.link = self
return lst
# Task Records
class TaskRec(object):
pass
class DeviceTaskRec(TaskRec):
def __init__(self):
self.pending = None
class IdleTaskRec(TaskRec):
def __init__(self):
self.control = 1
self.count = 10000
class HandlerTaskRec(TaskRec):
def __init__(self):
self.work_in = None
self.device_in = None
def workInAdd(self, p):
self.work_in = p.append_to(self.work_in)
return self.work_in
def deviceInAdd(self, p):
self.device_in = p.append_to(self.device_in)
return self.device_in
class WorkerTaskRec(TaskRec):
def __init__(self):
self.destination = I_HANDLERA
self.count = 0
# Task
class TaskState(object):
def __init__(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
def packetPending(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
return self
def waiting(self):
self.packet_pending = False
self.task_waiting = True
self.task_holding = False
return self
def running(self):
self.packet_pending = False
self.task_waiting = False
self.task_holding = False
return self
def waitingWithPacket(self):
self.packet_pending = True
self.task_waiting = True
self.task_holding = False
return self
def isPacketPending(self):
return self.packet_pending
def isTaskWaiting(self):
return self.task_waiting
def isTaskHolding(self):
return self.task_holding
def isTaskHoldingOrWaiting(self):
return self.task_holding or (not self.packet_pending and self.task_waiting)
def isWaitingWithPacket(self):
return self.packet_pending and self.task_waiting and not self.task_holding
tracing = False
layout = 0
def trace(a):
global layout
layout -= 1
if layout <= 0:
print()
layout = 50
print(a, end='')
TASKTABSIZE = 10
class TaskWorkArea(object):
def __init__(self):
self.taskTab = [None] * TASKTABSIZE
self.taskList = None
self.holdCount = 0
self.qpktCount = 0
taskWorkArea = TaskWorkArea()
class Task(TaskState):
def __init__(self, i, p, w, initialState, r):
self.link = taskWorkArea.taskList
self.ident = i
self.priority = p
self.input = w
self.packet_pending = initialState.isPacketPending()
self.task_waiting = initialState.isTaskWaiting()
self.task_holding = initialState.isTaskHolding()
self.handle = r
taskWorkArea.taskList = self
taskWorkArea.taskTab[i] = self
def fn(self, pkt, r):
raise NotImplementedError
def addPacket(self, p, old):
if self.input is None:
self.input = p
self.packet_pending = True
if self.priority > old.priority:
return self
else:
p.append_to(self.input)
return old
def runTask(self):
if self.isWaitingWithPacket():
msg = self.input
self.input = msg.link
if self.input is None:
self.running()
else:
self.packetPending()
else:
msg = None
return self.fn(msg, self.handle)
def waitTask(self):
self.task_waiting = True
return self
def hold(self):
taskWorkArea.holdCount += 1
self.task_holding = True
return self.link
def release(self, i):
t = self.findtcb(i)
t.task_holding = False
if t.priority > self.priority:
return t
else:
return self
def qpkt(self, pkt):
t = self.findtcb(pkt.ident)
taskWorkArea.qpktCount += 1
pkt.link = None
pkt.ident = self.ident
return t.addPacket(pkt, self)
def findtcb(self, id):
t = taskWorkArea.taskTab[id]
if t is None:
raise Exception("Bad task id %d" % id)
return t
# DeviceTask
class DeviceTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, p, w, s, r)
def fn(self, pkt, r):
d = r
assert isinstance(d, DeviceTaskRec)
if pkt is None:
pkt = d.pending
if pkt is None:
return self.waitTask()
else:
d.pending = None
return self.qpkt(pkt)
else:
d.pending = pkt
if tracing:
trace(pkt.datum)
return self.hold()
class HandlerTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, p, w, s, r)
def fn(self, pkt, r):
h = r
assert isinstance(h, HandlerTaskRec)
if pkt is not None:
if pkt.kind == K_WORK:
h.workInAdd(pkt)
else:
h.deviceInAdd(pkt)
work = h.work_in
if work is None:
return self.waitTask()
count = work.datum
if count >= BUFSIZE:
h.work_in = work.link
return self.qpkt(work)
dev = h.device_in
if dev is None:
return self.waitTask()
h.device_in = dev.link
dev.datum = work.data[count]
work.datum = count + 1
return self.qpkt(dev)
# IdleTask
class IdleTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, 0, None, s, r)
def fn(self, pkt, r):
i = r
assert isinstance(i, IdleTaskRec)
i.count -= 1
if i.count == 0:
return self.hold()
elif i.control & 1 == 0:
i.control //= 2
return self.release(I_DEVA)
else:
i.control = i.control // 2 ^ 0xd008
return self.release(I_DEVB)
# WorkTask
A = ord('A')
class WorkTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, p, w, s, r)
def fn(self, pkt, r):
w = r
assert isinstance(w, WorkerTaskRec)
if pkt is None:
return self.waitTask()
if w.destination == I_HANDLERA:
dest = I_HANDLERB
else:
dest = I_HANDLERA
w.destination = dest
pkt.ident = dest
pkt.datum = 0
for i in BUFSIZE_RANGE: # range(BUFSIZE)
w.count += 1
if w.count > 26:
w.count = 1
pkt.data[i] = A + w.count - 1
return self.qpkt(pkt)
def schedule():
t = taskWorkArea.taskList
while t is not None:
if tracing:
print("tcb =", t.ident)
if t.isTaskHoldingOrWaiting():
t = t.link
else:
if tracing:
trace(chr(ord("0") + t.ident))
t = t.runTask()
class Richards(object):
def run(self, iterations):
for i in range(iterations):
taskWorkArea.holdCount = 0
taskWorkArea.qpktCount = 0
IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec())
wkq = Packet(None, 0, K_WORK)
wkq = Packet(wkq, 0, K_WORK)
WorkTask(I_WORK, 1000, wkq, TaskState(
).waitingWithPacket(), WorkerTaskRec())
wkq = Packet(None, I_DEVA, K_DEV)
wkq = Packet(wkq, I_DEVA, K_DEV)
wkq = Packet(wkq, I_DEVA, K_DEV)
HandlerTask(I_HANDLERA, 2000, wkq, TaskState(
).waitingWithPacket(), HandlerTaskRec())
wkq = Packet(None, I_DEVB, K_DEV)
wkq = Packet(wkq, I_DEVB, K_DEV)
wkq = Packet(wkq, I_DEVB, K_DEV)
HandlerTask(I_HANDLERB, 3000, wkq, TaskState(
).waitingWithPacket(), HandlerTaskRec())
wkq = None
DeviceTask(I_DEVA, 4000, wkq,
TaskState().waiting(), DeviceTaskRec())
DeviceTask(I_DEVB, 5000, wkq,
TaskState().waiting(), DeviceTaskRec())
schedule()
if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246:
pass
else:
return False
return True
if __name__ == "__main__":
runner = pyperf.Runner()
runner.metadata['description'] = "The Richards benchmark"
richard = Richards()
runner.bench_func('richards', richard.run, 1)
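    # Illustrative standalone run (a sketch; pyperf's Runner parses its own
    # command-line options):
    #   python bm_richards.py -o richards.json
    # benchmarks Richards.run and writes the timings to richards.json.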
| {
"content_hash": "32fa503c7bbb28ae534f6b59aac1ecbf",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 83,
"avg_line_length": 22.55082742316785,
"alnum_prop": 0.5377922214068561,
"repo_name": "python/performance",
"id": "b9167d1e5574007e89694fc38ac0b19ce25d4fdb",
"size": "9539",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pyperformance/benchmarks/bm_richards.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "133837"
},
{
"name": "Python",
"bytes": "463402"
},
{
"name": "Shell",
"bytes": "14726"
}
],
"symlink_target": ""
} |
'''
Functions to start and configure the Flask
application. Loads routes from the routes.py
script.
'''
import flask
from app.classes.ckan import CKAN
from app.routes.users import blueprint_users
from app.routes.queues import blueprint_queues
from app.routes.status import blueprint_status
from app.routes.datasets import blueprint_datasets
from app.routes.countries import blueprint_countries
from app.routes.revisions import blueprint_revisions
from app.routes.resources import blueprint_resources
from app.routes.organizations import blueprint_organizations
from app.routes.gallery_items import blueprint_gallery_items
def createServer(database_uri, debug=False):
'''
Creates a Flask application as an object.
'''
app = flask.Flask(__name__)
app.debug = debug
app.host = '0.0.0.0'
app.register_blueprint(blueprint_users)
app.register_blueprint(blueprint_status)
app.register_blueprint(blueprint_queues)
app.register_blueprint(blueprint_datasets)
app.register_blueprint(blueprint_countries)
app.register_blueprint(blueprint_revisions)
app.register_blueprint(blueprint_resources)
app.register_blueprint(blueprint_organizations)
app.register_blueprint(blueprint_gallery_items)
return app
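# Illustrative usage (a sketch; the URI is hypothetical and, as shown here,
# createServer does not store it -- the blueprints handle their own data access):
#   app = createServer('postgresql://localhost/hdx', debug=True)
#   app.run(host=app.host)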
| {
"content_hash": "a2bb743fc220c53e3e0a687bfa65ee4f",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 60,
"avg_line_length": 31.435897435897434,
"alnum_prop": 0.800978792822186,
"repo_name": "luiscape/hdx-monitor-sql-collect",
"id": "6df3da4c0e05f97264ef865d4ac13c3de86668d7",
"size": "1268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "171"
},
{
"name": "Python",
"bytes": "49025"
},
{
"name": "Shell",
"bytes": "1094"
}
],
"symlink_target": ""
} |