text stringlengths 4–1.02M | meta dict |
---|---|
import uuid
import random
from django.test import TestCase, Client
from main.models import Question_option
class TestSession(TestCase):
fixtures = ['basic_setup.json']
def setUp(self):
# Create our client, login and select a course
self.client = Client()
self.client.post('/login/', {'username': 'tutor1', 'password': '1234'})
self.client.post('/tutor/select_course/', {'course': 1}, follow=True)
# If no name is specified, an error should be displayed
def test_session_create_missing_name(self):
response = self.client.post('/tutor/sessions/new/', {'session-title': ''})
self.assertContains(response, 'You must specify a title for this session')
# Add a session and then check it appears in the list of sessions
def test_session_create(self):
session_name = uuid.uuid4() # Get a random session name
response = self.client.post('/tutor/sessions/new/', {'session-title': session_name}, follow=True)
self.assertContains(response, session_name)
# Attempt to add a question with no body to this session
def test_session_add_question_no_name(self):
response = self.client.post('/tutor/sessions/2/questions/add/', {'question': '', 'max-options': 0})
self.assertContains(response, 'Your question must have a body')
# Add a random question and ensure it is recalled properly
def test_session_add_question(self):
question_data = {}
# Give the question a random name
question_data['question'] = uuid.uuid4()
question_data['max-options'] = 10
# Add an option for each index from 1 up to max-options - 1
for i in range(1,question_data['max-options']):
question_data['option-body[{0}]'.format(i)] = uuid.uuid4()
# Only mark the option as correct when a random bit is set; this simulates checkboxes
if (random.getrandbits(1)):
question_data['option-correct[{0}]'.format(i)] = True
response = self.client.post('/tutor/sessions/2/questions/add/', question_data, follow=True)
self.assertContains(response, question_data['question'])
# The question body saved correctly; now check that the options were saved as well
for i in range(1,question_data['max-options']):
# Check if option i exists
correct = bool('option-correct[{0}]'.format(i) in question_data)
option_body = question_data['option-body[{0}]'.format(i)]
self.assertTrue(Question_option.objects.filter(correct=correct, body=option_body).exists())
# Attempt to add a question where the "max-options" hidden input is missing
def test_session_add_question_missing_max_options(self):
question_data = {
'question': 'foo',
'option-body[0]': 'bar'
}
response = self.client.post('/tutor/sessions/2/questions/add/', question_data, follow=True)
self.assertContains(response, '"max-options" option was missing from your request')
# Attempt to load the edit page for a question in a course that the tutor is not assigned to teach
def test_session_view_unassigned_question(self):
response = self.client.get('/tutor/sessions/3/questions/edit/4/')
self.assertContains(response, 'The session specified could not be found')
# Attempt to load the edit page for a question that does not exist anywhere in the system
def test_session_view_non_existent_question(self):
response = self.client.get('/tutor/sessions/1/questions/edit/999/')
self.assertContains(response, 'The question specified could not be found')
# Attempt to load the edit page for a session in a course that the tutor is not assigned to teach
def test_session_view_unassigned_session(self):
response = self.client.get('/tutor/sessions/3/questions/edit/4/')
self.assertContains(response, 'The session specified could not be found') | {
"content_hash": "a726fb88722fc6b9670237c56f06836e",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 107,
"avg_line_length": 48.5609756097561,
"alnum_prop": 0.6685082872928176,
"repo_name": "camerongray1515/SELP-Audience-Response-System",
"id": "ce618a80dc7e0d0dc86227ec8df3dfe4b78de720",
"size": "3982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flash_response/main/tests/test_session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45465"
},
{
"name": "JavaScript",
"bytes": "102863"
},
{
"name": "Python",
"bytes": "47390"
},
{
"name": "TeX",
"bytes": "25505"
}
],
"symlink_target": ""
} |
from fakes import MyOpener
import wpl
import os
def load_file(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
def test_find_item__isbn10__makes_good_request():
opener = MyOpener('')
w = wpl.Library(opener)
w.find_item('1593974744')
assert (opener.last_request['url'] ==
'http://books.kpl.org/search~S3/?searchtype=i&searcharg=1593974744&searchscope=3&searchlimits=')
def test_find_item__isbn10_in_library__returns_record():
opener = MyOpener(load_file('wpl_has_item.html'))
w = wpl.Library(opener)
record = w.find_item('1593974744')
assert record == 'http://books.kpl.org/search~S3/?searchtype=i&searcharg=1593974744&searchscope=3&searchlimits='
def test_find_item__isbn10_not_in_library__returns_false():
opener = MyOpener(load_file('wpl_has_not_item.html'))
w = wpl.Library(opener)
has = w.find_item('0321125215')
assert not has
| {
"content_hash": "7ac6a72ee3e8beb57c6baa39392d0d95",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 116,
"avg_line_length": 28.484848484848484,
"alnum_prop": 0.6840425531914893,
"repo_name": "blairconrad/LibraryLookup",
"id": "7368ca9cc3418602b9e2ac6e3159f72466ae04c1",
"size": "963",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Tests/test_wpl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "693"
},
{
"name": "HTML",
"bytes": "302305"
},
{
"name": "JavaScript",
"bytes": "13245"
},
{
"name": "Python",
"bytes": "24909"
}
],
"symlink_target": ""
} |
from flask import g
import pandas as pd
def get_product_list(params):
df = pd.read_sql('''
select item_name
from donorschoose_resources
where item_name like %(item_name)s
limit 100
''', \
g.db_engine, \
params={'item_name': params['item_name']}
)
return df
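# Illustrative usage sketch (the pattern value below is an assumption, not from
# the original source): the caller supplies the full LIKE pattern, wildcards
# included, e.g.
#
#     df = get_product_list({'item_name': '%pencil%'})
#
# Because the value is passed through ``params`` rather than formatted into the
# SQL string, pandas and the database driver bind it as a query parameter,
# which avoids quoting and injection issues.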
| {
"content_hash": "8c9be032951687f150ea73923d7e08e1",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 49,
"avg_line_length": 19.58823529411765,
"alnum_prop": 0.5525525525525525,
"repo_name": "sampathweb/bayes_hack",
"id": "bf78d1ff6ef2888ef1f8396f7d0563cd68d8549f",
"size": "333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bayes-hack-app/app/blueprints/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "629707"
},
{
"name": "Python",
"bytes": "46745"
},
{
"name": "Shell",
"bytes": "467"
}
],
"symlink_target": ""
} |
"""Provide the RedditorListingMixin class."""
from ....const import urljoin
from ..generator import ListingGenerator
from .base import BaseListingMixin
from .gilded import GildedListingMixin
class RedditorListingMixin(BaseListingMixin, GildedListingMixin):
"""Adds additional methods pertaining to Redditor instances."""
@property
def comments(self):
r"""Provide an instance of :class:`.SubListing` for comment access.
For example, to output the first line of all new comments by
``/u/spez`` try:
.. code:: python
for comment in reddit.redditor('spez').comments.new(limit=None):
print(comment.body.split('\n', 1)[0][:79])
"""
if self.__dict__.get('_comments') is None:
self._comments = SubListing(self._reddit, self._path, 'comments')
return self._comments
@property
def submissions(self):
"""Provide an instance of :class:`.SubListing` for submission access.
For example, to output the titles of the top 100 submissions of all time
for ``/u/spez`` try:
.. code:: python
for submission in reddit.redditor('spez').submissions.top('all'):
print(submission.title)
"""
if self.__dict__.get('_submissions') is None:
self._submissions = SubListing(self._reddit, self._path,
'submitted')
return self._submissions
def downvoted(self, **generator_kwargs):
"""Return a ListingGenerator for items the user has downvoted.
May raise ``prawcore.Forbidden`` after issuing the request if the user
is not authorized to access the list. Note that because this function
returns a :class:`.ListingGenerator` the exception may not occur until
sometime after this function has returned.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
"""
return ListingGenerator(self._reddit, urljoin(self._path, 'downvoted'),
**generator_kwargs)
def gildings(self, **generator_kwargs):
"""Return a ListingGenerator for items the user has gilded.
May raise ``prawcore.Forbidden`` after issuing the request if the user
is not authorized to access the list. Note that because this function
returns a :class:`.ListingGenerator` the exception may not occur until
sometime after this function has returned.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
"""
return ListingGenerator(self._reddit,
urljoin(self._path, 'gilded/given'),
**generator_kwargs)
def hidden(self, **generator_kwargs):
"""Return a ListingGenerator for items the user has hidden.
May raise ``prawcore.Forbidden`` after issuing the request if the user
is not authorized to access the list. Note that because this function
returns a :class:`.ListingGenerator` the exception may not occur until
sometime after this function has returned.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
"""
return ListingGenerator(self._reddit, urljoin(self._path, 'hidden'),
**generator_kwargs)
def saved(self, **generator_kwargs):
"""Return a ListingGenerator for items the user has saved.
May raise ``prawcore.Forbidden`` after issuing the request if the user
is not authorized to access the list. Note that because this function
returns a :class:`.ListingGenerator` the exception may not occur until
sometime after this function has returned.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
"""
return ListingGenerator(self._reddit, urljoin(self._path, 'saved'),
**generator_kwargs)
def upvoted(self, **generator_kwargs):
"""Return a ListingGenerator for items the user has upvoted.
May raise ``prawcore.Forbidden`` after issuing the request if the user
is not authorized to access the list. Note that because this function
returns a :class:`.ListingGenerator` the exception may not occur until
sometime after this function has returned.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
"""
return ListingGenerator(self._reddit, urljoin(self._path, 'upvoted'),
**generator_kwargs)
class SubListing(BaseListingMixin):
"""Helper class for generating ListingGenerator objects."""
def __init__(self, reddit, base_path, subpath):
"""Initialize a SubListing instance.
:param reddit: An instance of :class:`.Reddit`.
:param base_path: The path to the object up to this point.
:param subpath: The additional path to this sublisting.
"""
super(SubListing, self).__init__(reddit, None)
self._listing_use_sort = True
self._reddit = reddit
self._path = urljoin(base_path, subpath)
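# Illustrative sketch (assumed usage, not part of the original module): because
# every method above returns a lazy :class:`.ListingGenerator`, the
# ``prawcore.Forbidden`` error described in the docstrings surfaces during
# iteration rather than at call time, so the ``try`` block must wrap the loop.
#
#     import prawcore
#
#     try:
#         for item in reddit.redditor('spez').saved(limit=10):
#             print(item)
#     except prawcore.Forbidden:
#         print('not authorized to view this listing')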
| {
"content_hash": "a82b080821e37aeee25b1789213a4752",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 39,
"alnum_prop": 0.6384053902302077,
"repo_name": "13steinj/praw",
"id": "eed2ca8212ae27d4288709ec5acce8310acedb80",
"size": "5343",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "praw/models/listing/mixins/redditor.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "667266"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
} |
import dapi
try:
import oauth
import oauth_provider
except ImportError:
oauth_support = False
else:
oauth_support = True
if oauth_support:
from dapi.auth.doauth import AuthOAuth
class OAuthApi(dapi.Api):
auth = AuthOAuth()
oauth_api = OAuthApi(extends=dapi.default_api)
| {
"content_hash": "b0f5f606956a68a0a547da4c6c412d03",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 50,
"avg_line_length": 18.941176470588236,
"alnum_prop": 0.6645962732919255,
"repo_name": "ingenieroariel/dapi",
"id": "de71c7b55b5476f5a56c54ec2653551036754192",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample_project/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14905"
}
],
"symlink_target": ""
} |
import numpy as np
def is_positive_semi_definite(R):
if not isinstance(R, (np.ndarray, np.generic)):
raise ValueError('Encountered an error while checking if the matrix is positive semi definite. \
Expected a numpy array, instead got : {}'.format(R))
# Positive semi-definiteness allows zero eigenvalues, hence >= rather than >.
return np.all(np.linalg.eigvals(R) >= 0)
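# Minimal usage sketch (the matrices below are arbitrary illustrative values,
# not from the original module): the identity matrix passes the check, while a
# matrix with a negative eigenvalue does not.
if __name__ == '__main__':
    R_ok = np.eye(3)                          # eigenvalues: 1, 1, 1
    R_bad = np.array([[1.0, 0.0],
                      [0.0, -2.0]])           # eigenvalues: 1, -2
    print(is_positive_semi_definite(R_ok))    # True
    print(is_positive_semi_definite(R_bad))   # False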
| {
"content_hash": "53508caccb61685f17e3f98ebdfb69bc",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 104,
"avg_line_length": 40.125,
"alnum_prop": 0.6791277258566978,
"repo_name": "siavashk/pycpd",
"id": "7a80880bc0080ff42d1310df76d5731e0f54b8a3",
"size": "321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycpd/utility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "432"
},
{
"name": "Python",
"bytes": "31941"
},
{
"name": "TeX",
"bytes": "5353"
}
],
"symlink_target": ""
} |
'''
The function `rfc3339` formats dates according to the :RFC:`3339`. `rfc3339`
tries to use sensible defaults wherever possible.
'''
__author__ = 'Henry Precheur <[email protected]>'
__license__ = 'Public Domain'
__all__ = ('rfc3339', )
import datetime
import time
def _timezone(utcoffset):
'''
Return a string representing the timezone offset.
>>> _timezone(3600)
'+01:00'
>>> _timezone(-28800)
'-08:00'
'''
hours = abs(utcoffset) // 3600
minutes = abs(utcoffset) % 3600
if utcoffset >= 0:
return '+%02d:%02d' % (hours, minutes)
else:
return '-%02d:%02d' % (hours, minutes)
def _utc_offset(date, use_system_timezone):
'''
Return the UTC offset of `date`. If `date` does not have any `tzinfo`, use
the timezone information stored locally on the system.
>>> if time.daylight:
... system_timezone = -time.altzone
... else:
... system_timezone = -time.timezone
>>> _utc_offset(datetime.datetime.now(), True) == system_timezone
True
>>> _utc_offset(datetime.datetime.now(), False)
0
'''
if date.utcoffset() is not None:
return date.utcoffset()
elif use_system_timezone:
if time.daylight:
# multiplying by "-1" had to be done as pep8 wouldn't allow
# "-time.altzone"
return time.altzone * -1
else:
return time.timezone * -1
else:
return 0
def _utc_string(d):
return d.strftime('%Y-%m-%dT%H:%M:%SZ')
def rfc3339(date, utc=False, use_system_timezone=True):
'''
Return a string formatted according to the :RFC:`3339`. If called with
`utc=True`, it normalizes `date` to the UTC date. If `date` does not have
any timezone information, uses the local timezone::
>>> date = datetime.datetime(2008, 4, 2, 20)
>>> rfc3339(date, utc=True, use_system_timezone=False)
'2008-04-02T20:00:00Z'
>>> rfc3339(date) # doctest: +ELLIPSIS
'2008-04-02T20:00:00...'
If called with `user_system_time=False` don't use the local timezone and
consider the offset to UTC to be zero::
>>> rfc3339(date, use_system_timezone=False)
'2008-04-02T20:00:00+00:00'
`date` must be a `datetime.datetime`, `datetime.date` or a timestamp as
returned by `time.time()`::
>>> rfc3339(0, utc=True, use_system_timezone=False)
'1970-01-01T00:00:00Z'
>>> rfc3339(datetime.date(2008, 9, 6), use_system_timezone=False)
'2008-09-06T00:00:00+00:00'
>>> rfc3339('foo bar')
Traceback (most recent call last):
...
TypeError: expected datetime, got str instead
'''
# Check if `date` is a timestamp.
try:
if utc:
return _utc_string(datetime.datetime.utcfromtimestamp(date))
else:
date = datetime.datetime.fromtimestamp(date)
except TypeError:
pass
if isinstance(date, datetime.date):
# If `date` is a `datetime.date` convert it to a `datetime.datetime`.
if not isinstance(date, datetime.datetime):
date = datetime.datetime(*date.timetuple()[:3])
utcoffset = _utc_offset(date, use_system_timezone)
if utc:
return _utc_string(date
+ datetime.timedelta(seconds=utcoffset))
else:
return date.strftime('%Y-%m-%dT%H:%M:%S') \
+ _timezone(utcoffset)
else:
raise TypeError('expected %s, got %s instead'
% (datetime.datetime.__name__,
date.__class__.__name__))
if __name__ == '__main__':
import doctest
doctest.testmod()
| {
"content_hash": "97bc03701570542b8609ee76026a8787",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 78,
"avg_line_length": 29.664,
"alnum_prop": 0.5827939590075513,
"repo_name": "steveandroulakis/mytardis",
"id": "b5e7fd4f598b3ce69991983574709711bc833004",
"size": "3755",
"binary": false,
"copies": "7",
"ref": "refs/heads/3.0",
"path": "tardis/tardis_portal/rfc3339.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "308601"
},
{
"name": "Python",
"bytes": "1673248"
},
{
"name": "Shell",
"bytes": "953"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import random
import warnings
from time import time
from datetime import datetime
from collections import deque
import six
from twisted.internet import reactor, defer, task
from scrapy.utils.defer import mustbe_deferred
from scrapy.utils.httpobj import urlparse_cached
from scrapy.resolver import dnscache
from scrapy import signals
from .middleware import DownloaderMiddlewareManager
from .handlers import DownloadHandlers
class Slot(object):
"""Downloader slot"""
def __init__(self, concurrency, delay, randomize_delay):
self.concurrency = concurrency
self.delay = delay
self.randomize_delay = randomize_delay
self.active = set()
self.queue = deque()
self.transferring = set()
self.lastseen = 0
self.latercall = None
def free_transfer_slots(self):
return self.concurrency - len(self.transferring)
def download_delay(self):
if self.randomize_delay:
return random.uniform(0.5 * self.delay, 1.5 * self.delay)
return self.delay
def close(self):
if self.latercall and self.latercall.active():
self.latercall.cancel()
def __repr__(self):
cls_name = self.__class__.__name__
return "%s(concurrency=%r, delay=%0.2f, randomize_delay=%r)" % (
cls_name, self.concurrency, self.delay, self.randomize_delay)
def __str__(self):
return (
"<downloader.Slot concurrency=%r delay=%0.2f randomize_delay=%r "
"len(active)=%d len(queue)=%d len(transferring)=%d lastseen=%s>" % (
self.concurrency, self.delay, self.randomize_delay,
len(self.active), len(self.queue), len(self.transferring),
datetime.fromtimestamp(self.lastseen).isoformat()
)
)
def _get_concurrency_delay(concurrency, spider, settings):
delay = settings.getfloat('DOWNLOAD_DELAY')
if hasattr(spider, 'DOWNLOAD_DELAY'):
warnings.warn("%s.DOWNLOAD_DELAY attribute is deprecated, use %s.download_delay instead" %
(type(spider).__name__, type(spider).__name__))
delay = spider.DOWNLOAD_DELAY
if hasattr(spider, 'download_delay'):
delay = spider.download_delay
if hasattr(spider, 'max_concurrent_requests'):
concurrency = spider.max_concurrent_requests
return concurrency, delay
class Downloader(object):
def __init__(self, crawler):
self.settings = crawler.settings
self.signals = crawler.signals
self.slots = {}
self.active = set()
self.handlers = DownloadHandlers(crawler)
self.total_concurrency = self.settings.getint('CONCURRENT_REQUESTS')
self.domain_concurrency = self.settings.getint('CONCURRENT_REQUESTS_PER_DOMAIN')
self.ip_concurrency = self.settings.getint('CONCURRENT_REQUESTS_PER_IP')
self.randomize_delay = self.settings.getbool('RANDOMIZE_DOWNLOAD_DELAY')
self.middleware = DownloaderMiddlewareManager.from_crawler(crawler)
self._slot_gc_loop = task.LoopingCall(self._slot_gc)
self._slot_gc_loop.start(60)
def fetch(self, request, spider):
def _deactivate(response):
self.active.remove(request)
return response
self.active.add(request)
dfd = self.middleware.download(self._enqueue_request, request, spider)
return dfd.addBoth(_deactivate)
def needs_backout(self):
return len(self.active) >= self.total_concurrency
def _get_slot(self, request, spider):
key = self._get_slot_key(request, spider)
if key not in self.slots:
conc = self.ip_concurrency if self.ip_concurrency else self.domain_concurrency
conc, delay = _get_concurrency_delay(conc, spider, self.settings)
self.slots[key] = Slot(conc, delay, self.randomize_delay)
return key, self.slots[key]
def _get_slot_key(self, request, spider):
if 'download_slot' in request.meta:
return request.meta['download_slot']
key = urlparse_cached(request).hostname or ''
if self.ip_concurrency:
key = dnscache.get(key, key)
return key
def _enqueue_request(self, request, spider):
key, slot = self._get_slot(request, spider)
request.meta['download_slot'] = key
def _deactivate(response):
slot.active.remove(request)
return response
slot.active.add(request)
self.signals.send_catch_log(signal=signals.request_reached_downloader,
request=request,
spider=spider)
deferred = defer.Deferred().addBoth(_deactivate)
slot.queue.append((request, deferred))
self._process_queue(spider, slot)
return deferred
def _process_queue(self, spider, slot):
if slot.latercall and slot.latercall.active():
return
# Delay queue processing if a download_delay is configured
now = time()
delay = slot.download_delay()
if delay:
penalty = delay - now + slot.lastseen
if penalty > 0:
slot.latercall = reactor.callLater(penalty, self._process_queue, spider, slot)
return
# Process enqueued requests if there are free slots to transfer for this slot
while slot.queue and slot.free_transfer_slots() > 0:
slot.lastseen = now
request, deferred = slot.queue.popleft()
dfd = self._download(slot, request, spider)
dfd.chainDeferred(deferred)
# prevent burst if inter-request delays were configured
if delay:
self._process_queue(spider, slot)
break
def _download(self, slot, request, spider):
# The order is very important for the following deferreds. Do not change!
# 1. Create the download deferred
dfd = mustbe_deferred(self.handlers.download_request, request, spider)
# 2. Notify response_downloaded listeners about the recent download
# before querying queue for next request
def _downloaded(response):
self.signals.send_catch_log(signal=signals.response_downloaded,
response=response,
request=request,
spider=spider)
return response
dfd.addCallback(_downloaded)
# 3. After response arrives, remove the request from transferring
# state to free up the transferring slot so it can be used by the
# following requests (perhaps those which came from the downloader
# middleware itself)
slot.transferring.add(request)
def finish_transferring(_):
slot.transferring.remove(request)
self._process_queue(spider, slot)
return _
return dfd.addBoth(finish_transferring)
def close(self):
self._slot_gc_loop.stop()
for slot in six.itervalues(self.slots):
slot.close()
def _slot_gc(self, age=60):
mintime = time() - age
for key, slot in list(self.slots.items()):
if not slot.active and slot.lastseen + slot.delay < mintime:
self.slots.pop(key).close()
| {
"content_hash": "0c80b77c90eafa8dd3c7b14ba5fa4a78",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 98,
"avg_line_length": 36.68811881188119,
"alnum_prop": 0.6198893536634732,
"repo_name": "Ryezhang/scrapy",
"id": "59c3ad0745d2c73f78729b584cf5c5cdb93a7551",
"size": "7411",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scrapy/core/downloader/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2076"
},
{
"name": "Python",
"bytes": "1337407"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
import unittest
from caffe2.python import core, workspace, dyndep
import caffe2.python.hypothesis_test_util as hu
dyndep.InitOpsLibrary("@/caffe2/caffe2/mpi:mpi_ops")
_has_mpi = False
COMM = None
RANK = 0
SIZE = 0
def SetupMPI():
try:
from mpi4py import MPI
global _has_mpi, COMM, RANK, SIZE
_has_mpi = core.IsOperatorWithEngine("CreateCommonWorld", "MPI")
COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
SIZE = COMM.Get_size()
except ImportError:
_has_mpi = False
@unittest.skipIf(not _has_mpi,
"MPI is not available. Skipping.")
class TestMPI(hu.HypothesisTestCase):
@given(X=hu.tensor(),
root=st.integers(min_value=0, max_value=SIZE - 1),
device_option=st.sampled_from(hu.device_options),
**hu.gcs)
def test_broadcast(self, X, root, device_option, gc, dc):
# Use mpi4py's broadcast to make sure that all nodes inherit the
# same hypothesis test.
X = COMM.bcast(X)
root = COMM.bcast(root)
device_option = COMM.bcast(device_option)
X[:] = RANK
self.assertTrue(
workspace.RunOperatorOnce(
core.CreateOperator(
"CreateCommonWorld", [], "comm", engine="MPI",
device_option=device_option)))
self.assertTrue(workspace.FeedBlob("X", X, device_option))
mpi_op = core.CreateOperator(
"Broadcast", ["comm", "X"], "X", engine="MPI", root=root,
device_option=device_option)
self.assertTrue(workspace.RunOperatorOnce(mpi_op))
new_X = workspace.FetchBlob("X")
np.testing.assert_array_equal(new_X, root)
workspace.ResetWorkspace()
@given(X=hu.tensor(),
root=st.integers(min_value=0, max_value=SIZE - 1),
device_option=st.sampled_from(hu.device_options),
**hu.gcs)
def test_reduce(self, X, root, device_option, gc, dc):
# Use mpi4py's broadcast to make sure that all nodes inherit the
# same hypothesis test.
X = COMM.bcast(X)
root = COMM.bcast(root)
device_option = COMM.bcast(device_option)
X[:] = RANK
self.assertTrue(
workspace.RunOperatorOnce(
core.CreateOperator(
"CreateCommonWorld", [], "comm", engine="MPI",
device_option=device_option)))
self.assertTrue(workspace.FeedBlob("X", X, device_option))
mpi_op = core.CreateOperator(
"Reduce", ["comm", "X"], "X_reduced", engine="MPI", root=root,
device_option=device_option)
self.assertTrue(workspace.RunOperatorOnce(mpi_op))
if (RANK == root):
new_X = workspace.FetchBlob("X")
np.testing.assert_array_equal(new_X, root)
workspace.ResetWorkspace()
@given(X=hu.tensor(),
root=st.integers(min_value=0, max_value=SIZE - 1),
device_option=st.sampled_from(hu.device_options),
inplace=st.booleans(),
**hu.gcs)
def test_allreduce(self, X, root, device_option, inplace, gc, dc):
# Use mpi4py's broadcast to make sure that all nodes inherit the
# same hypothesis test.
X = COMM.bcast(X)
root = COMM.bcast(root)
device_option = COMM.bcast(device_option)
inplace = COMM.bcast(inplace)
X[:] = RANK
self.assertTrue(
workspace.RunOperatorOnce(
core.CreateOperator(
"CreateCommonWorld", [], "comm", engine="MPI",
device_option=device_option)))
# Use mpi4py's broadcast to make sure that all copies have the same
# tensor size.
X = COMM.bcast(X)
X[:] = RANK
self.assertTrue(workspace.FeedBlob("X", X, device_option))
mpi_op = core.CreateOperator(
"Allreduce", ["comm", "X"],
"X" if inplace else "X_reduced",
engine="MPI", root=root,
device_option=device_option)
self.assertTrue(workspace.RunOperatorOnce(mpi_op))
new_X = workspace.FetchBlob("X" if inplace else "X_reduced")
np.testing.assert_array_equal(new_X, SIZE * (SIZE - 1) / 2)
workspace.ResetWorkspace()
@given(X=hu.tensor(),
device_option=st.sampled_from(hu.device_options),
specify_send_blob=st.booleans(),
specify_recv_blob=st.booleans(),
**hu.gcs)
def test_sendrecv(
self, X, device_option, specify_send_blob, specify_recv_blob,
gc, dc):
# Use mpi4py's broadcast to make sure that all nodes inherit the
# same hypothesis test.
X = COMM.bcast(X)
device_option = COMM.bcast(device_option)
specify_send_blob = COMM.bcast(specify_send_blob)
specify_recv_blob = COMM.bcast(specify_recv_blob)
X[:] = RANK
self.assertTrue(
workspace.RunOperatorOnce(
core.CreateOperator(
"CreateCommonWorld", [], "comm", engine="MPI",
device_option=device_option)))
self.assertTrue(workspace.FeedBlob("X", X, device_option))
for src in range(SIZE):
for dst in range(SIZE):
tag = src * SIZE + dst
if src == dst:
continue
elif RANK == src:
X[:] = RANK
self.assertTrue(workspace.FeedBlob("X", X, device_option))
if specify_send_blob:
self.assertTrue(workspace.FeedBlob(
"dst", np.array(dst, dtype=np.int32)))
self.assertTrue(workspace.FeedBlob(
"tag", np.array(tag, dtype=np.int32)))
mpi_op = core.CreateOperator(
"SendTensor", ["comm", "X", "dst", "tag"], [],
engine="MPI", raw_buffer=True,
device_option=device_option)
else:
mpi_op = core.CreateOperator(
"SendTensor", ["comm", "X"], [], engine="MPI",
dst=dst, tag=tag, raw_buffer=True,
device_option=device_option)
self.assertTrue(workspace.RunOperatorOnce(mpi_op))
elif RANK == dst:
if specify_recv_blob:
self.assertTrue(workspace.FeedBlob(
"src", np.array(src, dtype=np.int32)))
self.assertTrue(workspace.FeedBlob(
"tag", np.array(tag, dtype=np.int32)))
mpi_op = core.CreateOperator(
"ReceiveTensor", ["comm", "X", "src", "tag"],
["X", "src", "tag"],
engine="MPI",
src=src, tag=tag, raw_buffer=True,
device_option=device_option)
else:
mpi_op = core.CreateOperator(
"ReceiveTensor", ["comm", "X"], ["X", "src", "tag"],
engine="MPI",
src=src, tag=tag, raw_buffer=True,
device_option=device_option)
self.assertTrue(workspace.RunOperatorOnce(mpi_op))
received = workspace.FetchBlob("X")
np.testing.assert_array_equal(received, src)
src_blob = workspace.FetchBlob("src")
np.testing.assert_array_equal(src_blob, src)
tag_blob = workspace.FetchBlob("tag")
np.testing.assert_array_equal(tag_blob, tag)
# simply wait for the guys to finish
COMM.barrier()
workspace.ResetWorkspace()
if __name__ == "__main__":
SetupMPI()
import unittest
unittest.main()
| {
"content_hash": "db3d622a40fc31c4dc28c27158a27d43",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 80,
"avg_line_length": 41.696969696969695,
"alnum_prop": 0.5314922480620154,
"repo_name": "sf-wind/caffe2",
"id": "29f128e58ffd4d76f509fd522ec977684c7b3616",
"size": "8927",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "caffe2/python/operator_test/mpi_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5415"
},
{
"name": "C",
"bytes": "316608"
},
{
"name": "C++",
"bytes": "4740750"
},
{
"name": "CMake",
"bytes": "139512"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "665218"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Makefile",
"bytes": "1225"
},
{
"name": "Metal",
"bytes": "36752"
},
{
"name": "Objective-C",
"bytes": "6505"
},
{
"name": "Objective-C++",
"bytes": "239139"
},
{
"name": "Python",
"bytes": "2901542"
},
{
"name": "Shell",
"bytes": "31734"
}
],
"symlink_target": ""
} |
from neutron.conf.services import provider_configuration
from oslo_config import cfg
def list_service_provider():
return [
('service_providers', provider_configuration.serviceprovider_opts),
]
_dummy_bgpvpn_provider = ':'.join([
'BGPVPN', 'Dummy',
'networking_bgpvpn.neutron.services.service_drivers.driver_api.'
'BGPVPNDriver',
'default'
])
# Set reasonable example for BGPVPN as a default value
def set_service_provider_default():
cfg.set_defaults(provider_configuration.serviceprovider_opts,
service_provider=[_dummy_bgpvpn_provider])
| {
"content_hash": "65ed9078d022bea1e0d1a0bfc9b1b665",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 75,
"avg_line_length": 27.318181818181817,
"alnum_prop": 0.7054908485856906,
"repo_name": "openstack/networking-bgpvpn",
"id": "6bce35929abdc934583565897307a13c1c1aa80b",
"size": "1156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_bgpvpn/neutron/opts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5665"
},
{
"name": "Mako",
"bytes": "1055"
},
{
"name": "Python",
"bytes": "477528"
},
{
"name": "Shell",
"bytes": "4835"
}
],
"symlink_target": ""
} |
"The protodown app module"
from string import Template
from bootstrap.build import Arguments
def init(arguments):
return "Initialized empty repository"
def config(arguments):
print(arguments)
if arguments.option == 'help':
return 'display config help'
else:
s = Template("setting $option to $value")
return s.substitute(option=arguments.option,value=arguments.value)
def lookup(arguments):
return "lookup tests"
def resolve(arguments):
return "Looking for UC0..."
def generate(arguments):
return "Generating prototype..."
def run(arguments):
return "Starting application..."
class Protodown(object):
"""generate prototypes from markdown"""
def __init__(self):
super(Protodown, self).__init__()
self.arguments=Arguments()
#This should be in the kernel.
self.arguments['config'].set_defaults(func=config);
self.arguments['resolve'].set_defaults(func=resolve);
self.arguments['generate'].set_defaults(func=generate);
self.arguments['run'].set_defaults(func=run);
self.arguments['lookup'].set_defaults(func=lookup);
self.args=self.arguments.get_args()
def print_help(self):
self.arguments.print_help()
#group = parser.add_argument_group('Available protodown commands')
#group.add_argument('init', help="Create an empty Protodown repository or reinitialize an existing one")
#group.add_argument('config', help="Configure your protodown repository")
#group.add_argument('resolve', help="Find solutions for your prototype")
#group.add_argument('generate', help="Let the magic happen")
#group.add_argument('run', help="Start your prototype")
| {
"content_hash": "a54b647111da518d8bff3e06d480b1cc",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 104,
"avg_line_length": 33.28,
"alnum_prop": 0.6947115384615384,
"repo_name": "tweakch/protodown",
"id": "331fc0d24c8cc019e09c4e931ebfc2ceac140321",
"size": "1687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootstrap/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "802"
},
{
"name": "Java",
"bytes": "109"
},
{
"name": "Python",
"bytes": "5422"
}
],
"symlink_target": ""
} |
import ast
from . import parsers
from ..lexer import lexers
from ..utils.parser import (build_call, build_class, build_yield, production,
split_tag_args_string, update_source_pos)
spg = parsers.spg
"""
Structure parser generator.
Used to tokenize and split the template at ``{{``, ``}}``, ``{%``, ``%}``,
``{#`` and ``#}``.
The overall rules are::
doc : CONTENT
| var
| tag
| comment
| doc CONTENT
| doc var
| doc tag
| doc comment
var : VS CONTENT VE
tag : if
| for
if : TS IF CONTENT TE inner TS ENDIF TE
| TS IF CONTENT TE inner TS ELSE TE inner TS ENDIF TE
for : TS FOR CONTENT TE inner TS ENDFOR TE
| TS FOR CONTENT TE inner TS ELSE TE inner TS ENDFOR TE
| TS FOR CONTENT TE inner TS EMPTY TE inner TS ENDFOR TE
comment : CS CONTENT CE
inner : CONTENT
| var
| tag
| comment
| inner CONTENT
| inner var
| inner tag
| inner comment
"""
spg.precedence = []
@production(spg, 'doc : CONTENT')
def doc__CONTENT(state, p):
klass, root_func = build_class()
state.blocks.append(root_func)
content = p[0]
content = update_source_pos(ast.Str(s=content.getstr()), content)
state.append_to_block(build_yield(content))
return klass
@production(spg,
'doc : var',
'doc : tag',
'doc : comment')
def doc__parsed(state, p):
klass, root_func = build_class()
state.blocks.append(root_func)
state.append_to_block(p[0])
return klass
@production(spg, 'doc : doc CONTENT')
def doc__doc_CONTENT(state, p):
doc, content = p
content = update_source_pos(ast.Str(s=content.getstr()), content)
state.append_to_block(build_yield(content))
return doc
@production(spg,
'doc : doc var',
'doc : doc tag')
def doc__doc_parsed(state, p):
doc, parsed = p
state.append_to_block(parsed)
return doc
@production(spg, 'doc : doc comment')
def doc__doc_comment(state, p):
doc, _ = p
return doc
@production(spg, 'var : VS CONTENT VE')
def var__varstart_CONTENT_varend(state, p):
content = parsers.fp.parse(lexers.fl.lex(p[1].getstr()))
return build_yield(build_call(
func=ast.Name(id='auto_escape', ctx=ast.Load()),
args=[
update_source_pos(content, p[1])
]
))
@production(spg,
'tag : if',
'tag : for')
def tag(state, p):
return p[0]
@production(spg, 'if : TS IF CONTENT TE inner TS ENDIF TE')
def if__impl(state, p):
ts, _, condition, _, body, _, _, _ = p
test = parsers.fp.parse(lexers.fl.lex(condition.getstr()))
return update_source_pos(ast.If(
test=test,
body=body,
orelse=[]
), ts)
@production(spg, 'if : TS IF CONTENT TE inner TS ELSE TE inner TS ENDIF TE')
def if__else_impl(state, p):
ts, _, condition, _, body, _, _, _, orelse, _, _, _ = p
test = parsers.fp.parse(lexers.fl.lex(condition.getstr()))
return update_source_pos(ast.If(
test=test,
body=body,
orelse=orelse,
), ts)
@production(spg, 'for : TS FOR CONTENT TE inner TS ENDFOR TE')
def for__impl(state, p):
ts, _, args, _, body, _, _, _ = p
target, in_, var = split_tag_args_string(args.getstr())
if in_ != 'in':
raise ValueError('"in" expected in for loop arguments')
iterator = parsers.fp.parse(lexers.fl.lex(var))
return update_source_pos(ast.For(
target=ast.Subscript(
value=ast.Name(id='context', ctx=ast.Load()),
slice=ast.Index(value=ast.Str(s=target)),
ctx=ast.Store()
),
iter=iterator,
body=body,
orelse=[]
), ts)
@production(spg,
'for : TS FOR CONTENT TE inner TS ELSE TE inner TS ENDFOR TE',
'for : TS FOR CONTENT TE inner TS EMPTY TE inner TS ENDFOR TE')
def for__else_impl(state, p):
ts, _, args, _, body, _, _, _, orelse, _, _, _ = p
target, in_, var = split_tag_args_string(args.getstr())
if in_ != 'in':
raise ValueError('"in" expected in for loop arguments')
iterator = parsers.fp.parse(lexers.fl.lex(var))
return update_source_pos(ast.For(
target=ast.Subscript(
value=ast.Name(id='context', ctx=ast.Load()),
slice=ast.Index(value=ast.Str(s=target)),
ctx=ast.Store()
),
iter=iterator,
body=body,
orelse=orelse
), ts)
@production(spg, 'comment : CS CONTENT CE')
def comment(state, p):
return build_yield(ast.Str(s=''))
@production(spg, 'inner : CONTENT')
def inner__CONTENT(state, p):
content = p[0]
content = update_source_pos(ast.Str(s=content.getstr()), content)
return [build_yield(content)]
@production(spg,
'inner : var',
'inner : tag',
'inner : comment')
def inner__parsed(state, p):
return p
@production(spg, 'inner : inner CONTENT')
def inner__inner_CONTENT(state, p):
inner, content = p
content = update_source_pos(ast.Str(s=content.getstr()), content)
inner.append(build_yield(content))
return inner
@production(spg,
'inner : inner var',
'inner : inner tag')
def inner__inner_parsed(state, p):
inner, parsed = p
inner.append(parsed)
return inner
@production(spg, 'inner : inner comment')
def inner__inner_comment(state, p):
inner, _ = p
return inner
@spg.error
def error(state, token):
raise ValueError('Unexpected token: %r' % token)
| {
"content_hash": "4c30b68cfb0b221d4554e98f218af0fc",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 77,
"avg_line_length": 25.495535714285715,
"alnum_prop": 0.5671511118893364,
"repo_name": "funkybob/rattle",
"id": "5c3e6ea3e891e60eb8def563609b1c5592fc8b46",
"size": "5711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rattle/parser/structure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6816"
},
{
"name": "Python",
"bytes": "78776"
},
{
"name": "Shell",
"bytes": "794"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
import pytest
from statsmodels.imputation import mice
import statsmodels.api as sm
from numpy.testing import assert_equal, assert_allclose
import warnings
try:
import matplotlib.pyplot as plt
except ImportError:
pass
pdf_output = False
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_mice.pdf")
else:
pdf = None
def close_or_save(pdf, fig):
if pdf_output:
pdf.savefig(fig)
def teardown_module():
if pdf_output:
pdf.close()
def gendat():
"""
Create a data set with missing values.
"""
np.random.seed(34243)
n = 200
p = 5
exog = np.random.normal(size=(n, p))
exog[:, 0] = exog[:, 1] - exog[:, 2] + 2*exog[:, 4]
exog[:, 0] += np.random.normal(size=n)
exog[:, 2] = 1*(exog[:, 2] > 0)
endog = exog.sum(1) + np.random.normal(size=n)
df = pd.DataFrame(exog)
df.columns = ["x%d" % k for k in range(1, p+1)]
df["y"] = endog
df.x1[0:60] = np.nan
df.x2[0:40] = np.nan
df.x3[10:30:2] = np.nan
df.x4[20:50:3] = np.nan
df.x5[40:45] = np.nan
df.y[30:100:2] = np.nan
return df
class TestMICEData(object):
def test_default(self):
# Test with all defaults.
df = gendat()
orig = df.copy()
mx = pd.notnull(df)
imp_data = mice.MICEData(df)
nrow, ncol = df.shape
assert_allclose(imp_data.ix_miss['x1'], np.arange(60))
assert_allclose(imp_data.ix_obs['x1'], np.arange(60, 200))
assert_allclose(imp_data.ix_miss['x2'], np.arange(40))
assert_allclose(imp_data.ix_miss['x3'], np.arange(10, 30, 2))
assert_allclose(imp_data.ix_obs['x3'],
np.concatenate((np.arange(10),
np.arange(11, 30, 2),
np.arange(30, 200))))
assert_equal([set(imp_data.data[col]) for col in imp_data.data],
[set(df[col].dropna()) for col in df])
for k in range(3):
imp_data.update_all()
assert_equal(imp_data.data.shape[0], nrow)
assert_equal(imp_data.data.shape[1], ncol)
assert_allclose(orig[mx], imp_data.data[mx])
assert_equal([set(imp_data.data[col]) for col in imp_data.data],
[set(df[col].dropna()) for col in df])
fml = 'x1 ~ x2 + x3 + x4 + x5 + y'
assert_equal(imp_data.conditional_formula['x1'], fml)
assert_equal(imp_data._cycle_order, ['x5', 'x3', 'x4', 'y', 'x2', 'x1'])
# Should make a copy
assert(not (df is imp_data.data))
(endog_obs, exog_obs, exog_miss,
predict_obs_kwds, predict_miss_kwds) = imp_data.get_split_data('x3')
assert_equal(len(endog_obs), 190)
assert_equal(exog_obs.shape, [190, 6])
assert_equal(exog_miss.shape, [10, 6])
def test_settingwithcopywarning(self):
"Test that MICEData does not throw a SettingWithCopyWarning when imputing (https://github.com/statsmodels/statsmodels/issues/5430)"
df = gendat()
# There need to be some ints in here for the error to be thrown
df['intcol'] = np.arange(len(df))
df['intcol'] = df.intcol.astype('int32')
miceData = mice.MICEData(df)
with pd.option_context('mode.chained_assignment', 'warn'):
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
miceData.update_all()
assert len(ws) == 0
def test_next_sample(self):
df = gendat()
imp_data = mice.MICEData(df)
all_x = []
for j in range(2):
x = imp_data.next_sample()
assert(isinstance(x, pd.DataFrame))
assert_equal(df.shape, x.shape)
all_x.append(x)
# The returned dataframes are all the same object
assert(all_x[0] is all_x[1])
def test_pertmeth(self):
# Test with specified perturbation method.
df = gendat()
orig = df.copy()
mx = pd.notnull(df)
nrow, ncol = df.shape
for pert_meth in "gaussian", "boot":
imp_data = mice.MICEData(df, perturbation_method=pert_meth)
for k in range(2):
imp_data.update_all()
assert_equal(imp_data.data.shape[0], nrow)
assert_equal(imp_data.data.shape[1], ncol)
assert_allclose(orig[mx], imp_data.data[mx])
assert_equal(imp_data._cycle_order, ['x5', 'x3', 'x4', 'y', 'x2', 'x1'])
def test_phreg(self):
np.random.seed(8742)
n = 300
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
event_time = np.random.exponential(size=n) * np.exp(x1)
obs_time = np.random.exponential(size=n)
time = np.where(event_time < obs_time, event_time, obs_time)
status = np.where(time == event_time, 1, 0)
df = pd.DataFrame({"time": time, "status": status, "x1": x1, "x2": x2})
df.loc[10:40, 'time'] = np.nan
df.loc[10:40, 'status'] = np.nan
df.loc[30:50, 'x1'] = np.nan
df.loc[40:60, 'x2'] = np.nan
from statsmodels.duration.hazard_regression import PHReg
# Save the dataset size at each iteration.
hist = []
def cb(imp):
hist.append(imp.data.shape)
for pm in "gaussian", "boot":
idata = mice.MICEData(df, perturbation_method=pm, history_callback=cb)
idata.set_imputer("time", "0 + x1 + x2", model_class=PHReg,
init_kwds={"status": mice.PatsyFormula("status")},
predict_kwds={"pred_type": "hr"},
perturbation_method=pm)
x = idata.next_sample()
assert(isinstance(x, pd.DataFrame))
assert(all([val == (299, 4) for val in hist]))
def test_set_imputer(self):
# Test with specified perturbation method.
from statsmodels.regression.linear_model import RegressionResultsWrapper
from statsmodels.genmod.generalized_linear_model import GLMResultsWrapper
df = gendat()
orig = df.copy()
mx = pd.notnull(df)
nrow, ncol = df.shape
imp_data = mice.MICEData(df)
imp_data.set_imputer('x1', 'x3 + x4 + x3*x4')
imp_data.set_imputer('x2', 'x4 + I(x5**2)')
imp_data.set_imputer('x3', model_class=sm.GLM,
init_kwds={"family": sm.families.Binomial()})
imp_data.update_all()
assert_equal(imp_data.data.shape[0], nrow)
assert_equal(imp_data.data.shape[1], ncol)
assert_allclose(orig[mx], imp_data.data[mx])
for j in range(1, 6):
if j == 3:
assert_equal(isinstance(imp_data.models['x3'], sm.GLM), True)
assert_equal(isinstance(imp_data.models['x3'].family, sm.families.Binomial), True)
assert_equal(isinstance(imp_data.results['x3'], GLMResultsWrapper), True)
else:
assert_equal(isinstance(imp_data.models['x%d' % j], sm.OLS), True)
assert_equal(isinstance(imp_data.results['x%d' % j], RegressionResultsWrapper), True)
fml = 'x1 ~ x3 + x4 + x3*x4'
assert_equal(imp_data.conditional_formula['x1'], fml)
fml = 'x4 ~ x1 + x2 + x3 + x5 + y'
assert_equal(imp_data.conditional_formula['x4'], fml)
assert_equal(imp_data._cycle_order, ['x5', 'x3', 'x4', 'y', 'x2', 'x1'])
@pytest.mark.matplotlib
def test_plot_missing_pattern(self, close_figures):
df = gendat()
imp_data = mice.MICEData(df)
for row_order in "pattern", "raw":
for hide_complete_rows in False, True:
for color_row_patterns in False, True:
plt.clf()
fig = imp_data.plot_missing_pattern(row_order=row_order,
hide_complete_rows=hide_complete_rows,
color_row_patterns=color_row_patterns)
close_or_save(pdf, fig)
close_figures()
@pytest.mark.matplotlib
def test_plot_bivariate(self, close_figures):
df = gendat()
imp_data = mice.MICEData(df)
imp_data.update_all()
plt.clf()
for plot_points in False, True:
fig = imp_data.plot_bivariate('x2', 'x4', plot_points=plot_points)
fig.get_axes()[0].set_title('plot_bivariate')
close_or_save(pdf, fig)
close_figures()
@pytest.mark.matplotlib
def test_fit_obs(self, close_figures):
df = gendat()
imp_data = mice.MICEData(df)
imp_data.update_all()
plt.clf()
for plot_points in False, True:
fig = imp_data.plot_fit_obs('x4', plot_points=plot_points)
fig.get_axes()[0].set_title('plot_fit_scatterplot')
close_or_save(pdf, fig)
close_figures()
@pytest.mark.matplotlib
def test_plot_imputed_hist(self, close_figures):
df = gendat()
imp_data = mice.MICEData(df)
imp_data.update_all()
plt.clf()
for plot_points in False, True:
fig = imp_data.plot_imputed_hist('x4')
fig.get_axes()[0].set_title('plot_imputed_hist')
close_or_save(pdf, fig)
close_figures()
class TestMICE(object):
def test_MICE(self):
df = gendat()
imp_data = mice.MICEData(df)
mi = mice.MICE("y ~ x1 + x2 + x1:x2", sm.OLS, imp_data)
result = mi.fit(1, 3)
assert(issubclass(result.__class__, mice.MICEResults))
# Smoke test for results
smr = result.summary()
def test_MICE1(self):
df = gendat()
imp_data = mice.MICEData(df)
mi = mice.MICE("y ~ x1 + x2 + x1:x2", sm.OLS, imp_data)
from statsmodels.regression.linear_model import RegressionResultsWrapper
for j in range(3):
x = mi.next_sample()
assert(issubclass(x.__class__, RegressionResultsWrapper))
def test_MICE1_regularized(self):
df = gendat()
imp = mice.MICEData(df, perturbation_method='boot')
imp.set_imputer('x1', 'x2 + y', fit_kwds={'alpha': 1, 'L1_wt': 0})
imp.update_all()
def test_MICE2(self):
from statsmodels.genmod.generalized_linear_model import GLMResultsWrapper
df = gendat()
imp_data = mice.MICEData(df)
mi = mice.MICE("x3 ~ x1 + x2", sm.GLM, imp_data,
init_kwds={"family": sm.families.Binomial()})
for j in range(3):
x = mi.next_sample()
assert(isinstance(x, GLMResultsWrapper))
assert(isinstance(x.family, sm.families.Binomial))
@pytest.mark.slow
def test_combine(self):
np.random.seed(3897)
x1 = np.random.normal(size=300)
x2 = np.random.normal(size=300)
y = x1 + x2 + np.random.normal(size=300)
x1[0:100] = np.nan
x2[250:] = np.nan
df = pd.DataFrame({"x1": x1, "x2": x2, "y": y})
idata = mice.MICEData(df)
mi = mice.MICE("y ~ x1 + x2", sm.OLS, idata, n_skip=20)
result = mi.fit(10, 20)
fmi = np.asarray([0.1778143, 0.11057262, 0.29626521])
assert_allclose(result.frac_miss_info, fmi, atol=1e-5)
params = np.asarray([-0.03486102, 0.96236808, 0.9970371])
assert_allclose(result.params, params, atol=1e-5)
tvalues = np.asarray([-0.54674776, 15.28091069, 13.61359403])
assert_allclose(result.tvalues, tvalues, atol=1e-5)
def test_micedata_miss1():
# test for #4375
np.random.seed(0)
data = pd.DataFrame(np.random.rand(50, 4))
data.columns = ['var1', 'var2', 'var3', 'var4']
# one column with a single missing value
data.iloc[1, 1] = np.nan
data.iloc[[1, 3], 2] = np.nan
data_imp = mice.MICEData(data)
data_imp.update_all()
assert_equal(data_imp.data.isnull().values.sum(), 0)
ix_miss = {'var1': np.array([], dtype=np.int64),
'var2': np.array([1], dtype=np.int64),
'var3': np.array([1, 3], dtype=np.int64),
'var4': np.array([], dtype=np.int64)}
for k in ix_miss:
assert_equal(data_imp.ix_miss[k], ix_miss[k])
| {
"content_hash": "467b0fe7017d23c1408bd3d6b59b0069",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 139,
"avg_line_length": 31.453164556962026,
"alnum_prop": 0.5540083708950418,
"repo_name": "jseabold/statsmodels",
"id": "40492d2fec31404c8838ad52b70ab8c53bac46bd",
"size": "12424",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "statsmodels/imputation/tests/test_mice.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "1383"
},
{
"name": "Python",
"bytes": "8609450"
},
{
"name": "R",
"bytes": "34228"
},
{
"name": "Stata",
"bytes": "41179"
}
],
"symlink_target": ""
} |
'''
Introduction to pandas
=======================
Quick script to install dependencies and start the notebook server
in the current directory.
'''
import subprocess
try:
hasattr(raw_input, '__call__')
input = raw_input
except:
pass
pkgs = ['numpy', 'scipy', 'pandas', 'jupyter',
'notebook', 'ipython', 'matplotlib', 'seaborn',
'numba']
def install_pkgs(using='pip'):
'''
pip install pkgs
'''
cmd = [using, 'install']
if using == 'pip':
cmd += pkgs + ['tables']
else:
cmd += ['-y'] + pkgs + ['pytables']
subprocess.run(cmd)
def start_notebook():
'''
Starts a jupyter notebook server in the current directory
'''
subprocess.run(['jupyter', 'notebook'])
if __name__ == '__main__':
response = input('This script will install some dependencies. Continue [pip/conda/anaconda] (default: pip): ')
if response in ('', 'pip'):
install_pkgs()
elif response == 'conda':
install_pkgs('conda')
elif response == 'anaconda':
install_pkgs('anaconda')
else:
raise Exception('Unknown option {0}'.format(response))
start_notebook()
| {
"content_hash": "0630c8be67c098c659e7bdbde892b2d4",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 114,
"avg_line_length": 23.56,
"alnum_prop": 0.5831918505942275,
"repo_name": "alexvmarch/pandas_intro",
"id": "4cb5350c13dea3b93ee61a7e68df188e566c725c",
"size": "1201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "39708"
},
{
"name": "Python",
"bytes": "9374"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_ssl_key_cert
short_description: Import/Delete SSL keys and certs from BIG-IP
description:
- This module imports/deletes SSL keys and certificates on a BIG-IP.
Keys can be imported from key files on the local disk, in PEM format.
Certificates can be imported from certificate and key files on the local
disk, in PEM format.
version_added: "1.6.0"
options:
key_content:
description:
- Sets the contents of a key directly to the specified value. This is
used with lookup plugins, or for anything with formatting or templating.
This must be provided when C(state) is C(present).
type: str
state:
description:
- When C(present), ensures the key and/or cert is uploaded to the
device. When C(absent), ensures the key and/or cert is removed
from the device. If the key and/or cert is currently in use, the module
will not be able to remove the key.
type: str
choices:
- present
- absent
default: present
key_name:
description:
- The name of the key.
type: str
passphrase:
description:
- Passphrase on key.
type: str
cert_content:
description:
- Sets the contents of a certificate directly to the specified value.
- This is used with lookup plugins or for anything with formatting or templating.
- C(content) must be provided when C(state) is C(present).
type: str
cert_name:
description:
- SSL Certificate Name. This is the cert name used when importing a certificate
into the BIG-IP. It also determines the filenames of the objects on the LTM.
type: str
issuer_cert:
description:
- Issuer certificate used for OCSP monitoring.
- This parameter is only valid on versions of BIG-IP 13.0.0 or above.
type: str
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Nitin Khanna (@nitinthewiz)
'''
EXAMPLES = r'''
- name: Import both key and cert
bigip_ssl_key_cert:
key_content: "{{ lookup('file', 'key.pem') }}"
key_name: cert1
cert_content: "{{ lookup('file', 'cert.pem') }}"
cert_name: cert1
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import hashlib
import os
import re
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name,
f5_argument_spec, fq_name, merge_two_dicts
)
from ..module_utils.icontrol import (
TransactionContextManager, upload_file, tmos_version
)
from ..module_utils.teem import send_teem
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Parameters(AnsibleF5Parameters):
download_path = '/var/config/rest/downloads'
api_map = {
'sourcePath': 'source_path',
'issuerCert': 'issuer_cert',
}
api_attributes = [
'passphrase',
'sourcePath',
'issuerCert',
]
returnables = [
'checksum',
'source_path',
'issuer_cert',
]
updatables = [
'key_checksum',
'cert_checksum',
'content',
'issuer_cert',
'source_path',
]
class ApiParameters(Parameters):
@property
def key_filename(self):
if self._values['name'] is None:
return None
if not self._values['name'].endswith('.key'):
return None
return self._values['name']
@property
def key_source_path(self):
if self.key_filename is None:
return None
if self._values['key_source_path'] is None:
return None
else:
return self._values['key_source_path']
@property
def cert_filename(self):
if self._values['name'] is None:
return None
if not self._values['name'].endswith('.crt'):
return None
return self._values['name']
@property
def cert_source_path(self):
if self.cert_filename is None:
return None
if self._values['cert_source_path'] is None:
return None
else:
return self._values['cert_source_path']
@property
def key_checksum(self):
if self._values['key_checksum'] is None:
return None
pattern = r'SHA1:\d+:(?P<value>[\w+]{40})'
matches = re.match(pattern, self._values['key_checksum'])
if matches:
return matches.group('value')
@property
def cert_checksum(self):
if self._values['cert_checksum'] is None:
return None
pattern = r'SHA1:\d+:(?P<value>[\w+]{40})'
matches = re.match(pattern, self._values['cert_checksum'])
if matches:
return matches.group('value')
class ModuleParameters(Parameters):
def _get_hash(self, content):
k = hashlib.sha1()
s = StringIO(content)
while True:
data = s.read(1024)
if not data:
break
k.update(data.encode('utf-8'))
return k.hexdigest()
@property
def issuer_cert(self):
if self._values['issuer_cert'] is None:
return None
name = fq_name(self.partition, self._values['issuer_cert'])
if name.endswith('.crt'):
return name
else:
return name + '.crt'
@property
def key_filename(self):
if self.key_name is None:
return None
if self.key_name.endswith('.key'):
return self.key_name
else:
return self.key_name + '.key'
@property
def cert_filename(self):
if self.cert_name is None:
return None
if self.cert_name.endswith('.crt'):
return self.cert_name
else:
return self.cert_name + '.crt'
@property
def key_checksum(self):
if self.key_content is None:
return None
return self._get_hash(self.key_content)
@property
def cert_checksum(self):
if self.cert_content is None:
return None
return self._get_hash(self.cert_content)
@property
def key_source_path(self):
result = 'file://' + os.path.join(
self.download_path,
self.key_filename
)
return result
@property
def cert_source_path(self):
result = 'file://' + os.path.join(
self.download_path,
self.cert_filename
)
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
raise
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def key_checksum(self):
if self.want.key_checksum is None:
return None
if self.want.key_checksum != self.have.key_checksum:
return self.want.key_checksum
@property
def key_source_path(self):
if self.want.key_source_path is None:
return None
if self.want.key_source_path == self.have.key_source_path:
if self.key_checksum:
return self.want.key_source_path
if self.want.key_source_path != self.have.key_source_path:
return self.want.key_source_path
@property
def cert_source_path(self):
if self.want.source_path is None:
return None
if self.want.source_path == self.have.source_path:
if self.cert_content:
return self.want.source_path
if self.want.source_path != self.have.source_path:
return self.want.source_path
@property
def cert_content(self):
if self.want.cert_checksum != self.have.checksum:
result = dict(
checksum=self.want.cert_checksum,
content=self.want.cert_content
)
return result
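# Hedged illustration of the dispatch in Difference.compare(); the toy classes
# and values are made up and nothing below is used by the module. Parameters
# without a dedicated property, such as issuer_cert, fall through to
# __default(), which reports the wanted value only when it differs from what
# the device currently has.
def _example_difference_fallback():
    class _Want(object):
        issuer_cert = '/Common/ca-bundle.crt'  # hypothetical wanted value
    class _Have(object):
        issuer_cert = None  # device currently has no issuer cert configured
    # No 'issuer_cert' property exists on Difference, so __default() runs and
    # returns '/Common/ca-bundle.crt' because the two values differ.
    return Difference(_Want(), _Have()).compare('issuer_cert')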
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
if self.want.key_filename:
self.remove_uploaded_file_from_device(self.want.key_filename)
if self.want.cert_filename:
self.remove_uploaded_file_from_device(self.want.cert_filename)
return True
def remove_uploaded_file_from_device(self, name):
filepath = '/var/config/rest/downloads/{0}'.format(name)
params = {
"command": "run",
"utilCmdArgs": filepath
}
uri = "https://{0}:{1}/mgmt/tm/util/unix-rm".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def exists(self):
# Can't use TransactionContextManager here because
# it expects end result code to be 200 or so. 404 causes
# TransactionContextManager to fail.
if self.want.key_name:
uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-key/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.key_filename)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
# if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
# return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if self.want.cert_name:
uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.cert_filename)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
# if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
# return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def upload_file_to_device(self, content, name):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
upload_file(self.client, url, content, name)
except F5ModuleError:
raise F5ModuleError(
"Failed to upload the file."
)
def _prepare_links(self):
# this is to ensure no duplicates are in the provided collection
links = list()
if self.want.key_name:
key_link = "https://{0}:{1}/mgmt/tm/sys/file/ssl-key/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.key_filename)
)
links.append(key_link)
if self.want.cert_name:
cert_link = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.cert_filename)
)
links.append(cert_link)
return links
def _prepare_links_for_update(self, params_dict):
# this is to ensure no duplicates are in the provided collection
links_and_params = list()
if self.want.key_name:
key_link = "https://{0}:{1}/mgmt/tm/sys/file/ssl-key/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.key_filename)
)
key_params_dict = params_dict.copy()
key_params_dict['sourcePath'] = self.want.key_source_path
links_and_params.append({'link': key_link, 'params': key_params_dict})
if self.want.cert_name:
cert_link = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.cert_filename)
)
cert_params_dict = params_dict.copy()
cert_params_dict['sourcePath'] = self.want.cert_source_path
links_and_params.append({'link': cert_link, 'params': cert_params_dict})
return links_and_params
def _prepare_links_for_create(self, params_dict):
# this is to ensure no duplicates are in the provided collection
links_and_params = list()
if self.want.key_name:
key_link = "https://{0}:{1}/mgmt/tm/sys/file/ssl-key/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
key_params_dict = params_dict.copy()
key_params_dict['name'] = self.want.key_filename
key_params_dict['sourcePath'] = self.want.key_source_path
links_and_params.append({'link': key_link, 'params': key_params_dict})
if self.want.cert_name:
cert_link = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
cert_params_dict = params_dict.copy()
cert_params_dict['name'] = self.want.cert_filename
cert_params_dict['sourcePath'] = self.want.cert_source_path
links_and_params.append({'link': cert_link, 'params': cert_params_dict})
return links_and_params
def create_on_device(self):
params = self.changes.api_params()
params['partition'] = self.want.partition
# params['name'] = self.want.name
links_and_params = self._prepare_links_for_create(params)
if self.want.key_name:
key_content = StringIO(self.want.key_content)
self.upload_file_to_device(key_content, self.want.key_filename)
if self.want.cert_name:
cert_content = StringIO(self.want.cert_content)
self.upload_file_to_device(cert_content, self.want.cert_filename)
with TransactionContextManager(self.client) as transact:
for link in links_and_params:
resp = transact.api.post(link['link'], json=link['params'])
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if not (resp.status in [200, 201] or 'code' in response and
response['code'] in [200, 201]):
raise F5ModuleError(resp.content)
# This needs to be done because of the way that BIG-IP creates certificates.
#
# The extra params (such as OCSP and issuer stuff) are not available in the
# payload. In a nutshell, the available resource attributes *change* after
# a create so that *more* are available.
if self.want.cert_name:
params = self.want.api_params()
if params:
uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.cert_filename)
)
resp = self.client.api.put(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if not (resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]):
raise F5ModuleError(resp.content)
return True
def update_on_device(self):
params = self.changes.api_params()
if self.want.key_name:
key_content = StringIO(self.want.key_content)
self.upload_file_to_device(key_content, self.want.key_filename)
if self.want.cert_name:
cert_content = StringIO(self.want.cert_content)
self.upload_file_to_device(cert_content, self.want.cert_filename)
links_and_params = self._prepare_links_for_update(params)
with TransactionContextManager(self.client) as transact:
for link in links_and_params:
resp = transact.api.patch(link['link'], json=link['params'])
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if not (resp.status in [200, 201] or 'code' in response and
response['code'] in [200, 201]):
raise F5ModuleError(resp.content)
return True
def remove_from_device(self):
links = self._prepare_links()
with TransactionContextManager(self.client) as transact:
for link in links:
resp = transact.api.delete(link)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if not (resp.status in [200, 201]):
raise F5ModuleError(resp.content)
return True
def read_current_from_device(self):
final_response = {}
# TransactionContextManager cannot be used for reading, for
# whatever reason
if self.want.key_name:
uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-key/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.key_filename)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
response['key_checksum'] = response['checksum']
response['key_source_path'] = response['sourcePath']
final_response = merge_two_dicts(final_response, response)
else:
raise F5ModuleError(resp.content)
if self.want.cert_name:
uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.cert_filename)
)
query = '?expandSubcollections=true'
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
response['cert_checksum'] = response['checksum']
response['cert_source_path'] = response['sourcePath']
final_response = merge_two_dicts(final_response, response)
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=final_response)
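# Hedged sketch of a possible refactoring, not part of the upstream module and
# never called here: nearly every REST call in ModuleManager repeats the same
# success test -- accept HTTP 200/201, or a 'code' of 200/201 in the JSON
# body, and raise F5ModuleError otherwise. Factored out, that idiom looks
# roughly like this.
def _example_check_response(resp):
    try:
        response = resp.json()
    except ValueError as ex:
        raise F5ModuleError(str(ex))
    if resp.status in [200, 201] or response.get('code') in [200, 201]:
        return response
    raise F5ModuleError(resp.content)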
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
key_name=dict(),
key_content=dict(),
passphrase=dict(
no_log=True
),
cert_name=dict(),
cert_content=dict(),
issuer_cert=dict(),
state=dict(
required=False,
default='present',
choices=['absent', 'present']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
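# Hedged illustration; the values are placeholders and the dict is never
# consumed. ArgumentSpec merges f5_argument_spec (provider/connection options)
# with the options defined above, so module.params for a typical run carries
# roughly the following keys in addition to the provider settings.
_EXAMPLE_MODULE_PARAMS = dict(
    key_name='key1',
    key_content='(contents of the key PEM)',
    passphrase=None,
    cert_name='cert1',
    cert_content='(contents of the certificate PEM)',
    issuer_cert=None,
    state='present',
    partition='Common',
)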
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| {
"content_hash": "8e107ddd366ec2606b239d0992922fd9",
"timestamp": "",
"source": "github",
"line_count": 793,
"max_line_length": 108,
"avg_line_length": 32.73770491803279,
"alnum_prop": 0.5642694811447941,
"repo_name": "F5Networks/f5-ansible-modules",
"id": "2f27e0ddbc4edcd5296ec7751b6f0fc52f6bffe4",
"size": "26138",
"binary": false,
"copies": "1",
"ref": "refs/heads/doc-update",
"path": "ansible_collections/f5networks/f5_modules/plugins/modules/bigip_ssl_key_cert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1931"
},
{
"name": "Python",
"bytes": "345682"
}
],
"symlink_target": ""
} |
'''
Management of Pacemaker/Corosync clusters with PCS
==================================================
A state module to manage Pacemaker/Corosync clusters
with the Pacemaker/Corosync configuration system (PCS)
.. versionadded:: 2016.11.0
:depends: pcs
Walkthrough of a complete PCS cluster setup:
http://clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/
Requirements:
PCS is installed, pcs service is started and
the password for the hacluster user is set and known.
Remark on the cibname variable used in the examples:
The use of the cibname variable is optional.
Use it only if you want to deploy your changes into a cibfile first and then push it.
This only makes sense if you want to deploy multiple changes (which depend on each other) at once to the cluster.
First, the cibfile must be created:
.. code-block:: yaml
mysql_pcs__cib_present_cib_for_galera:
pcs.cib_present:
- cibname: cib_for_galera
- scope: None
- extra_args: None
Then the cibfile can be modified by creating resources (only one resource is created here for demonstration, see also step 7):
.. code-block:: yaml
mysql_pcs__resource_present_galera:
pcs.resource_present:
- resource_id: galera
- resource_type: "ocf:heartbeat:galera"
- resource_options:
- 'wsrep_cluster_address=gcomm://node1.example.org,node2.example.org,node3.example.org'
- '--master'
- cibname: cib_for_galera
After modifying the cibfile, it can be pushed to the live CIB in the cluster:
.. code-block:: yaml
mysql_pcs__cib_pushed_cib_for_galera:
pcs.cib_pushed:
- cibname: cib_for_galera
- scope: None
- extra_args: None
Create a cluster from scratch:
1. Authorize nodes to each other:
.. code-block:: yaml
pcs_auth__auth:
pcs.auth:
- nodes:
- node1.example.com
- node2.example.com
- pcsuser: hacluster
- pcspasswd: hoonetorg
- extra_args: []
2. Do the initial cluster setup:
.. code-block:: yaml
pcs_setup__setup:
pcs.cluster_setup:
- nodes:
- node1.example.com
- node2.example.com
- pcsclustername: pcscluster
- extra_args:
- '--start'
- '--enable'
3. Optional: Set cluster properties:
.. code-block:: yaml
pcs_properties__prop_has_value_no-quorum-policy:
pcs.prop_has_value:
- prop: no-quorum-policy
- value: ignore
- cibname: cib_for_cluster_settings
4. Optional: Set resource defaults:
.. code-block:: yaml
pcs_properties__resource_defaults_to_resource-stickiness:
pcs.resource_defaults_to:
- default: resource-stickiness
- value: 100
- cibname: cib_for_cluster_settings
5. Optional: Set resource op defaults:
.. code-block:: yaml
pcs_properties__resource_op_defaults_to_monitor-interval:
pcs.resource_op_defaults_to:
- op_default: monitor-interval
- value: 60s
- cibname: cib_for_cluster_settings
6. Configure Fencing (often not optional on a production-ready cluster!):
.. code-block:: yaml
pcs_stonith__created_eps_fence:
pcs.stonith_present:
- stonith_id: eps_fence
- stonith_device_type: fence_eps
- stonith_device_options:
- 'pcmk_host_map=node1.example.org:01;node2.example.org:02'
- 'ipaddr=myepsdevice.example.org'
- 'power_wait=5'
- 'verbose=1'
- 'debug=/var/log/pcsd/eps_fence.log'
- 'login=hidden'
- 'passwd=hoonetorg'
- cibname: cib_for_stonith
7. Add resources to your cluster:
.. code-block:: yaml
mysql_pcs__resource_present_galera:
pcs.resource_present:
- resource_id: galera
- resource_type: "ocf:heartbeat:galera"
- resource_options:
- 'wsrep_cluster_address=gcomm://node1.example.org,node2.example.org,node3.example.org'
- '--master'
- cibname: cib_for_galera
8. Optional: Add constraints (locations, colocations, orders):
.. code-block:: yaml
haproxy_pcs__constraint_present_colocation-vip_galera-haproxy-clone-INFINITY:
pcs.constraint_present:
- constraint_id: colocation-vip_galera-haproxy-clone-INFINITY
- constraint_type: colocation
- constraint_options:
- 'add'
- 'vip_galera'
- 'with'
- 'haproxy-clone'
- cibname: cib_for_haproxy
.. versionadded:: 2016.3.0
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import Python libs
import logging
import os
# Import Salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if pcs package is installed
'''
if salt.utils.path.which('pcs'):
return 'pcs'
return False
def _file_read(path):
'''
Read a file and return content
'''
content = False
if os.path.exists(path):
with salt.utils.files.fopen(path, 'r+') as fp_:
content = salt.utils.stringutils.to_unicode(fp_.read())
fp_.close()
return content
def _file_write(path, content):
'''
Write content to a file
'''
with salt.utils.files.fopen(path, 'w+') as fp_:
fp_.write(salt.utils.stringutils.to_str(content))
fp_.close()
def _get_cibpath():
'''
Get the path to the directory on the minion where CIB's are saved
'''
cibpath = os.path.join(__opts__['cachedir'], 'pcs', __env__)
log.trace('cibpath: %s', cibpath)
return cibpath
def _get_cibfile(cibname):
'''
Get the full path of a cached CIB-file with the name of the CIB
'''
cibfile = os.path.join(_get_cibpath(), '{0}.{1}'.format(cibname, 'cib'))
log.trace('cibfile: %s', cibfile)
return cibfile
def _get_cibfile_tmp(cibname):
'''
Get the full path of a temporary CIB-file with the name of the CIB
'''
cibfile_tmp = '{0}.tmp'.format(_get_cibfile(cibname))
log.trace('cibfile_tmp: %s', cibfile_tmp)
return cibfile_tmp
def _get_cibfile_cksum(cibname):
'''
Get the full path of the file containing a checksum of a CIB-file with the name of the CIB
'''
cibfile_cksum = '{0}.cksum'.format(_get_cibfile(cibname))
log.trace('cibfile_cksum: %s', cibfile_cksum)
return cibfile_cksum
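# Hedged sketch, never called by this module and only meaningful inside the
# Salt loader context that provides __opts__ and __env__: with the default
# minion cachedir and the base environment, the helpers above resolve a CIB
# named cib_for_galera to roughly
# /var/cache/salt/minion/pcs/base/cib_for_galera.cib plus the derived .tmp and
# .cksum files.
def _example_cib_file_layout(cibname='cib_for_galera'):
    return (_get_cibfile(cibname),
            _get_cibfile_tmp(cibname),
            _get_cibfile_cksum(cibname))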
def _item_present(name, item, item_id, item_type, show='show', create='create', extra_args=None, cibname=None):
'''
Ensure that an item is created
name
Irrelevant, not used
item
config, property, resource, constraint etc.
item_id
id of the item
item_type
item type
show
show command (probably None, default: show)
create
        create command (e.g. create or set, default: create)
extra_args
additional options for the pcs command
cibname
use a cached CIB-file named like cibname instead of the live CIB
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
item_create_required = True
cibfile = None
if isinstance(cibname, six.string_types):
cibfile = _get_cibfile(cibname)
if not isinstance(extra_args, (list, tuple)):
extra_args = []
# split off key and value (item_id contains =)
item_id_key = item_id
item_id_value = None
if '=' in item_id:
item_id_key = item_id.split('=')[0].strip()
item_id_value = item_id.replace(item_id.split('=')[0] + '=', '').strip()
log.trace('item_id_key=%s item_id_value=%s', item_id_key, item_id_value)
# constraints, properties, resource defaults or resource op defaults
# do not support specifying an id on 'show' command
item_id_show = item_id
if item in ['constraint'] or '=' in item_id:
item_id_show = None
is_existing = __salt__['pcs.item_show'](item=item,
item_id=item_id_show,
item_type=item_type,
show=show,
cibfile=cibfile)
log.trace(
'Output of pcs.item_show item=%s item_id=%s item_type=%s cibfile=%s: %s',
item, item_id_show, item_type, cibfile, is_existing
)
# key,value pairs (item_id contains =) - match key and value
if item_id_value is not None:
for line in is_existing['stdout'].splitlines():
if len(line.split(':')) in [2]:
key = line.split(':')[0].strip()
value = line.split(':')[1].strip()
if item_id_key in [key]:
if item_id_value in [value]:
item_create_required = False
# constraints match on '(id:<id>)'
elif item in ['constraint']:
for line in is_existing['stdout'].splitlines():
if '(id:{0})'.format(item_id) in line:
item_create_required = False
# item_id was provided,
# return code 0 indicates, that resource already exists
else:
if is_existing['retcode'] in [0]:
item_create_required = False
if not item_create_required:
        ret['comment'] += '{0} {1} ({2}) already exists\n'.format(
six.text_type(item), six.text_type(item_id), six.text_type(item_type)
)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] += '{0} {1} ({2}) is set to be created\n'.format(
six.text_type(item), six.text_type(item_id), six.text_type(item_type)
)
return ret
item_create = __salt__['pcs.item_create'](
item=item,
item_id=item_id,
item_type=item_type,
create=create,
extra_args=extra_args,
cibfile=cibfile)
log.trace('Output of pcs.item_create: %s', item_create)
if item_create['retcode'] in [0]:
ret['comment'] += 'Created {0} {1} ({2})\n'.format(item, item_id, item_type)
ret['changes'].update({item_id: {'old': '', 'new': six.text_type(item_id)}})
else:
ret['result'] = False
ret['comment'] += 'Failed to create {0} {1} ({2})\n'.format(item, item_id, item_type)
log.trace('ret: %s', ret)
return ret
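# Hedged illustration, never called by any state: when an item_id contains
# '=', _item_present() above splits it into key and value and compares both
# against the 'key: value' lines of the show output. The property used as the
# default argument is a made-up example.
def _example_item_id_split(item_id='no-quorum-policy=ignore'):
    item_id_key = item_id.split('=')[0].strip()
    item_id_value = item_id.replace(item_id.split('=')[0] + '=', '').strip()
    return item_id_key, item_id_value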
def auth(name, nodes, pcsuser='hacluster', pcspasswd='hacluster', extra_args=None):
'''
Ensure all nodes are authorized to the cluster
name
Irrelevant, not used (recommended: pcs_auth__auth)
nodes
a list of nodes which should be authorized to the cluster
pcsuser
user for communication with pcs (default: hacluster)
pcspasswd
password for pcsuser (default: hacluster)
extra_args
list of extra args for the \'pcs cluster auth\' command
Example:
.. code-block:: yaml
pcs_auth__auth:
pcs.auth:
- nodes:
- node1.example.com
- node2.example.com
- pcsuser: hacluster
- pcspasswd: hoonetorg
- extra_args: []
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
auth_required = False
authorized = __salt__['pcs.is_auth'](nodes=nodes)
log.trace('Output of pcs.is_auth: %s', authorized)
authorized_dict = {}
for line in authorized['stdout'].splitlines():
node = line.split(':')[0].strip()
auth_state = line.split(':')[1].strip()
if node in nodes:
authorized_dict.update({node: auth_state})
log.trace('authorized_dict: %s', authorized_dict)
for node in nodes:
if node in authorized_dict and authorized_dict[node] == 'Already authorized':
ret['comment'] += 'Node {0} is already authorized\n'.format(node)
else:
auth_required = True
if __opts__['test']:
ret['comment'] += 'Node is set to authorize: {0}\n'.format(node)
if not auth_required:
return ret
if __opts__['test']:
ret['result'] = None
return ret
if not isinstance(extra_args, (list, tuple)):
extra_args = []
if '--force' not in extra_args:
extra_args += ['--force']
authorize = __salt__['pcs.auth'](nodes=nodes, pcsuser=pcsuser, pcspasswd=pcspasswd, extra_args=extra_args)
log.trace('Output of pcs.auth: %s', authorize)
authorize_dict = {}
for line in authorize['stdout'].splitlines():
node = line.split(':')[0].strip()
auth_state = line.split(':')[1].strip()
if node in nodes:
authorize_dict.update({node: auth_state})
log.trace('authorize_dict: %s', authorize_dict)
for node in nodes:
if node in authorize_dict and authorize_dict[node] == 'Authorized':
ret['comment'] += 'Authorized {0}\n'.format(node)
ret['changes'].update({node: {'old': '', 'new': 'Authorized'}})
else:
ret['result'] = False
if node in authorized_dict:
ret['comment'] += 'Authorization check for node {0} returned: {1}\n'.format(node, authorized_dict[node])
if node in authorize_dict:
ret['comment'] += 'Failed to authorize {0} with error {1}\n'.format(node, authorize_dict[node])
return ret
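# Hedged sketch with made-up sample output, never called by the states:
# auth(), cluster_setup() and cluster_node_present() all reduce pcs output to
# a {node: state} mapping by keeping only lines of the form 'node: state'.
def _example_parse_pcs_output(stdout='node1.example.com: Already authorized\n'
                                     'node2.example.com: Authorized'):
    parsed = {}
    for line in stdout.splitlines():
        if len(line.split(':')) in [2]:
            parsed[line.split(':')[0].strip()] = line.split(':')[1].strip()
    return parsed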
def cluster_setup(name, nodes, pcsclustername='pcscluster', extra_args=None):
'''
    Set up a Pacemaker cluster on the given nodes.
Should be run on one cluster node only
(there may be races)
name
Irrelevant, not used (recommended: pcs_setup__setup)
nodes
a list of nodes which should be set up
pcsclustername
Name of the Pacemaker cluster
extra_args
list of extra args for the \'pcs cluster setup\' command
Example:
.. code-block:: yaml
pcs_setup__setup:
pcs.cluster_setup:
- nodes:
- node1.example.com
- node2.example.com
- pcsclustername: pcscluster
- extra_args:
- '--start'
- '--enable'
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
setup_required = False
nodes_firstring = []
for node in nodes:
        nodes_firstring += [node.split(',')[0]]
log.trace('nodes_firstring %s', nodes_firstring)
config_show = __salt__['pcs.config_show']()
log.trace('Output of pcs.config_show: %s', config_show)
for line in config_show['stdout'].splitlines():
if len(line.split(':')) in [2]:
key = line.split(':')[0].strip()
value = line.split(':')[1].strip()
if key in ['Cluster Name']:
if value in [pcsclustername]:
ret['comment'] += 'Cluster {0} is already set up\n'.format(pcsclustername)
else:
setup_required = True
if __opts__['test']:
ret['comment'] += 'Cluster {0} is set to set up\n'.format(pcsclustername)
if not setup_required:
return ret
if __opts__['test']:
ret['result'] = None
return ret
if not isinstance(extra_args, (list, tuple)):
extra_args = []
setup = __salt__['pcs.cluster_setup'](nodes=nodes, pcsclustername=pcsclustername, extra_args=extra_args)
log.trace('Output of pcs.cluster_setup: %s', setup)
setup_dict = {}
for line in setup['stdout'].splitlines():
log.trace('line: %s', line)
log.trace('line.split(:).len: %s', len(line.split(':')))
if len(line.split(':')) in [2]:
node = line.split(':')[0].strip()
setup_state = line.split(':')[1].strip()
if node in nodes_firstring:
setup_dict.update({node: setup_state})
log.trace('setup_dict: %s', setup_dict)
for node in nodes_firstring:
if node in setup_dict and setup_dict[node] in ['Succeeded', 'Success']:
ret['comment'] += 'Set up {0}\n'.format(node)
ret['changes'].update({node: {'old': '', 'new': 'Setup'}})
else:
ret['result'] = False
ret['comment'] += 'Failed to setup {0}\n'.format(node)
if node in setup_dict:
ret['comment'] += '{0}: setup_dict: {1}\n'.format(node, setup_dict[node])
ret['comment'] += six.text_type(setup)
log.trace('ret: %s', ret)
return ret
def cluster_node_present(name, node, extra_args=None):
'''
Add a node to the Pacemaker cluster via PCS
Should be run on one cluster node only
(there may be races)
    Can only be run on an already set up/added node
name
Irrelevant, not used (recommended: pcs_setup__node_add_{{node}})
node
node that should be added
extra_args
list of extra args for the \'pcs cluster node add\' command
Example:
.. code-block:: yaml
pcs_setup__node_add_node1.example.com:
pcs.cluster_node_present:
- node: node1.example.com
- extra_args:
- '--start'
- '--enable'
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
node_add_required = True
node_firstring = node.split(',')[0]
log.trace('node_firstring %s', node_firstring)
current_nodes = []
is_member_cmd = ['pcs', 'status', 'nodes', 'corosync']
is_member = __salt__['cmd.run_all'](is_member_cmd, output_loglevel='trace', python_shell=False)
log.trace('Output of pcs status nodes corosync: %s', is_member)
for line in is_member['stdout'].splitlines():
if len(line.split(':')) in [2]:
key = line.split(':')[0].strip()
value = line.split(':')[1].strip()
if key in ['Offline', 'Online']:
if len(value.split()) > 0:
if node_firstring in value.split():
node_add_required = False
ret['comment'] += 'Node {0} is already member of the cluster\n'.format(node_firstring)
else:
current_nodes += value.split()
if not node_add_required:
return ret
if __opts__['test']:
ret['result'] = None
        ret['comment'] += 'Node {0} is set to be added to the cluster as follows: {1}\n'.format(node_firstring, node)
return ret
if not isinstance(extra_args, (list, tuple)):
extra_args = []
node_add = __salt__['pcs.cluster_node_add'](node=node, extra_args=extra_args)
log.trace('Output of pcs.cluster_node_add: %s', node_add)
node_add_dict = {}
for line in node_add['stdout'].splitlines():
log.trace('line: %s', line)
log.trace('line.split(:).len: %s', len(line.split(':')))
if len(line.split(':')) in [2]:
current_node = line.split(':')[0].strip()
current_node_add_state = line.split(':')[1].strip()
if current_node in current_nodes + [node_firstring]:
node_add_dict.update({current_node: current_node_add_state})
log.trace('node_add_dict: %s', node_add_dict)
for current_node in current_nodes:
if current_node in node_add_dict:
if node_add_dict[current_node] not in ['Corosync updated']:
ret['result'] = False
ret['comment'] += 'Failed to update corosync.conf on node {0}\n'.format(current_node)
ret['comment'] += '{0}: node_add_dict: {1}\n'.format(current_node, node_add_dict[current_node])
else:
ret['result'] = False
ret['comment'] += 'Failed to update corosync.conf on node {0}\n'.format(current_node)
if node_firstring in node_add_dict and node_add_dict[node_firstring] in ['Succeeded', 'Success']:
ret['comment'] += 'Added node {0}\n'.format(node_firstring)
ret['changes'].update({node_firstring: {'old': '', 'new': 'Added'}})
else:
ret['result'] = False
ret['comment'] += 'Failed to add node {0}\n'.format(node_firstring)
if node_firstring in node_add_dict:
ret['comment'] += '{0}: node_add_dict: {1}\n'.format(node_firstring, node_add_dict[node_firstring])
ret['comment'] += six.text_type(node_add)
log.trace('ret: %s', ret)
return ret
def cib_present(name, cibname, scope=None, extra_args=None):
'''
Ensure that a CIB-file with the content of the current live CIB is created
Should be run on one cluster node only
(there may be races)
name
Irrelevant, not used (recommended: {{formulaname}}__cib_present_{{cibname}})
cibname
name/path of the file containing the CIB
scope
        specific section of the CIB (default: None)
extra_args
additional options for creating the CIB-file
Example:
.. code-block:: yaml
mysql_pcs__cib_present_cib_for_galera:
pcs.cib_present:
- cibname: cib_for_galera
- scope: None
- extra_args: None
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
cib_hash_form = 'sha256'
cib_create_required = False
cib_cksum_required = False
cib_required = False
cibpath = _get_cibpath()
cibfile = _get_cibfile(cibname)
cibfile_tmp = _get_cibfile_tmp(cibname)
cibfile_cksum = _get_cibfile_cksum(cibname)
if not os.path.exists(cibpath):
os.makedirs(cibpath)
if not isinstance(extra_args, (list, tuple)):
extra_args = []
if os.path.exists(cibfile_tmp):
__salt__['file.remove'](cibfile_tmp)
cib_create = __salt__['pcs.cib_create'](cibfile=cibfile_tmp, scope=scope, extra_args=extra_args)
log.trace('Output of pcs.cib_create: %s', cib_create)
if cib_create['retcode'] not in [0] or not os.path.exists(cibfile_tmp):
ret['result'] = False
ret['comment'] += 'Failed to get live CIB\n'
return ret
cib_hash_live = '{0}:{1}'.format(cib_hash_form, __salt__['file.get_hash'](path=cibfile_tmp, form=cib_hash_form))
log.trace('cib_hash_live: %s', cib_hash_live)
cib_hash_cur = _file_read(path=cibfile_cksum)
if cib_hash_cur not in [cib_hash_live]:
cib_cksum_required = True
log.trace('cib_hash_cur: %s', cib_hash_cur)
if not os.path.exists(cibfile) or not __salt__['file.check_hash'](path=cibfile, file_hash=cib_hash_live):
cib_create_required = True
if cib_cksum_required or cib_create_required:
cib_required = True
if not cib_create_required:
__salt__['file.remove'](cibfile_tmp)
ret['comment'] += 'CIB {0} is already equal to the live CIB\n'.format(cibname)
if not cib_cksum_required:
ret['comment'] += 'CIB {0} checksum is correct\n'.format(cibname)
if not cib_required:
return ret
if __opts__['test']:
__salt__['file.remove'](cibfile_tmp)
ret['result'] = None
if cib_create_required:
ret['comment'] += 'CIB {0} is set to be created/updated\n'.format(cibname)
if cib_cksum_required:
ret['comment'] += 'CIB {0} checksum is set to be created/updated\n'.format(cibname)
return ret
if cib_create_required:
__salt__['file.move'](cibfile_tmp, cibfile)
if __salt__['file.check_hash'](path=cibfile, file_hash=cib_hash_live):
ret['comment'] += 'Created/updated CIB {0}\n'.format(cibname)
ret['changes'].update({'cibfile': cibfile})
else:
ret['result'] = False
ret['comment'] += 'Failed to create/update CIB {0}\n'.format(cibname)
if cib_cksum_required:
_file_write(cibfile_cksum, cib_hash_live)
if _file_read(cibfile_cksum) in [cib_hash_live]:
ret['comment'] += 'Created/updated checksum {0} of CIB {1}\n'.format(cib_hash_live, cibname)
ret['changes'].update({'cibcksum': cib_hash_live})
else:
ret['result'] = False
ret['comment'] += 'Failed to create/update checksum {0} CIB {1}\n'.format(cib_hash_live, cibname)
log.trace('ret: %s', ret)
return ret
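# Hedged illustration with a placeholder input, never called by the states:
# cib_present() records the live CIB's hash as '<form>:<hexdigest>' in the
# .cksum file and cib_pushed() later compares the cached CIB against that
# string; the same value can be produced with hashlib instead of the
# file.get_hash execution module.
def _example_cib_cksum_string(cib_text='<cib epoch="1"/>'):
    import hashlib
    return 'sha256:{0}'.format(hashlib.sha256(cib_text.encode('utf-8')).hexdigest())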
def cib_pushed(name, cibname, scope=None, extra_args=None):
'''
    Ensure that a CIB-file is pushed if it has changed since it was created with pcs.cib_present
Should be run on one cluster node only
(there may be races)
name
Irrelevant, not used (recommended: {{formulaname}}__cib_pushed_{{cibname}})
cibname
name/path of the file containing the CIB
scope
specific section of the CIB
extra_args
additional options for creating the CIB-file
Example:
.. code-block:: yaml
mysql_pcs__cib_pushed_cib_for_galera:
pcs.cib_pushed:
- cibname: cib_for_galera
- scope: None
- extra_args: None
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
cib_hash_form = 'sha256'
cib_push_required = False
cibfile = _get_cibfile(cibname)
cibfile_cksum = _get_cibfile_cksum(cibname)
if not isinstance(extra_args, (list, tuple)):
extra_args = []
if not os.path.exists(cibfile):
ret['result'] = False
ret['comment'] += 'CIB-file {0} does not exist\n'.format(cibfile)
return ret
cib_hash_cibfile = '{0}:{1}'.format(cib_hash_form, __salt__['file.get_hash'](path=cibfile, form=cib_hash_form))
log.trace('cib_hash_cibfile: %s', cib_hash_cibfile)
if _file_read(cibfile_cksum) not in [cib_hash_cibfile]:
cib_push_required = True
if not cib_push_required:
ret['comment'] += 'CIB {0} is not changed since creation through pcs.cib_present\n'.format(cibname)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] += 'CIB {0} is set to be pushed as the new live CIB\n'.format(cibname)
return ret
cib_push = __salt__['pcs.cib_push'](cibfile=cibfile, scope=scope, extra_args=extra_args)
log.trace('Output of pcs.cib_push: %s', cib_push)
if cib_push['retcode'] in [0]:
ret['comment'] += 'Pushed CIB {0}\n'.format(cibname)
ret['changes'].update({'cibfile_pushed': cibfile})
else:
ret['result'] = False
ret['comment'] += 'Failed to push CIB {0}\n'.format(cibname)
log.trace('ret: %s', ret)
return ret
def prop_has_value(name, prop, value, extra_args=None, cibname=None):
'''
Ensure that a property in the cluster is set to a given value
Should be run on one cluster node only
(there may be races)
name
Irrelevant, not used (recommended: pcs_properties__prop_has_value_{{prop}})
prop
name of the property
value
value of the property
extra_args
additional options for the pcs property command
cibname
use a cached CIB-file named like cibname instead of the live CIB
Example:
.. code-block:: yaml
pcs_properties__prop_has_value_no-quorum-policy:
pcs.prop_has_value:
- prop: no-quorum-policy
- value: ignore
- cibname: cib_for_cluster_settings
'''
return _item_present(name=name,
item='property',
item_id='{0}={1}'.format(prop, value),
item_type=None,
create='set',
extra_args=extra_args,
cibname=cibname)
def resource_defaults_to(name, default, value, extra_args=None, cibname=None):
'''
Ensure a resource default in the cluster is set to a given value
Should be run on one cluster node only
(there may be races)
Can only be run on a node with a functional pacemaker/corosync
name
Irrelevant, not used (recommended: pcs_properties__resource_defaults_to_{{default}})
default
name of the default resource property
value
value of the default resource property
extra_args
additional options for the pcs command
cibname
use a cached CIB-file named like cibname instead of the live CIB
Example:
.. code-block:: yaml
pcs_properties__resource_defaults_to_resource-stickiness:
pcs.resource_defaults_to:
- default: resource-stickiness
- value: 100
- cibname: cib_for_cluster_settings
'''
return _item_present(name=name,
item='resource',
item_id='{0}={1}'.format(default, value),
item_type=None,
show='defaults',
create='defaults',
extra_args=extra_args,
cibname=cibname)
def resource_op_defaults_to(name, op_default, value, extra_args=None, cibname=None):
'''
Ensure a resource operation default in the cluster is set to a given value
Should be run on one cluster node only
(there may be races)
Can only be run on a node with a functional pacemaker/corosync
name
Irrelevant, not used (recommended: pcs_properties__resource_op_defaults_to_{{op_default}})
op_default
name of the operation default resource property
value
value of the operation default resource property
extra_args
additional options for the pcs command
cibname
use a cached CIB-file named like cibname instead of the live CIB
Example:
.. code-block:: yaml
pcs_properties__resource_op_defaults_to_monitor-interval:
pcs.resource_op_defaults_to:
- op_default: monitor-interval
- value: 60s
- cibname: cib_for_cluster_settings
'''
return _item_present(name=name,
item='resource',
item_id='{0}={1}'.format(op_default, value),
item_type=None,
show=['op', 'defaults'],
create=['op', 'defaults'],
extra_args=extra_args,
cibname=cibname)
def stonith_present(name, stonith_id, stonith_device_type, stonith_device_options=None, cibname=None):
'''
Ensure that a fencing resource is created
Should be run on one cluster node only
(there may be races)
Can only be run on a node with a functional pacemaker/corosync
name
Irrelevant, not used (recommended: pcs_stonith__created_{{stonith_id}})
stonith_id
name for the stonith resource
stonith_device_type
        name of the stonith agent (e.g. fence_eps or fence_xvm)
stonith_device_options
additional options for creating the stonith resource
cibname
use a cached CIB-file named like cibname instead of the live CIB
Example:
.. code-block:: yaml
pcs_stonith__created_eps_fence:
pcs.stonith_present:
- stonith_id: eps_fence
- stonith_device_type: fence_eps
- stonith_device_options:
- 'pcmk_host_map=node1.example.org:01;node2.example.org:02'
- 'ipaddr=myepsdevice.example.org'
- 'power_wait=5'
- 'verbose=1'
- 'debug=/var/log/pcsd/eps_fence.log'
- 'login=hidden'
- 'passwd=hoonetorg'
- cibname: cib_for_stonith
'''
return _item_present(name=name,
item='stonith',
item_id=stonith_id,
item_type=stonith_device_type,
extra_args=stonith_device_options,
cibname=cibname)
def resource_present(name, resource_id, resource_type, resource_options=None, cibname=None):
'''
Ensure that a resource is created
Should be run on one cluster node only
(there may be races)
Can only be run on a node with a functional pacemaker/corosync
name
Irrelevant, not used (recommended: {{formulaname}}__resource_present_{{resource_id}})
resource_id
name for the resource
resource_type
        resource type (e.g. ocf:heartbeat:IPaddr2 or VirtualIP)
resource_options
additional options for creating the resource
cibname
use a cached CIB-file named like cibname instead of the live CIB
Example:
.. code-block:: yaml
mysql_pcs__resource_present_galera:
pcs.resource_present:
- resource_id: galera
- resource_type: "ocf:heartbeat:galera"
- resource_options:
- 'wsrep_cluster_address=gcomm://node1.example.org,node2.example.org,node3.example.org'
- '--master'
- cibname: cib_for_galera
'''
return _item_present(name=name,
item='resource',
item_id=resource_id,
item_type=resource_type,
extra_args=resource_options,
cibname=cibname)
def constraint_present(name, constraint_id, constraint_type, constraint_options=None, cibname=None):
'''
Ensure that a constraint is created
Should be run on one cluster node only
(there may be races)
Can only be run on a node with a functional pacemaker/corosync
name
Irrelevant, not used (recommended: {{formulaname}}__constraint_present_{{constraint_id}})
constraint_id
        name for the constraint (create it manually first to find out the auto-generated name)
constraint_type
constraint type (location, colocation, order)
constraint_options
options for creating the constraint
cibname
use a cached CIB-file named like cibname instead of the live CIB
Example:
.. code-block:: yaml
haproxy_pcs__constraint_present_colocation-vip_galera-haproxy-clone-INFINITY:
pcs.constraint_present:
- constraint_id: colocation-vip_galera-haproxy-clone-INFINITY
- constraint_type: colocation
- constraint_options:
- 'add'
- 'vip_galera'
- 'with'
- 'haproxy-clone'
- cibname: cib_for_haproxy
'''
return _item_present(name=name,
item='constraint',
item_id=constraint_id,
item_type=constraint_type,
create=None,
extra_args=constraint_options,
cibname=cibname)
| {
"content_hash": "d9c9973bb04be41d7a3f2a97ff1b6a90",
"timestamp": "",
"source": "github",
"line_count": 1069,
"max_line_length": 123,
"avg_line_length": 33.042095416276894,
"alnum_prop": 0.5685408527263461,
"repo_name": "hoonetorg/salt-pcs-formula",
"id": "9ba81c0f28fee04382d6ffd5545d96e4b4575cdb",
"size": "35346",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "_states/pcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1188"
},
{
"name": "Python",
"bytes": "48971"
},
{
"name": "SaltStack",
"bytes": "10459"
}
],
"symlink_target": ""
} |
"""DNS Messages"""
import cStringIO
import random
import struct
import sys
import time
import dns.edns
import dns.exception
import dns.flags
import dns.name
import dns.opcode
import dns.entropy
import dns.rcode
import dns.rdata
import dns.rdataclass
import dns.rdatatype
import dns.rrset
import dns.renderer
import dns.tsig
import dns.wiredata
class ShortHeader(dns.exception.FormError):
"""Raised if the DNS packet passed to from_wire() is too short."""
pass
class TrailingJunk(dns.exception.FormError):
"""Raised if the DNS packet passed to from_wire() has extra junk
at the end of it."""
pass
class UnknownHeaderField(dns.exception.DNSException):
"""Raised if a header field name is not recognized when converting from
text into a message."""
pass
class BadEDNS(dns.exception.FormError):
"""Raised if an OPT record occurs somewhere other than the start of
the additional data section."""
pass
class BadTSIG(dns.exception.FormError):
"""Raised if a TSIG record occurs somewhere other than the end of
the additional data section."""
pass
class UnknownTSIGKey(dns.exception.DNSException):
"""Raised if we got a TSIG but don't know the key."""
pass
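# Hedged usage sketch, not called anywhere in the library: the format errors
# above are raised while decoding wire-format packets, so callers commonly
# wrap from_wire() -- defined later in this module -- and treat them as
# "malformed packet" rather than letting them propagate.
def _example_parse_or_none(wire):
    try:
        return from_wire(wire)
    except (ShortHeader, TrailingJunk, BadEDNS, BadTSIG):
        return None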
class Message(object):
"""A DNS message.
@ivar id: The query id; the default is a randomly chosen id.
@type id: int
@ivar flags: The DNS flags of the message. @see: RFC 1035 for an
explanation of these flags.
@type flags: int
@ivar question: The question section.
@type question: list of dns.rrset.RRset objects
@ivar answer: The answer section.
@type answer: list of dns.rrset.RRset objects
@ivar authority: The authority section.
@type authority: list of dns.rrset.RRset objects
@ivar additional: The additional data section.
@type additional: list of dns.rrset.RRset objects
@ivar edns: The EDNS level to use. The default is -1, no Edns.
@type edns: int
@ivar ednsflags: The EDNS flags
@type ednsflags: long
@ivar payload: The EDNS payload size. The default is 0.
@type payload: int
@ivar options: The EDNS options
@type options: list of dns.edns.Option objects
@ivar request_payload: The associated request's EDNS payload size.
@type request_payload: int
@ivar keyring: The TSIG keyring to use. The default is None.
@type keyring: dict
@ivar keyname: The TSIG keyname to use. The default is None.
@type keyname: dns.name.Name object
@ivar keyalgorithm: The TSIG algorithm to use; defaults to
dns.tsig.default_algorithm. Constants for TSIG algorithms are defined
in dns.tsig, and the currently implemented algorithms are
HMAC_MD5, HMAC_SHA1, HMAC_SHA224, HMAC_SHA256, HMAC_SHA384, and
HMAC_SHA512.
@type keyalgorithm: string
@ivar request_mac: The TSIG MAC of the request message associated with
this message; used when validating TSIG signatures. @see: RFC 2845 for
more information on TSIG fields.
@type request_mac: string
@ivar fudge: TSIG time fudge; default is 300 seconds.
@type fudge: int
@ivar original_id: TSIG original id; defaults to the message's id
@type original_id: int
@ivar tsig_error: TSIG error code; default is 0.
@type tsig_error: int
@ivar other_data: TSIG other data.
@type other_data: string
@ivar mac: The TSIG MAC for this message.
@type mac: string
@ivar xfr: Is the message being used to contain the results of a DNS
zone transfer? The default is False.
@type xfr: bool
@ivar origin: The origin of the zone in messages which are used for
zone transfers or for DNS dynamic updates. The default is None.
@type origin: dns.name.Name object
@ivar tsig_ctx: The TSIG signature context associated with this
message. The default is None.
@type tsig_ctx: hmac.HMAC object
@ivar had_tsig: Did the message decoded from wire format have a TSIG
signature?
@type had_tsig: bool
@ivar multi: Is this message part of a multi-message sequence? The
default is false. This variable is used when validating TSIG signatures
on messages which are part of a zone transfer.
@type multi: bool
@ivar first: Is this message standalone, or the first of a multi
message sequence? This variable is used when validating TSIG signatures
on messages which are part of a zone transfer.
@type first: bool
@ivar index: An index of rrsets in the message. The index key is
(section, name, rdclass, rdtype, covers, deleting). Indexing can be
disabled by setting the index to None.
@type index: dict
"""
def __init__(self, id=None):
if id is None:
self.id = dns.entropy.random_16()
else:
self.id = id
self.flags = 0
self.question = []
self.answer = []
self.authority = []
self.additional = []
self.edns = -1
self.ednsflags = 0
self.payload = 0
self.options = []
self.request_payload = 0
self.keyring = None
self.keyname = None
self.keyalgorithm = dns.tsig.default_algorithm
self.request_mac = ''
self.other_data = ''
self.tsig_error = 0
self.fudge = 300
self.original_id = self.id
self.mac = ''
self.xfr = False
self.origin = None
self.tsig_ctx = None
self.had_tsig = False
self.multi = False
self.first = True
self.index = {}
def __repr__(self):
return '<DNS message, ID ' + `self.id` + '>'
def __str__(self):
return self.to_text()
def to_text(self, origin=None, relativize=True, **kw):
"""Convert the message to text.
The I{origin}, I{relativize}, and any other keyword
arguments are passed to the rrset to_wire() method.
@rtype: string
"""
s = cStringIO.StringIO()
print >> s, 'id %d' % self.id
print >> s, 'opcode %s' % \
dns.opcode.to_text(dns.opcode.from_flags(self.flags))
rc = dns.rcode.from_flags(self.flags, self.ednsflags)
print >> s, 'rcode %s' % dns.rcode.to_text(rc)
print >> s, 'flags %s' % dns.flags.to_text(self.flags)
if self.edns >= 0:
print >> s, 'edns %s' % self.edns
if self.ednsflags != 0:
print >> s, 'eflags %s' % \
dns.flags.edns_to_text(self.ednsflags)
print >> s, 'payload', self.payload
is_update = dns.opcode.is_update(self.flags)
if is_update:
print >> s, ';ZONE'
else:
print >> s, ';QUESTION'
for rrset in self.question:
print >> s, rrset.to_text(origin, relativize, **kw)
if is_update:
print >> s, ';PREREQ'
else:
print >> s, ';ANSWER'
for rrset in self.answer:
print >> s, rrset.to_text(origin, relativize, **kw)
if is_update:
print >> s, ';UPDATE'
else:
print >> s, ';AUTHORITY'
for rrset in self.authority:
print >> s, rrset.to_text(origin, relativize, **kw)
print >> s, ';ADDITIONAL'
for rrset in self.additional:
print >> s, rrset.to_text(origin, relativize, **kw)
#
# We strip off the final \n so the caller can print the result without
# doing weird things to get around eccentricities in Python print
# formatting
#
return s.getvalue()[:-1]
def __eq__(self, other):
"""Two messages are equal if they have the same content in the
header, question, answer, and authority sections.
@rtype: bool"""
if not isinstance(other, Message):
return False
if self.id != other.id:
return False
if self.flags != other.flags:
return False
for n in self.question:
if n not in other.question:
return False
for n in other.question:
if n not in self.question:
return False
for n in self.answer:
if n not in other.answer:
return False
for n in other.answer:
if n not in self.answer:
return False
for n in self.authority:
if n not in other.authority:
return False
for n in other.authority:
if n not in self.authority:
return False
return True
def __ne__(self, other):
"""Are two messages not equal?
@rtype: bool"""
return not self.__eq__(other)
def is_response(self, other):
"""Is other a response to self?
@rtype: bool"""
if other.flags & dns.flags.QR == 0 or \
self.id != other.id or \
dns.opcode.from_flags(self.flags) != \
dns.opcode.from_flags(other.flags):
return False
if dns.rcode.from_flags(other.flags, other.ednsflags) != \
dns.rcode.NOERROR:
return True
if dns.opcode.is_update(self.flags):
return True
for n in self.question:
if n not in other.question:
return False
for n in other.question:
if n not in self.question:
return False
return True
def section_number(self, section):
if section is self.question:
return 0
elif section is self.answer:
return 1
elif section is self.authority:
return 2
elif section is self.additional:
return 3
else:
raise ValueError('unknown section')
def find_rrset(self, section, name, rdclass, rdtype,
covers=dns.rdatatype.NONE, deleting=None, create=False,
force_unique=False):
"""Find the RRset with the given attributes in the specified section.
@param section: the section of the message to look in, e.g.
self.answer.
@type section: list of dns.rrset.RRset objects
@param name: the name of the RRset
@type name: dns.name.Name object
@param rdclass: the class of the RRset
@type rdclass: int
@param rdtype: the type of the RRset
@type rdtype: int
@param covers: the covers value of the RRset
@type covers: int
@param deleting: the deleting value of the RRset
@type deleting: int
@param create: If True, create the RRset if it is not found.
The created RRset is appended to I{section}.
@type create: bool
@param force_unique: If True and create is also True, create a
new RRset regardless of whether a matching RRset exists already.
@type force_unique: bool
@raises KeyError: the RRset was not found and create was False
@rtype: dns.rrset.RRset object"""
key = (self.section_number(section),
name, rdclass, rdtype, covers, deleting)
if not force_unique:
if not self.index is None:
rrset = self.index.get(key)
if not rrset is None:
return rrset
else:
for rrset in section:
if rrset.match(name, rdclass, rdtype, covers, deleting):
return rrset
if not create:
raise KeyError
rrset = dns.rrset.RRset(name, rdclass, rdtype, covers, deleting)
section.append(rrset)
if not self.index is None:
self.index[key] = rrset
return rrset
def get_rrset(self, section, name, rdclass, rdtype,
covers=dns.rdatatype.NONE, deleting=None, create=False,
force_unique=False):
"""Get the RRset with the given attributes in the specified section.
If the RRset is not found, None is returned.
@param section: the section of the message to look in, e.g.
self.answer.
@type section: list of dns.rrset.RRset objects
@param name: the name of the RRset
@type name: dns.name.Name object
@param rdclass: the class of the RRset
@type rdclass: int
@param rdtype: the type of the RRset
@type rdtype: int
@param covers: the covers value of the RRset
@type covers: int
@param deleting: the deleting value of the RRset
@type deleting: int
@param create: If True, create the RRset if it is not found.
The created RRset is appended to I{section}.
@type create: bool
@param force_unique: If True and create is also True, create a
new RRset regardless of whether a matching RRset exists already.
@type force_unique: bool
@rtype: dns.rrset.RRset object or None"""
try:
rrset = self.find_rrset(section, name, rdclass, rdtype, covers,
deleting, create, force_unique)
except KeyError:
rrset = None
return rrset
def to_wire(self, origin=None, max_size=0, **kw):
"""Return a string containing the message in DNS compressed wire
format.
Additional keyword arguments are passed to the rrset to_wire()
method.
@param origin: The origin to be appended to any relative names.
@type origin: dns.name.Name object
@param max_size: The maximum size of the wire format output; default
is 0, which means 'the message's request payload, if nonzero, or
        65535'.
@type max_size: int
@raises dns.exception.TooBig: max_size was exceeded
@rtype: string
"""
if max_size == 0:
if self.request_payload != 0:
max_size = self.request_payload
else:
max_size = 65535
if max_size < 512:
max_size = 512
elif max_size > 65535:
max_size = 65535
r = dns.renderer.Renderer(self.id, self.flags, max_size, origin)
for rrset in self.question:
r.add_question(rrset.name, rrset.rdtype, rrset.rdclass)
for rrset in self.answer:
r.add_rrset(dns.renderer.ANSWER, rrset, **kw)
for rrset in self.authority:
r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw)
if self.edns >= 0:
r.add_edns(self.edns, self.ednsflags, self.payload, self.options)
for rrset in self.additional:
r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw)
r.write_header()
if not self.keyname is None:
r.add_tsig(self.keyname, self.keyring[self.keyname],
self.fudge, self.original_id, self.tsig_error,
self.other_data, self.request_mac,
self.keyalgorithm)
self.mac = r.mac
return r.get_wire()
def use_tsig(self, keyring, keyname=None, fudge=300,
original_id=None, tsig_error=0, other_data='',
algorithm=dns.tsig.default_algorithm):
"""When sending, a TSIG signature using the specified keyring
and keyname should be added.
@param keyring: The TSIG keyring to use; defaults to None.
@type keyring: dict
@param keyname: The name of the TSIG key to use; defaults to None.
The key must be defined in the keyring. If a keyring is specified
but a keyname is not, then the key used will be the first key in the
keyring. Note that the order of keys in a dictionary is not defined,
so applications should supply a keyname when a keyring is used, unless
they know the keyring contains only one key.
@type keyname: dns.name.Name or string
@param fudge: TSIG time fudge; default is 300 seconds.
@type fudge: int
@param original_id: TSIG original id; defaults to the message's id
@type original_id: int
@param tsig_error: TSIG error code; default is 0.
@type tsig_error: int
@param other_data: TSIG other data.
@type other_data: string
@param algorithm: The TSIG algorithm to use; defaults to
dns.tsig.default_algorithm
"""
self.keyring = keyring
if keyname is None:
self.keyname = self.keyring.keys()[0]
else:
if isinstance(keyname, (str, unicode)):
keyname = dns.name.from_text(keyname)
self.keyname = keyname
self.keyalgorithm = algorithm
self.fudge = fudge
if original_id is None:
self.original_id = self.id
else:
self.original_id = original_id
self.tsig_error = tsig_error
self.other_data = other_data
def use_edns(self, edns=0, ednsflags=0, payload=1280, request_payload=None, options=None):
"""Configure EDNS behavior.
@param edns: The EDNS level to use. Specifying None, False, or -1
means 'do not use EDNS', and in this case the other parameters are
ignored. Specifying True is equivalent to specifying 0, i.e. 'use
EDNS0'.
@type edns: int or bool or None
@param ednsflags: EDNS flag values.
@type ednsflags: int
@param payload: The EDNS sender's payload field, which is the maximum
size of UDP datagram the sender can handle.
@type payload: int
@param request_payload: The EDNS payload size to use when sending
this message. If not specified, defaults to the value of payload.
@type request_payload: int or None
@param options: The EDNS options
@type options: None or list of dns.edns.Option objects
@see: RFC 2671
"""
if edns is None or edns is False:
edns = -1
if edns is True:
edns = 0
if request_payload is None:
request_payload = payload
if edns < 0:
ednsflags = 0
payload = 0
request_payload = 0
options = []
else:
# make sure the EDNS version in ednsflags agrees with edns
ednsflags &= 0xFF00FFFFL
ednsflags |= (edns << 16)
if options is None:
options = []
self.edns = edns
self.ednsflags = ednsflags
self.payload = payload
self.options = options
self.request_payload = request_payload
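    # Illustrative sketch (not part of the original module): advertising a
    # 4096-byte UDP receive buffer with plain EDNS0 and no options.
    #
    #     msg.use_edns(edns=0, payload=4096)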
def want_dnssec(self, wanted=True):
"""Enable or disable 'DNSSEC desired' flag in requests.
@param wanted: Is DNSSEC desired? If True, EDNS is enabled if
required, and then the DO bit is set. If False, the DO bit is
cleared if EDNS is enabled.
@type wanted: bool
"""
if wanted:
if self.edns < 0:
self.use_edns()
self.ednsflags |= dns.flags.DO
elif self.edns >= 0:
self.ednsflags &= ~dns.flags.DO
def rcode(self):
"""Return the rcode.
@rtype: int
"""
return dns.rcode.from_flags(self.flags, self.ednsflags)
def set_rcode(self, rcode):
"""Set the rcode.
@param rcode: the rcode
@type rcode: int
"""
(value, evalue) = dns.rcode.to_flags(rcode)
self.flags &= 0xFFF0
self.flags |= value
self.ednsflags &= 0x00FFFFFFL
self.ednsflags |= evalue
if self.ednsflags != 0 and self.edns < 0:
self.edns = 0
def opcode(self):
"""Return the opcode.
@rtype: int
"""
return dns.opcode.from_flags(self.flags)
def set_opcode(self, opcode):
"""Set the opcode.
@param opcode: the opcode
@type opcode: int
"""
self.flags &= 0x87FF
self.flags |= dns.opcode.to_flags(opcode)
class _WireReader(object):
"""Wire format reader.
@ivar wire: the wire-format message.
@type wire: string
@ivar message: The message object being built
@type message: dns.message.Message object
@ivar current: When building a message object from wire format, this
variable contains the offset from the beginning of wire of the next octet
to be read.
@type current: int
@ivar updating: Is the message a dynamic update?
@type updating: bool
@ivar one_rr_per_rrset: Put each RR into its own RRset?
@type one_rr_per_rrset: bool
@ivar ignore_trailing: Ignore trailing junk at end of request?
@type ignore_trailing: bool
@ivar zone_rdclass: The class of the zone in messages which are
DNS dynamic updates.
@type zone_rdclass: int
"""
def __init__(self, wire, message, question_only=False,
one_rr_per_rrset=False, ignore_trailing=False):
self.wire = dns.wiredata.maybe_wrap(wire)
self.message = message
self.current = 0
self.updating = False
self.zone_rdclass = dns.rdataclass.IN
self.question_only = question_only
self.one_rr_per_rrset = one_rr_per_rrset
self.ignore_trailing = ignore_trailing
def _get_question(self, qcount):
"""Read the next I{qcount} records from the wire data and add them to
the question section.
@param qcount: the number of questions in the message
@type qcount: int"""
if self.updating and qcount > 1:
raise dns.exception.FormError
for i in xrange(0, qcount):
(qname, used) = dns.name.from_wire(self.wire, self.current)
if not self.message.origin is None:
qname = qname.relativize(self.message.origin)
self.current = self.current + used
(rdtype, rdclass) = \
struct.unpack('!HH',
self.wire[self.current:self.current + 4])
self.current = self.current + 4
self.message.find_rrset(self.message.question, qname,
rdclass, rdtype, create=True,
force_unique=True)
if self.updating:
self.zone_rdclass = rdclass
def _get_section(self, section, count):
"""Read the next I{count} records from the wire data and add them to
the specified section.
@param section: the section of the message to which to add records
@type section: list of dns.rrset.RRset objects
@param count: the number of records to read
@type count: int"""
if self.updating or self.one_rr_per_rrset:
force_unique = True
else:
force_unique = False
seen_opt = False
for i in xrange(0, count):
rr_start = self.current
(name, used) = dns.name.from_wire(self.wire, self.current)
absolute_name = name
if not self.message.origin is None:
name = name.relativize(self.message.origin)
self.current = self.current + used
(rdtype, rdclass, ttl, rdlen) = \
struct.unpack('!HHIH',
self.wire[self.current:self.current + 10])
self.current = self.current + 10
if rdtype == dns.rdatatype.OPT:
if not section is self.message.additional or seen_opt:
raise BadEDNS
self.message.payload = rdclass
self.message.ednsflags = ttl
self.message.edns = (ttl & 0xff0000) >> 16
self.message.options = []
current = self.current
optslen = rdlen
while optslen > 0:
(otype, olen) = \
struct.unpack('!HH',
self.wire[current:current + 4])
current = current + 4
opt = dns.edns.option_from_wire(otype, self.wire, current, olen)
self.message.options.append(opt)
current = current + olen
optslen = optslen - 4 - olen
seen_opt = True
elif rdtype == dns.rdatatype.TSIG:
if not (section is self.message.additional and
i == (count - 1)):
raise BadTSIG
if self.message.keyring is None:
raise UnknownTSIGKey('got signed message without keyring')
secret = self.message.keyring.get(absolute_name)
if secret is None:
raise UnknownTSIGKey("key '%s' unknown" % name)
self.message.keyname = absolute_name
(self.message.keyalgorithm, self.message.mac) = \
dns.tsig.get_algorithm_and_mac(self.wire, self.current,
rdlen)
self.message.tsig_ctx = \
dns.tsig.validate(self.wire,
absolute_name,
secret,
int(time.time()),
self.message.request_mac,
rr_start,
self.current,
rdlen,
self.message.tsig_ctx,
self.message.multi,
self.message.first)
self.message.had_tsig = True
else:
if ttl < 0:
ttl = 0
if self.updating and \
(rdclass == dns.rdataclass.ANY or
rdclass == dns.rdataclass.NONE):
deleting = rdclass
rdclass = self.zone_rdclass
else:
deleting = None
if deleting == dns.rdataclass.ANY or \
(deleting == dns.rdataclass.NONE and \
section is self.message.answer):
covers = dns.rdatatype.NONE
rd = None
else:
rd = dns.rdata.from_wire(rdclass, rdtype, self.wire,
self.current, rdlen,
self.message.origin)
covers = rd.covers()
if self.message.xfr and rdtype == dns.rdatatype.SOA:
force_unique = True
rrset = self.message.find_rrset(section, name,
rdclass, rdtype, covers,
deleting, True, force_unique)
if not rd is None:
rrset.add(rd, ttl)
self.current = self.current + rdlen
def read(self):
"""Read a wire format DNS message and build a dns.message.Message
object."""
l = len(self.wire)
if l < 12:
raise ShortHeader
(self.message.id, self.message.flags, qcount, ancount,
aucount, adcount) = struct.unpack('!HHHHHH', self.wire[:12])
self.current = 12
if dns.opcode.is_update(self.message.flags):
self.updating = True
self._get_question(qcount)
if self.question_only:
return
self._get_section(self.message.answer, ancount)
self._get_section(self.message.authority, aucount)
self._get_section(self.message.additional, adcount)
if not self.ignore_trailing and self.current != l:
raise TrailingJunk
if self.message.multi and self.message.tsig_ctx and \
not self.message.had_tsig:
self.message.tsig_ctx.update(self.wire)
def from_wire(wire, keyring=None, request_mac='', xfr=False, origin=None,
tsig_ctx = None, multi = False, first = True,
question_only = False, one_rr_per_rrset = False,
ignore_trailing = False):
"""Convert a DNS wire format message into a message
object.
@param keyring: The keyring to use if the message is signed.
@type keyring: dict
@param request_mac: If the message is a response to a TSIG-signed request,
I{request_mac} should be set to the MAC of that request.
@type request_mac: string
@param xfr: Is this message part of a zone transfer?
@type xfr: bool
@param origin: If the message is part of a zone transfer, I{origin}
should be the origin name of the zone.
@type origin: dns.name.Name object
@param tsig_ctx: The ongoing TSIG context, used when validating zone
transfers.
@type tsig_ctx: hmac.HMAC object
@param multi: Is this message part of a multiple message sequence?
@type multi: bool
@param first: Is this message standalone, or the first of a multi
message sequence?
@type first: bool
@param question_only: Read only up to the end of the question section?
@type question_only: bool
@param one_rr_per_rrset: Put each RR into its own RRset
@type one_rr_per_rrset: bool
@param ignore_trailing: Ignore trailing junk at end of request?
@type ignore_trailing: bool
@raises ShortHeader: The message is less than 12 octets long.
@raises TrailingJunk: There were octets in the message past the end
of the proper DNS message.
@raises BadEDNS: An OPT record was in the wrong section, or occurred more
than once.
@raises BadTSIG: A TSIG record was not the last record of the additional
data section.
@rtype: dns.message.Message object"""
m = Message(id=0)
m.keyring = keyring
m.request_mac = request_mac
m.xfr = xfr
m.origin = origin
m.tsig_ctx = tsig_ctx
m.multi = multi
m.first = first
reader = _WireReader(wire, m, question_only, one_rr_per_rrset,
ignore_trailing)
reader.read()
return m
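# Illustrative sketch (not part of the original module): parsing a datagram
# received over UDP, assuming 'wire' holds the raw response bytes.
#
#     response = dns.message.from_wire(wire)
#     rcode = response.rcode()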
class _TextReader(object):
"""Text format reader.
@ivar tok: the tokenizer
@type tok: dns.tokenizer.Tokenizer object
@ivar message: The message object being built
@type message: dns.message.Message object
@ivar updating: Is the message a dynamic update?
@type updating: bool
@ivar zone_rdclass: The class of the zone in messages which are
DNS dynamic updates.
@type zone_rdclass: int
@ivar last_name: The most recently read name when building a message object
from text format.
@type last_name: dns.name.Name object
"""
def __init__(self, text, message):
self.message = message
self.tok = dns.tokenizer.Tokenizer(text)
self.last_name = None
self.zone_rdclass = dns.rdataclass.IN
self.updating = False
def _header_line(self, section):
"""Process one line from the text format header section."""
token = self.tok.get()
what = token.value
if what == 'id':
self.message.id = self.tok.get_int()
elif what == 'flags':
while True:
token = self.tok.get()
if not token.is_identifier():
self.tok.unget(token)
break
self.message.flags = self.message.flags | \
dns.flags.from_text(token.value)
if dns.opcode.is_update(self.message.flags):
self.updating = True
elif what == 'edns':
self.message.edns = self.tok.get_int()
self.message.ednsflags = self.message.ednsflags | \
(self.message.edns << 16)
elif what == 'eflags':
if self.message.edns < 0:
self.message.edns = 0
while True:
token = self.tok.get()
if not token.is_identifier():
self.tok.unget(token)
break
self.message.ednsflags = self.message.ednsflags | \
dns.flags.edns_from_text(token.value)
elif what == 'payload':
self.message.payload = self.tok.get_int()
if self.message.edns < 0:
self.message.edns = 0
elif what == 'opcode':
text = self.tok.get_string()
self.message.flags = self.message.flags | \
dns.opcode.to_flags(dns.opcode.from_text(text))
elif what == 'rcode':
text = self.tok.get_string()
self.message.set_rcode(dns.rcode.from_text(text))
else:
raise UnknownHeaderField
self.tok.get_eol()
def _question_line(self, section):
"""Process one line from the text format question section."""
token = self.tok.get(want_leading = True)
if not token.is_whitespace():
self.last_name = dns.name.from_text(token.value, None)
name = self.last_name
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except:
rdclass = dns.rdataclass.IN
# Type
rdtype = dns.rdatatype.from_text(token.value)
self.message.find_rrset(self.message.question, name,
rdclass, rdtype, create=True,
force_unique=True)
if self.updating:
self.zone_rdclass = rdclass
self.tok.get_eol()
def _rr_line(self, section):
"""Process one line from the text format answer, authority, or
additional data sections.
"""
deleting = None
# Name
token = self.tok.get(want_leading = True)
if not token.is_whitespace():
self.last_name = dns.name.from_text(token.value, None)
name = self.last_name
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
# TTL
try:
ttl = int(token.value, 0)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except:
ttl = 0
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
if rdclass == dns.rdataclass.ANY or rdclass == dns.rdataclass.NONE:
deleting = rdclass
rdclass = self.zone_rdclass
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except:
rdclass = dns.rdataclass.IN
# Type
rdtype = dns.rdatatype.from_text(token.value)
token = self.tok.get()
if not token.is_eol_or_eof():
self.tok.unget(token)
rd = dns.rdata.from_text(rdclass, rdtype, self.tok, None)
covers = rd.covers()
else:
rd = None
covers = dns.rdatatype.NONE
rrset = self.message.find_rrset(section, name,
rdclass, rdtype, covers,
deleting, True, self.updating)
if not rd is None:
rrset.add(rd, ttl)
def read(self):
"""Read a text format DNS message and build a dns.message.Message
object."""
line_method = self._header_line
section = None
while 1:
token = self.tok.get(True, True)
if token.is_eol_or_eof():
break
if token.is_comment():
u = token.value.upper()
if u == 'HEADER':
line_method = self._header_line
elif u == 'QUESTION' or u == 'ZONE':
line_method = self._question_line
section = self.message.question
elif u == 'ANSWER' or u == 'PREREQ':
line_method = self._rr_line
section = self.message.answer
elif u == 'AUTHORITY' or u == 'UPDATE':
line_method = self._rr_line
section = self.message.authority
elif u == 'ADDITIONAL':
line_method = self._rr_line
section = self.message.additional
self.tok.get_eol()
continue
self.tok.unget(token)
line_method(section)
def from_text(text):
"""Convert the text format message into a message object.
@param text: The text format message.
@type text: string
@raises UnknownHeaderField:
@raises dns.exception.SyntaxError:
@rtype: dns.message.Message object"""
# 'text' can also be a file, but we don't publish that fact
# since it's an implementation detail. The official file
# interface is from_file().
m = Message()
reader = _TextReader(text, m)
reader.read()
return m
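# Illustrative sketch (not part of the original module): round-tripping a
# message through its text form, assuming 'q' is an existing message.
#
#     text = q.to_text()
#     same = dns.message.from_text(text)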
def from_file(f):
"""Read the next text format message from the specified file.
@param f: file or string. If I{f} is a string, it is treated
as the name of a file to open.
@raises UnknownHeaderField:
@raises dns.exception.SyntaxError:
@rtype: dns.message.Message object"""
if sys.hexversion >= 0x02030000:
# allow Unicode filenames; turn on universal newline support
str_type = basestring
opts = 'rU'
else:
str_type = str
opts = 'r'
if isinstance(f, str_type):
f = file(f, opts)
want_close = True
else:
want_close = False
try:
m = from_text(f)
finally:
if want_close:
f.close()
return m
def make_query(qname, rdtype, rdclass = dns.rdataclass.IN, use_edns=None,
want_dnssec=False, ednsflags=0, payload=1280,
request_payload=None, options=None):
"""Make a query message.
The query name, type, and class may all be specified either
as objects of the appropriate type, or as strings.
    The query will have a randomly chosen query id, and its DNS flags
will be set to dns.flags.RD.
@param qname: The query name.
@type qname: dns.name.Name object or string
@param rdtype: The desired rdata type.
@type rdtype: int
@param rdclass: The desired rdata class; the default is class IN.
@type rdclass: int
@param use_edns: The EDNS level to use; the default is None (no EDNS).
See the description of dns.message.Message.use_edns() for the possible
values for use_edns and their meanings.
@type use_edns: int or bool or None
@param want_dnssec: Should the query indicate that DNSSEC is desired?
@type want_dnssec: bool
@param ednsflags: EDNS flag values.
@type ednsflags: int
@param payload: The EDNS sender's payload field, which is the maximum
size of UDP datagram the sender can handle.
@type payload: int
@param request_payload: The EDNS payload size to use when sending
this message. If not specified, defaults to the value of payload.
@type request_payload: int or None
@param options: The EDNS options
@type options: None or list of dns.edns.Option objects
@see: RFC 2671
@rtype: dns.message.Message object"""
if isinstance(qname, (str, unicode)):
qname = dns.name.from_text(qname)
if isinstance(rdtype, (str, unicode)):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(rdclass, (str, unicode)):
rdclass = dns.rdataclass.from_text(rdclass)
m = Message()
m.flags |= dns.flags.RD
m.find_rrset(m.question, qname, rdclass, rdtype, create=True,
force_unique=True)
m.use_edns(use_edns, ednsflags, payload, request_payload, options)
m.want_dnssec(want_dnssec)
return m
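# Illustrative sketch (not part of the original module): a recursive query for
# the A records of a hypothetical name, with the DO bit set.
#
#     q = dns.message.make_query('www.example.com.', dns.rdatatype.A,
#                                want_dnssec=True)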
def make_response(query, recursion_available=False, our_payload=8192,
fudge=300):
"""Make a message which is a response for the specified query.
The message returned is really a response skeleton; it has all
of the infrastructure required of a response, but none of the
content.
The response's question section is a shallow copy of the query's
question section, so the query's question RRsets should not be
changed.
@param query: the query to respond to
@type query: dns.message.Message object
@param recursion_available: should RA be set in the response?
@type recursion_available: bool
@param our_payload: payload size to advertise in EDNS responses; default
is 8192.
@type our_payload: int
@param fudge: TSIG time fudge; default is 300 seconds.
@type fudge: int
@rtype: dns.message.Message object"""
if query.flags & dns.flags.QR:
raise dns.exception.FormError('specified query message is not a query')
response = dns.message.Message(query.id)
response.flags = dns.flags.QR | (query.flags & dns.flags.RD)
if recursion_available:
response.flags |= dns.flags.RA
response.set_opcode(query.opcode())
response.question = list(query.question)
if query.edns >= 0:
response.use_edns(0, 0, our_payload, query.payload)
if query.had_tsig:
response.use_tsig(query.keyring, query.keyname, fudge, None, 0, '',
query.keyalgorithm)
response.request_mac = query.mac
return response
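# Illustrative sketch (not part of the original module): building a reply
# skeleton for a query parsed earlier and marking it as successful.
#
#     response = dns.message.make_response(query, recursion_available=True)
#     response.set_rcode(dns.rcode.NOERROR)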
| {
"content_hash": "acf94ad9adb290ed55811be9ff5a031a",
"timestamp": "",
"source": "github",
"line_count": 1099,
"max_line_length": 94,
"avg_line_length": 38.06915377616014,
"alnum_prop": 0.5770113294134519,
"repo_name": "rsreese/namebench",
"id": "e523931481bc600cbceaf1df82f034565274e73a",
"size": "42625",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "nb_third_party/dns/message.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3926"
},
{
"name": "Objective-C",
"bytes": "1819"
},
{
"name": "Python",
"bytes": "210663"
}
],
"symlink_target": ""
} |
import os
PROJECT_ROOT = os.path.join(os.path.dirname(__file__))
PROJECT_ROOT = os.path.normpath(os.path.abspath(PROJECT_ROOT))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'sqlite3.db')
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.3/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
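# Illustrative sketch (an assumption, not part of the original settings): a
# project-local static directory could be listed like this.
#
#     STATICFILES_DIRS = (
#         os.path.join(PROJECT_ROOT, 'static'),
#     )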
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'xfh7)zld#c5&%h8!p7h6gn&ndz2u(%dhw^)lpg$-9+j3lz$%%k'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'test_project.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'banzai',
'south',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| {
"content_hash": "d2d9deadeef3e33fcafc3c58b606cab0",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 88,
"avg_line_length": 32.36666666666667,
"alnum_prop": 0.7056642636457261,
"repo_name": "saippuakauppias/django-banzai",
"id": "3355a7b4b048b70f49783512b33247a8a7007093",
"size": "4899",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test_project/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32545"
}
],
"symlink_target": ""
} |
from keras.layers import Dense, merge
#Makes Dense connections to a series of previous outputs
#Can be used for making connections to all previous layers
#Eg http://arxiv.org/abs/1608.06993 but for Dense networks
#Avoids the need to concatenate inputs: a Dense layer is applied to each input and the results are summed
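#Illustrative usage sketch (an assumption, not from the original file):
#    inp = Input(shape=(32,))
#    h1 = Dense(64, activation='relu')(inp)
#    h2 = make_densedense(64, [inp, h1])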
def make_densedense(output_dim, inputs):
out_arr = []
for layer in inputs:
out_dense = Dense(output_dim)(layer)
out_arr.append(out_dense)
if len(out_arr) == 1:
return out_arr[0]
else:
return merge(out_arr, mode='sum') | {
"content_hash": "ce21f519664dd1c4f88db16fff51dcef",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 33.375,
"alnum_prop": 0.6891385767790262,
"repo_name": "kuza55/keras-extras",
"id": "933a925373efd835eff1e5821b2b46d24a0df08b",
"size": "534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "layers/layer_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7086"
}
],
"symlink_target": ""
} |
"""
Django settings for doughuware project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xb+%x5y0h2h#uz6gq20%q@w8emxrou(0k-%%4w5c1=^27f+mq4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'doughuware.urls'
WSGI_APPLICATION = 'doughuware.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
| {
"content_hash": "cb93d2bc7f7988380562dc3d8f9c8e5a",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 71,
"avg_line_length": 25.89622641509434,
"alnum_prop": 0.6881602914389799,
"repo_name": "JDougherty/doughuware",
"id": "2924dc819ec4c6dc7d09f18d41260bfbfff116ea",
"size": "2745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doughuware/doughuware/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1004"
},
{
"name": "HTML",
"bytes": "4161"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "7031"
}
],
"symlink_target": ""
} |
class TargetPool(GCPResource):
'''Object to represent a gcp targetPool'''
resource_type = "compute.v1.targetPool"
# pylint: disable=too-many-arguments
def __init__(self,
rname,
project,
zone,
desc,
region,
                 health_checks=None,
instances=None,
session_affinity=None,
):
'''constructor for gcp resource'''
super(TargetPool, self).__init__(rname,
TargetPool.resource_type,
project,
zone)
self._region = region
self._desc = desc
self._session_affinity = session_affinity
self._instances = instances
self._health_checks = health_checks
self._instance_refs = None
self._health_checks_refs = None
@property
def description(self):
'''property for resource description'''
return self._desc
@property
def region(self):
'''property for resource region'''
return self._region
@property
def session_affinity(self):
'''property for resource session_affinity'''
return self._session_affinity
@property
def instances(self):
'''property for resource instances'''
return self._instances
@property
def health_checks(self):
'''property for resource health_checks'''
return self._health_checks
@property
def instance_refs(self):
'''property for resource instance references type'''
if self._instance_refs == None:
self._instance_refs = ['$(ref.%s.selfLink)' % inst for inst in self.instances]
return self._instance_refs
@property
def health_checks_refs(self):
'''property for resource health_checks'''
if self._health_checks_refs == None:
self._health_checks_refs = ['$(ref.%s.selfLink)' % check for check in self.health_checks]
return self._health_checks_refs
def to_resource(self):
""" return the resource representation"""
return {'name': self.name,
'type': TargetPool.resource_type,
'properties': {'description': self.description,
'healthChecks': self.health_checks_refs,
'instances': self.instance_refs,
'sessionAffinity': 'NONE',
'region': self.region,
}
}
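    # Illustrative usage sketch (an assumption, not part of the original module);
    # the project, zone, region and resource names below are made up.
    #
    #     pool = TargetPool('my-pool', 'my-project', 'us-central1-a',
    #                       'example pool', 'us-central1',
    #                       health_checks=['my-check'], instances=['my-instance'])
    #     resource = pool.to_resource()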
| {
"content_hash": "a9441d3080e59f2a9964841e08d4b71b",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 101,
"avg_line_length": 32.5,
"alnum_prop": 0.5223264540337711,
"repo_name": "drewandersonnz/openshift-tools",
"id": "2e10c298561c8f67f8979e4178d1bf6e2e9ccace",
"size": "2733",
"binary": false,
"copies": "13",
"ref": "refs/heads/prod",
"path": "ansible/roles/lib_gcloud/build/lib/target_pool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24919"
},
{
"name": "Dockerfile",
"bytes": "10248"
},
{
"name": "Go",
"bytes": "127388"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "67678"
},
{
"name": "JavaScript",
"bytes": "9573"
},
{
"name": "Makefile",
"bytes": "1108"
},
{
"name": "PHP",
"bytes": "30017"
},
{
"name": "Python",
"bytes": "19774421"
},
{
"name": "Shell",
"bytes": "553874"
}
],
"symlink_target": ""
} |
import gi
try:
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
gi.require_version('GdkPixbuf', '2.0')
except ValueError as e:
print(e)
exit(1)
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkPixbuf
import os
DEFAULT_CURSOR = Gdk.Cursor(Gdk.CursorType.ARROW)
WAIT_CURSOR = Gdk.Cursor(Gdk.CursorType.WATCH)
def load_css(css_filename):
with open(css_filename, 'r') as css_file:
css_code = css_file.read()
style_provider = Gtk.CssProvider()
style_provider.load_from_data(css_code.encode())
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_USER)
def load_image(filename, size=24):
if os.path.exists(filename):
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(filename, size, size)
return Gtk.Image.new_from_pixbuf(pixbuf)
return None
def redondea(valor):
valor = valor * 10.0
return int(valor) / 10.0
def redondea_digits(valor, digits=0):
if digits == 0:
return int(round(valor, digits))
return round(valor, digits)
def s2f(cadena):
try:
value = float(cadena)
except BaseException:
value = 0.0
return value
def s2f_print(word):
try:
return float(word)
except Exception as e:
print('error:', str(e))
return 0
def cambia(valor, a, SI=True):
if len(valor) == 0:
return ''
valor = float(valor)
if SI is False:
valor = redondea(5.0 / 9.0 * (valor - 32.0))
if a == 'F':
return str(redondea(valor * 9.0 / 5.0 + 32.0))
elif a == 'K':
return str(redondea(valor + 273.15))
return str(valor)
def change_temperature(valor, a):
valor = s2f(valor)
    # the incoming value is assumed to be in ºF
if a == 'C':
valor = 5.0 / 9.0 * (valor - 32.0)
elif a == 'K':
valor = 5.0 / 9.0 * (valor - 32.0) + 273.15
return str(redondea_digits(valor))
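# Illustrative check (not part of the original module): 212 ºF converts to
# 100 ºC and 373 K once redondea_digits rounds to whole degrees.
#
#     change_temperature('212', 'C')   # -> '100'
#     change_temperature('212', 'K')   # -> '373'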
def fa2f(temperature):
return (temperature - 273.15) * 9.0 / 5.0 + 32.0
def f2c(temperature):
return (s2f(temperature) - 32.0) * 5.0 / 9.0
| {
"content_hash": "edb3a2ff2cbf9b329da9cea4989e8c07",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 77,
"avg_line_length": 23.554347826086957,
"alnum_prop": 0.6035994462390402,
"repo_name": "atareao/my-weather-indicator",
"id": "008d6b75d709abd4ea8389c607a309638de2f365",
"size": "3384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16797"
},
{
"name": "HTML",
"bytes": "8469"
},
{
"name": "JavaScript",
"bytes": "80848"
},
{
"name": "Python",
"bytes": "401750"
}
],
"symlink_target": ""
} |
import io
import os
import setuptools # type: ignore
package_root = os.path.abspath(os.path.dirname(__file__))
name = "google-cloud-containeranalysis"
description = "Google Cloud Devtools Containeranalysis API client library"
version = {}
with open(
os.path.join(
package_root, "google/cloud/devtools/containeranalysis/gapic_version.py"
)
) as fp:
exec(fp.read(), version)
version = version["__version__"]
if version[0] == "0":
release_status = "Development Status :: 4 - Beta"
else:
release_status = "Development Status :: 5 - Production/Stable"
dependencies = [
"google-api-core[grpc] >= 1.33.2, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*",
"proto-plus >= 1.22.0, <2.0.0dev",
"protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5",
"grpc-google-iam-v1 >= 0.12.4, < 1.0.0dev",
"grafeas >=1.4.1, <2.0dev",
]
url = "https://github.com/googleapis/python-containeranalysis"
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
packages = [
package
for package in setuptools.PEP420PackageFinder.find()
if package.startswith("google")
]
namespaces = ["google"]
if "google.cloud" in packages:
namespaces.append("google.cloud")
if "google.cloud.devtools" in packages:
namespaces.append("google.cloud.devtools")
setuptools.setup(
name=name,
version=version,
description=description,
long_description=readme,
author="Google LLC",
author_email="[email protected]",
license="Apache 2.0",
url=url,
classifiers=[
release_status,
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Topic :: Internet",
],
platforms="Posix; MacOS X; Windows",
packages=packages,
python_requires=">=3.7",
namespace_packages=namespaces,
install_requires=dependencies,
include_package_data=True,
zip_safe=False,
)
| {
"content_hash": "f497e9c0eeb427be06128b49842c200b",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 113,
"avg_line_length": 29.89156626506024,
"alnum_prop": 0.6360338573155986,
"repo_name": "googleapis/python-containeranalysis",
"id": "16a187fbc581ec13cf71b6585bbd7ed0f7ac0c32",
"size": "3081",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "255878"
},
{
"name": "Shell",
"bytes": "30693"
}
],
"symlink_target": ""
} |
import os
import sys
DEBUG = True
SECRET_KEY = os.environ.get('DAYS_SECRET_KEY', 'insecure-secret-key')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Application definition
INSTALLED_APPS = [
'days.apps.days.apps.DaysConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'days.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'days.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, 'static'),
# )
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Sites framework
SITE_ID = 1
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)s %(process)d [%(name)s:%(lineno)d] - %(message)s',
},
},
'handlers': {
'console': {
'level': 'DEBUG' if DEBUG else 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
},
'': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False
},
},
}
# Day of the week on which to run the load_events management command, where Monday is 0 and Sunday is 6.
# Allows Heroku's Scheduler to be used to run the command weekly.
LOAD_DAY = 6
# Number of days for which to retrieve and store events.
DAY_COUNT = 30
# Number of events to include in daily emails to subscribers.
EVENT_COUNT = 1
# SendGrid credentials
SENDGRID_USERNAME = os.environ.get('SENDGRID_USERNAME', 'set-me-please')
SENDGRID_PASSWORD = os.environ.get('SENDGRID_PASSWORD', 'set-me-please')
# Address to send emails from
FROM_EMAIL = 'On This Day <[email protected]>'
| {
"content_hash": "027656ac07e24b7beb1d4d336a38c45c",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 104,
"avg_line_length": 26.98581560283688,
"alnum_prop": 0.6438896189224704,
"repo_name": "rlucioni/days",
"id": "c18bd3bc7ba5afc43b5fdf0b5ecb46577d3f4ff4",
"size": "3805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "days/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "242"
},
{
"name": "Python",
"bytes": "18244"
}
],
"symlink_target": ""
} |
import requests
class LeClient(object):
def __init__(self):
self.endpoint = 'http://dbios.herokuapp.com/'
def request(self, path=None, params=None):
self.path = path
self.params = params
        result = requests.get(self.endpoint + path, params=params)
return result.json()
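    # Illustrative usage sketch (an assumption, not part of the original module);
    # the path and query parameters below are made up.
    #
    #     client = LeClient()
    #     data = client.request(path='example', params={'page': 1})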
| {
"content_hash": "278a54a33084d2ede6326448c06c2b70",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 66,
"avg_line_length": 24.357142857142858,
"alnum_prop": 0.6099706744868035,
"repo_name": "fly/dbcli",
"id": "464ef8230c660bf85a04a4ddbef9d2d06c2bcda8",
"size": "341",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5710"
}
],
"symlink_target": ""
} |
from BattleBase import *
from DistributedBattleAI import *
from toontown.toonbase.ToontownBattleGlobals import *
import random
from toontown.suit import DistributedSuitBaseAI
import SuitBattleGlobals
import BattleExperienceAI
from toontown.toon import NPCToons
from toontown.pets import PetTricks, DistributedPetProxyAI
from toontown.hood import ZoneUtil
from direct.showbase.PythonUtil import lerp
import sys
class BattleCalculatorAI:
AccuracyBonuses = [0,
20,
40,
60]
DamageBonuses = [0,
20,
20,
20]
AttackExpPerTrack = [0,
10,
20,
30,
40,
50,
60]
NumRoundsLured = [2,
2,
3,
3,
4,
4,
15]
TRAP_CONFLICT = -2
APPLY_HEALTH_ADJUSTMENTS = 1
TOONS_TAKE_NO_DAMAGE = 0
CAP_HEALS = 1
CLEAR_SUIT_ATTACKERS = 1
SUITS_UNLURED_IMMEDIATELY = 1
CLEAR_MULTIPLE_TRAPS = 0
KBBONUS_LURED_FLAG = 0
KBBONUS_TGT_LURED = 1
notify = DirectNotifyGlobal.directNotify.newCategory('BattleCalculatorAI')
toonsAlwaysHit = simbase.config.GetBool('toons-always-hit', 0)
toonsAlwaysMiss = simbase.config.GetBool('toons-always-miss', 0)
toonsAlways5050 = simbase.config.GetBool('toons-always-5050', 0)
suitsAlwaysHit = simbase.config.GetBool('suits-always-hit', 0)
suitsAlwaysMiss = simbase.config.GetBool('suits-always-miss', 0)
immortalSuits = simbase.config.GetBool('immortal-suits', 0)
propAndOrganicBonusStack = simbase.config.GetBool('prop-and-organic-bonus-stack', 0)
def __init__(self, battle, tutorialFlag = 0):
self.battle = battle
self.SuitAttackers = {}
self.currentlyLuredSuits = {}
self.successfulLures = {}
self.toonAtkOrder = []
self.toonHPAdjusts = {}
self.toonSkillPtsGained = {}
self.traps = {}
self.npcTraps = {}
self.suitAtkStats = {}
self.__clearBonuses(hp=1)
self.__clearBonuses(hp=0)
self.delayedUnlures = []
self.__skillCreditMultiplier = simbase.air.baseXpMultiplier
self.tutorialFlag = tutorialFlag
self.trainTrapTriggered = False
def setSkillCreditMultiplier(self, mult):
self.__skillCreditMultiplier = simbase.air.baseXpMultiplier * mult
def getSkillCreditMultiplier(self):
return self.__skillCreditMultiplier
def cleanup(self):
self.battle = None
return
def __calcToonAtkHit(self, attackIndex, atkTargets):
if len(atkTargets) == 0:
return (0, 0)
if self.tutorialFlag:
return (1, 95)
if self.toonsAlways5050:
roll = random.randint(0, 99)
if roll < 50:
return (1, 95)
else:
return (0, 0)
if self.toonsAlwaysHit:
return (1, 95)
elif self.toonsAlwaysMiss:
return (0, 0)
debug = self.notify.getDebug()
attack = self.battle.toonAttacks[attackIndex]
atkTrack, atkLevel = self.__getActualTrackLevel(attack)
hasAccuracyBuff = False
toon = simbase.air.doId2do.get(attack[TOON_ID_COL])
if toon:
if toon.hasBuff(BGagAccuracy):
if not ZoneUtil.isDynamicZone(toon.zoneId):
if ZoneUtil.getWhereName(toon.zoneId, True) in ('street', 'factoryExterior', 'cogHQExterior'):
hasAccuracyBuff = True
if atkTrack == NPCSOS:
return (1, 95)
if atkTrack == FIRE:
return (1, 95)
if atkTrack == TRAP:
if debug:
self.notify.debug('Attack is a trap, so it hits regardless')
attack[TOON_ACCBONUS_COL] = 0
return (1, 100)
elif atkTrack == DROP and attack[TOON_TRACK_COL] == NPCSOS:
unluredSuits = 0
for tgt in atkTargets:
if not self.__suitIsLured(tgt.getDoId()):
unluredSuits = 1
if unluredSuits == 0:
attack[TOON_ACCBONUS_COL] = 1
return (0, 0)
elif atkTrack == DROP:
allLured = True
for i in xrange(len(atkTargets)):
if self.__suitIsLured(atkTargets[i].getDoId()):
pass
else:
allLured = False
if allLured:
attack[TOON_ACCBONUS_COL] = 1
return (0, 0)
elif atkTrack == PETSOS:
return self.__calculatePetTrickSuccess(attack)
tgtDef = 0
numLured = 0
if atkTrack != HEAL:
for currTarget in atkTargets:
thisSuitDef = self.__targetDefense(currTarget, atkTrack)
if debug:
self.notify.debug('Examining suit def for toon attack: ' + str(thisSuitDef))
tgtDef = min(thisSuitDef, tgtDef)
if self.__suitIsLured(currTarget.getDoId()):
numLured += 1
trackExp = self.__toonTrackExp(attack[TOON_ID_COL], atkTrack)
for currOtherAtk in self.toonAtkOrder:
if currOtherAtk != attack[TOON_ID_COL]:
nextAttack = self.battle.toonAttacks[currOtherAtk]
nextAtkTrack = self.__getActualTrack(nextAttack)
if atkTrack == nextAtkTrack and attack[TOON_TGT_COL] == nextAttack[TOON_TGT_COL]:
currTrackExp = self.__toonTrackExp(nextAttack[TOON_ID_COL], atkTrack)
if debug:
self.notify.debug('Examining toon track exp bonus: ' + str(currTrackExp))
trackExp = max(currTrackExp, trackExp)
if debug:
if atkTrack == HEAL:
self.notify.debug('Toon attack is a heal, no target def used')
else:
self.notify.debug('Suit defense used for toon attack: ' + str(tgtDef))
self.notify.debug('Toon track exp bonus used for toon attack: ' + str(trackExp))
if attack[TOON_TRACK_COL] == NPCSOS:
randChoice = 0
else:
randChoice = random.randint(0, 99)
propAcc = AvPropAccuracy[atkTrack][atkLevel]
if hasAccuracyBuff:
propAcc *= BGagAccuracyMultiplier
if atkTrack == LURE:
treebonus = self.__toonCheckGagBonus(attack[TOON_ID_COL], atkTrack, atkLevel)
propBonus = self.__checkPropBonus(atkTrack)
if self.propAndOrganicBonusStack:
propAcc = 0
if treebonus:
self.notify.debug('using organic bonus lure accuracy')
propAcc += AvLureBonusAccuracy[atkLevel]
if propBonus:
self.notify.debug('using prop bonus lure accuracy')
propAcc += AvLureBonusAccuracy[atkLevel]
elif treebonus or propBonus:
self.notify.debug('using oragnic OR prop bonus lure accuracy')
propAcc = AvLureBonusAccuracy[atkLevel]
attackAcc = propAcc + trackExp + tgtDef
currAtk = self.toonAtkOrder.index(attackIndex)
if currAtk > 0 and atkTrack != HEAL:
prevAtkId = self.toonAtkOrder[currAtk - 1]
prevAttack = self.battle.toonAttacks[prevAtkId]
prevAtkTrack = self.__getActualTrack(prevAttack)
lure = atkTrack == LURE and (not attackAffectsGroup(atkTrack, atkLevel,
attack[TOON_TRACK_COL]) and attack[TOON_TGT_COL] in self.successfulLures or attackAffectsGroup(atkTrack, atkLevel, attack[TOON_TRACK_COL]))
if atkTrack == prevAtkTrack and (attack[TOON_TGT_COL] == prevAttack[TOON_TGT_COL] or lure):
if prevAttack[TOON_ACCBONUS_COL] == 1:
if debug:
self.notify.debug('DODGE: Toon attack track dodged')
elif prevAttack[TOON_ACCBONUS_COL] == 0:
if debug:
self.notify.debug('HIT: Toon attack track hit')
attack[TOON_ACCBONUS_COL] = prevAttack[TOON_ACCBONUS_COL]
return (not attack[TOON_ACCBONUS_COL], attackAcc)
atkAccResult = attackAcc
if debug:
self.notify.debug('setting atkAccResult to %d' % atkAccResult)
acc = attackAcc + self.__calcToonAccBonus(attackIndex)
if atkTrack != LURE and atkTrack != HEAL:
if atkTrack != DROP:
if numLured == len(atkTargets):
if debug:
self.notify.debug('all targets are lured, attack hits')
attack[TOON_ACCBONUS_COL] = 0
return (1, 100)
else:
luredRatio = float(numLured) / float(len(atkTargets))
accAdjust = 100 * luredRatio
if accAdjust > 0 and debug:
self.notify.debug(str(numLured) + ' out of ' + str(len(atkTargets)) + ' targets are lured, so adding ' + str(accAdjust) + ' to attack accuracy')
acc += accAdjust
elif numLured == len(atkTargets):
if debug:
self.notify.debug('all targets are lured, attack misses')
attack[TOON_ACCBONUS_COL] = 0
return (0, 0)
if acc > MaxToonAcc:
acc = MaxToonAcc
if randChoice < acc:
if debug:
self.notify.debug('HIT: Toon attack rolled' + str(randChoice) + 'to hit with an accuracy of' + str(acc))
attack[TOON_ACCBONUS_COL] = 0
else:
if debug:
self.notify.debug('MISS: Toon attack rolled' + str(randChoice) + 'to hit with an accuracy of' + str(acc))
attack[TOON_ACCBONUS_COL] = 1
return (not attack[TOON_ACCBONUS_COL], atkAccResult)
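    # Worked sketch of the roll above (illustrative numbers, not from the code):
    # with a prop accuracy of 70, a track-experience bonus of 30 and a target
    # defense of -20, the base accuracy is 70 + 30 - 20 = 80; after any stacking
    # bonus and the MaxToonAcc cap, a random roll below the final accuracy hits
    # and anything else misses.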
def __toonTrackExp(self, toonId, track):
toon = self.battle.getToon(toonId)
if toon != None:
toonExpLvl = toon.experience.getExpLevel(track)
exp = self.AttackExpPerTrack[toonExpLvl]
if track == HEAL:
exp = exp * 0.5
self.notify.debug('Toon track exp: ' + str(toonExpLvl) + ' and resulting acc bonus: ' + str(exp))
return exp
else:
return 0
return
def __toonCheckGagBonus(self, toonId, track, level):
toon = self.battle.getToon(toonId)
if toon != None:
return toon.checkGagBonus(track, level)
else:
return False
return
def __checkPropBonus(self, track):
result = False
if self.battle.getInteractivePropTrackBonus() == track:
result = True
return result
def __targetDefense(self, suit, atkTrack):
if atkTrack == HEAL:
return 0
suitDef = SuitBattleGlobals.SuitAttributes[suit.dna.name]['def'][suit.getLevel()]
return -suitDef
def __createToonTargetList(self, attackIndex):
attack = self.battle.toonAttacks[attackIndex]
atkTrack, atkLevel = self.__getActualTrackLevel(attack)
targetList = []
if atkTrack == NPCSOS:
return targetList
if not attackAffectsGroup(atkTrack, atkLevel, attack[TOON_TRACK_COL]):
if atkTrack == HEAL:
target = attack[TOON_TGT_COL]
else:
target = self.battle.findSuit(attack[TOON_TGT_COL])
if target != None:
targetList.append(target)
elif atkTrack == HEAL or atkTrack == PETSOS:
if attack[TOON_TRACK_COL] == NPCSOS or atkTrack == PETSOS:
targetList = self.battle.activeToons
else:
for currToon in self.battle.activeToons:
if attack[TOON_ID_COL] != currToon:
targetList.append(currToon)
else:
targetList = self.battle.activeSuits
return targetList
def __prevAtkTrack(self, attackerId, toon = 1):
if toon:
prevAtkIdx = self.toonAtkOrder.index(attackerId) - 1
if prevAtkIdx >= 0:
prevAttackerId = self.toonAtkOrder[prevAtkIdx]
attack = self.battle.toonAttacks[prevAttackerId]
return self.__getActualTrack(attack)
else:
return NO_ATTACK
def getSuitTrapType(self, suitId):
if suitId in self.traps:
if self.traps[suitId][0] == self.TRAP_CONFLICT:
return NO_TRAP
else:
return self.traps[suitId][0]
else:
return NO_TRAP
def __suitTrapDamage(self, suitId):
if suitId in self.traps:
return self.traps[suitId][2]
else:
return 0
def addTrainTrapForJoiningSuit(self, suitId):
self.notify.debug('addTrainTrapForJoiningSuit suit=%d self.traps=%s' % (suitId, self.traps))
trapInfoToUse = None
for trapInfo in self.traps.values():
if trapInfo[0] == UBER_GAG_LEVEL_INDEX:
trapInfoToUse = trapInfo
break
if trapInfoToUse:
self.traps[suitId] = trapInfoToUse
else:
self.notify.warning('huh we did not find a train trap?')
return
def __addSuitGroupTrap(self, suitId, trapLvl, attackerId, allSuits, npcDamage = 0):
if npcDamage == 0:
if suitId in self.traps:
if self.traps[suitId][0] == self.TRAP_CONFLICT:
pass
else:
self.traps[suitId][0] = self.TRAP_CONFLICT
for suit in allSuits:
id = suit.doId
if id in self.traps:
self.traps[id][0] = self.TRAP_CONFLICT
else:
self.traps[id] = [self.TRAP_CONFLICT, 0, 0]
else:
toon = self.battle.getToon(attackerId)
organicBonus = toon.checkGagBonus(TRAP, trapLvl)
propBonus = self.__checkPropBonus(TRAP)
damage = getAvPropDamage(TRAP, trapLvl, toon.experience.getExp(TRAP), organicBonus, propBonus, self.propAndOrganicBonusStack)
if self.itemIsCredit(TRAP, trapLvl):
self.traps[suitId] = [trapLvl, attackerId, damage]
else:
self.traps[suitId] = [trapLvl, 0, damage]
self.notify.debug('calling __addLuredSuitsDelayed')
self.__addLuredSuitsDelayed(attackerId, targetId=-1, ignoreDamageCheck=True)
elif suitId in self.traps:
if self.traps[suitId][0] == self.TRAP_CONFLICT:
self.traps[suitId] = [trapLvl, 0, npcDamage]
elif not self.__suitIsLured(suitId):
self.traps[suitId] = [trapLvl, 0, npcDamage]
def __addSuitTrap(self, suitId, trapLvl, attackerId, npcDamage = 0):
if npcDamage == 0:
if suitId in self.traps:
if self.traps[suitId][0] == self.TRAP_CONFLICT:
pass
else:
self.traps[suitId][0] = self.TRAP_CONFLICT
else:
toon = self.battle.getToon(attackerId)
organicBonus = toon.checkGagBonus(TRAP, trapLvl)
propBonus = self.__checkPropBonus(TRAP)
damage = getAvPropDamage(TRAP, trapLvl, toon.experience.getExp(TRAP), organicBonus, propBonus, self.propAndOrganicBonusStack)
if self.itemIsCredit(TRAP, trapLvl):
self.traps[suitId] = [trapLvl, attackerId, damage]
else:
self.traps[suitId] = [trapLvl, 0, damage]
elif suitId in self.traps:
if self.traps[suitId][0] == self.TRAP_CONFLICT:
self.traps[suitId] = [trapLvl, 0, npcDamage]
elif not self.__suitIsLured(suitId):
self.traps[suitId] = [trapLvl, 0, npcDamage]
def __removeSuitTrap(self, suitId):
if suitId in self.traps:
del self.traps[suitId]
def __clearTrapCreator(self, creatorId, suitId = None):
if suitId == None:
for currTrap in self.traps.keys():
if creatorId == self.traps[currTrap][1]:
self.traps[currTrap][1] = 0
elif suitId in self.traps:
self.traps[suitId][1] = 0
return
def __trapCreator(self, suitId):
if suitId in self.traps:
return self.traps[suitId][1]
else:
return 0
def __initTraps(self):
self.trainTrapTriggered = False
keysList = self.traps.keys()
for currTrap in keysList:
if self.traps[currTrap][0] == self.TRAP_CONFLICT:
del self.traps[currTrap]
def __calcToonAtkHp(self, toonId):
attack = self.battle.toonAttacks[toonId]
targetList = self.__createToonTargetList(toonId)
atkHit, atkAcc = self.__calcToonAtkHit(toonId, targetList)
atkTrack, atkLevel, atkHp = self.__getActualTrackLevelHp(attack)
if not atkHit and atkTrack != HEAL:
return
validTargetAvail = 0
lureDidDamage = 0
currLureId = -1
for currTarget in xrange(len(targetList)):
attackLevel = -1
attackTrack = None
attackDamage = 0
toonTarget = 0
targetLured = 0
if atkTrack == HEAL or atkTrack == PETSOS:
targetId = targetList[currTarget]
toonTarget = 1
else:
targetId = targetList[currTarget].getDoId()
if atkTrack == LURE:
if self.getSuitTrapType(targetId) == NO_TRAP:
if self.notify.getDebug():
self.notify.debug('Suit lured, but no trap exists')
if self.SUITS_UNLURED_IMMEDIATELY:
if not self.__suitIsLured(targetId, prevRound=1):
if not self.__combatantDead(targetId, toon=toonTarget):
validTargetAvail = 1
rounds = self.NumRoundsLured[atkLevel]
wakeupChance = 100 - atkAcc * 2
npcLurer = attack[TOON_TRACK_COL] == NPCSOS
currLureId = self.__addLuredSuitInfo(targetId, -1, rounds, wakeupChance, toonId, atkLevel, lureId=currLureId, npc=npcLurer)
if self.notify.getDebug():
self.notify.debug('Suit lured for ' + str(rounds) + ' rounds max with ' + str(wakeupChance) + '% chance to wake up each round')
targetLured = 1
else:
attackTrack = TRAP
if targetId in self.traps:
trapInfo = self.traps[targetId]
attackLevel = trapInfo[0]
else:
attackLevel = NO_TRAP
attackDamage = self.__suitTrapDamage(targetId)
trapCreatorId = self.__trapCreator(targetId)
if trapCreatorId > 0:
self.notify.debug('Giving trap EXP to toon ' + str(trapCreatorId))
self.__addAttackExp(attack, track=TRAP, level=attackLevel, attackerId=trapCreatorId)
self.__clearTrapCreator(trapCreatorId, targetId)
lureDidDamage = 1
if self.notify.getDebug():
self.notify.debug('Suit lured right onto a trap! (' + str(AvProps[attackTrack][attackLevel]) + ',' + str(attackLevel) + ')')
if not self.__combatantDead(targetId, toon=toonTarget):
validTargetAvail = 1
targetLured = 1
if not self.SUITS_UNLURED_IMMEDIATELY:
if not self.__suitIsLured(targetId, prevRound=1):
if not self.__combatantDead(targetId, toon=toonTarget):
validTargetAvail = 1
rounds = self.NumRoundsLured[atkLevel]
wakeupChance = 100 - atkAcc * 2
npcLurer = attack[TOON_TRACK_COL] == NPCSOS
currLureId = self.__addLuredSuitInfo(targetId, -1, rounds, wakeupChance, toonId, atkLevel, lureId=currLureId, npc=npcLurer)
if self.notify.getDebug():
self.notify.debug('Suit lured for ' + str(rounds) + ' rounds max with ' + str(wakeupChance) + '% chance to wake up each round')
targetLured = 1
if attackLevel != -1:
self.__addLuredSuitsDelayed(toonId, targetId)
if targetLured and (not targetId in self.successfulLures or targetId in self.successfulLures and self.successfulLures[targetId][1] < atkLevel):
self.notify.debug('Adding target ' + str(targetId) + ' to successfulLures list')
self.successfulLures[targetId] = [toonId,
atkLevel,
atkAcc,
-1]
else:
if atkTrack == TRAP:
npcDamage = 0
if attack[TOON_TRACK_COL] == NPCSOS:
npcDamage = atkHp
if self.CLEAR_MULTIPLE_TRAPS:
if self.getSuitTrapType(targetId) != NO_TRAP:
self.__clearAttack(toonId)
return
if atkLevel == UBER_GAG_LEVEL_INDEX:
self.__addSuitGroupTrap(targetId, atkLevel, toonId, targetList, npcDamage)
if self.__suitIsLured(targetId):
self.notify.debug('Train Trap on lured suit %d, \n indicating with KBBONUS_COL flag' % targetId)
tgtPos = self.battle.activeSuits.index(targetList[currTarget])
attack[TOON_KBBONUS_COL][tgtPos] = self.KBBONUS_LURED_FLAG
else:
self.__addSuitTrap(targetId, atkLevel, toonId, npcDamage)
elif self.__suitIsLured(targetId) and atkTrack == SOUND:
self.notify.debug('Sound on lured suit, ' + 'indicating with KBBONUS_COL flag')
tgtPos = self.battle.activeSuits.index(targetList[currTarget])
attack[TOON_KBBONUS_COL][tgtPos] = self.KBBONUS_LURED_FLAG
attackLevel = atkLevel
attackTrack = atkTrack
toon = self.battle.getToon(toonId)
if attack[TOON_TRACK_COL] == NPCSOS and lureDidDamage != 1 or attack[TOON_TRACK_COL] == PETSOS:
attackDamage = atkHp
elif atkTrack == FIRE:
suit = self.battle.findSuit(targetId)
if suit:
costToFire = 1
abilityToFire = toon.getPinkSlips()
numLeft = abilityToFire - costToFire
if numLeft < 0:
numLeft = 0
toon.b_setPinkSlips(numLeft)
if costToFire > abilityToFire:
simbase.air.writeServerEvent('suspicious', toonId, 'Toon attempting to fire a %s cost cog with %s pinkslips' % (costToFire, abilityToFire))
print 'Not enough PinkSlips to fire cog - print a warning here'
else:
suit.skeleRevives = 0
attackDamage = suit.getHP()
else:
attackDamage = 0
bonus = 0
else:
organicBonus = toon.checkGagBonus(attackTrack, attackLevel)
propBonus = self.__checkPropBonus(attackTrack)
attackDamage = getAvPropDamage(attackTrack, attackLevel, toon.experience.getExp(attackTrack), organicBonus, propBonus, self.propAndOrganicBonusStack)
if not self.__combatantDead(targetId, toon=toonTarget):
if self.__suitIsLured(targetId) and atkTrack == DROP:
self.notify.debug('not setting validTargetAvail, since drop on a lured suit')
else:
validTargetAvail = 1
if attackLevel == -1 and not atkTrack == FIRE:
result = LURE_SUCCEEDED
elif atkTrack != TRAP:
result = attackDamage
if atkTrack == HEAL:
if not self.__attackHasHit(attack, suit=0):
result = result * 0.2
if self.notify.getDebug():
self.notify.debug('toon does ' + str(result) + ' healing to toon(s)')
else:
if self.__suitIsLured(targetId) and atkTrack == DROP:
result = 0
self.notify.debug('setting damage to 0, since drop on a lured suit')
if self.notify.getDebug():
self.notify.debug('toon does ' + str(result) + ' damage to suit')
else:
result = 0
if result != 0 or atkTrack == PETSOS:
targets = self.__getToonTargets(attack)
if targetList[currTarget] not in targets:
if self.notify.getDebug():
self.notify.debug('Target of toon is not accessible!')
continue
targetIndex = targets.index(targetList[currTarget])
if atkTrack == HEAL:
result = result / len(targetList)
if self.notify.getDebug():
self.notify.debug('Splitting heal among ' + str(len(targetList)) + ' targets')
if targetId in self.successfulLures and atkTrack == LURE:
self.notify.debug('Updating lure damage to ' + str(result))
self.successfulLures[targetId][3] = result
else:
attack[TOON_HP_COL][targetIndex] = result
if result > 0 and atkTrack != HEAL and atkTrack != DROP and atkTrack != PETSOS:
attackTrack = LURE
lureInfos = self.__getLuredExpInfo(targetId)
for currInfo in lureInfos:
if currInfo[3]:
self.notify.debug('Giving lure EXP to toon ' + str(currInfo[0]))
self.__addAttackExp(attack, track=attackTrack, level=currInfo[1], attackerId=currInfo[0])
self.__clearLurer(currInfo[0], lureId=currInfo[2])
if lureDidDamage:
if self.itemIsCredit(atkTrack, atkLevel):
self.notify.debug('Giving lure EXP to toon ' + str(toonId))
self.__addAttackExp(attack)
if not validTargetAvail and self.__prevAtkTrack(toonId) != atkTrack:
self.__clearAttack(toonId)
return
def __getToonTargets(self, attack):
track = self.__getActualTrack(attack)
if track == HEAL or track == PETSOS:
return self.battle.activeToons
else:
return self.battle.activeSuits
def __attackHasHit(self, attack, suit = 0):
if suit == 1:
for dmg in attack[SUIT_HP_COL]:
if dmg > 0:
return 1
return 0
else:
track = self.__getActualTrack(attack)
return not attack[TOON_ACCBONUS_COL] and track != NO_ATTACK
def __attackDamage(self, attack, suit = 0):
if suit:
for dmg in attack[SUIT_HP_COL]:
if dmg > 0:
return dmg
return 0
else:
for dmg in attack[TOON_HP_COL]:
if dmg > 0:
return dmg
return 0
def __attackDamageForTgt(self, attack, tgtPos, suit = 0):
if suit:
return attack[SUIT_HP_COL][tgtPos]
else:
return attack[TOON_HP_COL][tgtPos]
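    # Accuracy bonus: count earlier attacks this round that hit the same target
    # (or the whole group) with a different track, then look the bonus up in
    # AccuracyBonuses.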
def __calcToonAccBonus(self, attackKey):
numPrevHits = 0
attackIdx = self.toonAtkOrder.index(attackKey)
for currPrevAtk in xrange(attackIdx - 1, -1, -1):
attack = self.battle.toonAttacks[attackKey]
atkTrack, atkLevel = self.__getActualTrackLevel(attack)
prevAttackKey = self.toonAtkOrder[currPrevAtk]
prevAttack = self.battle.toonAttacks[prevAttackKey]
prvAtkTrack, prvAtkLevel = self.__getActualTrackLevel(prevAttack)
if self.__attackHasHit(prevAttack) and (attackAffectsGroup(prvAtkTrack, prvAtkLevel, prevAttack[TOON_TRACK_COL]) or attackAffectsGroup(atkTrack, atkLevel, attack[TOON_TRACK_COL]) or attack[TOON_TGT_COL] == prevAttack[TOON_TGT_COL]) and atkTrack != prvAtkTrack:
numPrevHits += 1
if numPrevHits > 0 and self.notify.getDebug():
self.notify.debug('ACC BONUS: toon attack received accuracy ' + 'bonus of ' + str(self.AccuracyBonuses[numPrevHits]) + ' from previous attack by (' + str(attack[TOON_ID_COL]) + ') which hit')
return self.AccuracyBonuses[numPrevHits]
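    # Apply the values stored on a toon attack to the combatants: heals adjust
    # toonHPAdjusts, everything else reduces suit HP, consuming skelecog
    # revives and flagging deaths/revives in the attack record.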
def __applyToonAttackDamages(self, toonId, hpbonus = 0, kbbonus = 0):
totalDamages = 0
if not self.APPLY_HEALTH_ADJUSTMENTS:
return totalDamages
attack = self.battle.toonAttacks[toonId]
track = self.__getActualTrack(attack)
if track != NO_ATTACK and track != SOS and track != TRAP and track != NPCSOS:
targets = self.__getToonTargets(attack)
for position in xrange(len(targets)):
if hpbonus:
if targets[position] in self.__createToonTargetList(toonId):
damageDone = attack[TOON_HPBONUS_COL]
else:
damageDone = 0
elif kbbonus:
if targets[position] in self.__createToonTargetList(toonId):
damageDone = attack[TOON_KBBONUS_COL][position]
else:
damageDone = 0
else:
damageDone = attack[TOON_HP_COL][position]
if damageDone <= 0 or self.immortalSuits:
continue
if track == HEAL or track == PETSOS:
currTarget = targets[position]
if self.CAP_HEALS:
toonHp = self.__getToonHp(currTarget)
toonMaxHp = self.__getToonMaxHp(currTarget)
if toonHp + damageDone > toonMaxHp:
damageDone = toonMaxHp - toonHp
attack[TOON_HP_COL][position] = damageDone
self.toonHPAdjusts[currTarget] += damageDone
totalDamages = totalDamages + damageDone
continue
currTarget = targets[position]
currTarget.setHP(currTarget.getHP() - damageDone)
targetId = currTarget.getDoId()
if self.notify.getDebug():
if hpbonus:
self.notify.debug(str(targetId) + ': suit takes ' + str(damageDone) + ' damage from HP-Bonus')
elif kbbonus:
self.notify.debug(str(targetId) + ': suit takes ' + str(damageDone) + ' damage from KB-Bonus')
else:
self.notify.debug(str(targetId) + ': suit takes ' + str(damageDone) + ' damage')
totalDamages = totalDamages + damageDone
if currTarget.getHP() <= 0:
if currTarget.getSkeleRevives() >= 1:
currTarget.useSkeleRevive()
attack[SUIT_REVIVE_COL] = attack[SUIT_REVIVE_COL] | 1 << position
else:
self.suitLeftBattle(targetId)
attack[SUIT_DIED_COL] = attack[SUIT_DIED_COL] | 1 << position
if self.notify.getDebug():
                            self.notify.debug('Suit ' + str(targetId) + ' bravely expired in combat')
return totalDamages
def __combatantDead(self, avId, toon):
if toon:
if self.__getToonHp(avId) <= 0:
return 1
else:
suit = self.battle.findSuit(avId)
if suit.getHP() <= 0:
return 1
return 0
def __combatantJustRevived(self, avId):
suit = self.battle.findSuit(avId)
if suit.reviveCheckAndClear():
return 1
else:
return 0
def __addAttackExp(self, attack, track = -1, level = -1, attackerId = -1):
trk = -1
lvl = -1
id = -1
if track != -1 and level != -1 and attackerId != -1:
trk = track
lvl = level
id = attackerId
elif self.__attackHasHit(attack):
if self.notify.getDebug():
self.notify.debug('Attack ' + repr(attack) + ' has hit')
trk = attack[TOON_TRACK_COL]
lvl = attack[TOON_LVL_COL]
id = attack[TOON_ID_COL]
if trk != -1 and trk != NPCSOS and trk != PETSOS and lvl != -1 and id != -1:
expList = self.toonSkillPtsGained.get(id, None)
if expList == None:
expList = [0,
0,
0,
0,
0,
0,
0]
self.toonSkillPtsGained[id] = expList
expList[trk] = min(ExperienceCap, expList[trk] + (lvl + 1) * self.__skillCreditMultiplier)
return
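    # When two attacks of the same track hit a suit that died, move the
    # SUIT_DIED_COL credit from the earlier attack to the current one.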
def __clearTgtDied(self, tgt, lastAtk, currAtk):
position = self.battle.activeSuits.index(tgt)
currAtkTrack = self.__getActualTrack(currAtk)
lastAtkTrack = self.__getActualTrack(lastAtk)
if currAtkTrack == lastAtkTrack and lastAtk[SUIT_DIED_COL] & 1 << position and self.__attackHasHit(currAtk, suit=0):
if self.notify.getDebug():
self.notify.debug('Clearing suit died for ' + str(tgt.getDoId()) + ' at position ' + str(position) + ' from toon attack ' + str(lastAtk[TOON_ID_COL]) + ' and setting it for ' + str(currAtk[TOON_ID_COL]))
lastAtk[SUIT_DIED_COL] = lastAtk[SUIT_DIED_COL] ^ 1 << position
self.suitLeftBattle(tgt.getDoId())
currAtk[SUIT_DIED_COL] = currAtk[SUIT_DIED_COL] | 1 << position
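    # Record damage for combo bonuses: hp=1 collects same-track damage stacking
    # (hpBonuses), hp=0 collects knock-back damage on lured suits (kbBonuses),
    # keyed by target position and attack track.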
def __addDmgToBonuses(self, dmg, attackIndex, hp = 1):
toonId = self.toonAtkOrder[attackIndex]
attack = self.battle.toonAttacks[toonId]
atkTrack = self.__getActualTrack(attack)
if atkTrack == HEAL or atkTrack == PETSOS:
return
tgts = self.__createToonTargetList(toonId)
for currTgt in tgts:
tgtPos = self.battle.activeSuits.index(currTgt)
attackerId = self.toonAtkOrder[attackIndex]
attack = self.battle.toonAttacks[attackerId]
track = self.__getActualTrack(attack)
if hp:
if track in self.hpBonuses[tgtPos]:
self.hpBonuses[tgtPos][track].append([attackIndex, dmg])
else:
self.hpBonuses[tgtPos][track] = [[attackIndex, dmg]]
elif self.__suitIsLured(currTgt.getDoId()):
if track in self.kbBonuses[tgtPos]:
self.kbBonuses[tgtPos][track].append([attackIndex, dmg])
else:
self.kbBonuses[tgtPos][track] = [[attackIndex, dmg]]
def __clearBonuses(self, hp = 1):
if hp:
self.hpBonuses = [{},
{},
{},
{}]
else:
self.kbBonuses = [{},
{},
{},
{}]
def __bonusExists(self, tgtSuit, hp = 1):
tgtPos = self.activeSuits.index(tgtSuit)
if hp:
bonusLen = len(self.hpBonuses[tgtPos])
else:
bonusLen = len(self.kbBonuses[tgtPos])
if bonusLen > 0:
return 1
return 0
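    # Convert the collected bonus lists into actual bonus damage: repeated hits
    # of one track earn an hp bonus (a DamageBonuses percentage of the total),
    # while knock-back bonuses add half the total damage for the lured target.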
def __processBonuses(self, hp = 1):
if hp:
bonusList = self.hpBonuses
self.notify.debug('Processing hpBonuses: ' + repr(self.hpBonuses))
else:
bonusList = self.kbBonuses
self.notify.debug('Processing kbBonuses: ' + repr(self.kbBonuses))
tgtPos = 0
for currTgt in bonusList:
for currAtkType in currTgt.keys():
if len(currTgt[currAtkType]) > 1 or not hp and len(currTgt[currAtkType]) > 0:
totalDmgs = 0
for currDmg in currTgt[currAtkType]:
totalDmgs += currDmg[1]
numDmgs = len(currTgt[currAtkType])
attackIdx = currTgt[currAtkType][numDmgs - 1][0]
attackerId = self.toonAtkOrder[attackIdx]
attack = self.battle.toonAttacks[attackerId]
if hp:
attack[TOON_HPBONUS_COL] = math.ceil(totalDmgs * (self.DamageBonuses[numDmgs - 1] * 0.01))
if self.notify.getDebug():
self.notify.debug('Applying hp bonus to track ' + str(attack[TOON_TRACK_COL]) + ' of ' + str(attack[TOON_HPBONUS_COL]))
elif len(attack[TOON_KBBONUS_COL]) > tgtPos:
attack[TOON_KBBONUS_COL][tgtPos] = totalDmgs * 0.5
if self.notify.getDebug():
self.notify.debug('Applying kb bonus to track ' + str(attack[TOON_TRACK_COL]) + ' of ' + str(attack[TOON_KBBONUS_COL][tgtPos]) + ' to target ' + str(tgtPos))
else:
self.notify.warning('invalid tgtPos for knock back bonus: %d' % tgtPos)
tgtPos += 1
if hp:
self.__clearBonuses()
else:
self.__clearBonuses(hp=0)
def __handleBonus(self, attackIdx, hp = 1):
attackerId = self.toonAtkOrder[attackIdx]
attack = self.battle.toonAttacks[attackerId]
atkDmg = self.__attackDamage(attack, suit=0)
atkTrack = self.__getActualTrack(attack)
if atkDmg > 0:
if hp:
if atkTrack != LURE:
self.notify.debug('Adding dmg of ' + str(atkDmg) + ' to hpBonuses list')
self.__addDmgToBonuses(atkDmg, attackIdx)
elif self.__knockBackAtk(attackerId, toon=1):
self.notify.debug('Adding dmg of ' + str(atkDmg) + ' to kbBonuses list')
self.__addDmgToBonuses(atkDmg, attackIdx, hp=0)
def __clearAttack(self, attackIdx, toon = 1):
if toon:
if self.notify.getDebug():
self.notify.debug('clearing out toon attack for toon ' + str(attackIdx) + '...')
attack = self.battle.toonAttacks[attackIdx]
self.battle.toonAttacks[attackIdx] = getToonAttack(attackIdx)
longest = max(len(self.battle.activeToons), len(self.battle.activeSuits))
taList = self.battle.toonAttacks
for j in xrange(longest):
taList[attackIdx][TOON_HP_COL].append(-1)
taList[attackIdx][TOON_KBBONUS_COL].append(-1)
if self.notify.getDebug():
self.notify.debug('toon attack is now ' + repr(self.battle.toonAttacks[attackIdx]))
else:
self.notify.warning('__clearAttack not implemented for suits!')
def __rememberToonAttack(self, suitId, toonId, damage):
if not suitId in self.SuitAttackers:
self.SuitAttackers[suitId] = {toonId: damage}
elif not toonId in self.SuitAttackers[suitId]:
self.SuitAttackers[suitId][toonId] = damage
elif self.SuitAttackers[suitId][toonId] <= damage:
            # Keep the per-toon damage dict structure used in the branches above.
            self.SuitAttackers[suitId][toonId] = damage
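    # Final ordered pass over the toon attacks: remember who hit which suit,
    # apply lure results and knock-back flags, clear attacks whose targets are
    # already dead, apply damage/bonuses and hand out experience credit.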
def __postProcessToonAttacks(self):
self.notify.debug('__postProcessToonAttacks()')
lastTrack = -1
lastAttacks = []
self.__clearBonuses()
for currToonAttack in self.toonAtkOrder:
if currToonAttack != -1:
attack = self.battle.toonAttacks[currToonAttack]
atkTrack, atkLevel = self.__getActualTrackLevel(attack)
if atkTrack != HEAL and atkTrack != SOS and atkTrack != NO_ATTACK and atkTrack != NPCSOS and atkTrack != PETSOS:
targets = self.__createToonTargetList(currToonAttack)
allTargetsDead = 1
for currTgt in targets:
damageDone = self.__attackDamage(attack, suit=0)
if damageDone > 0:
self.__rememberToonAttack(currTgt.getDoId(), attack[TOON_ID_COL], damageDone)
if atkTrack == TRAP:
if currTgt.doId in self.traps:
trapInfo = self.traps[currTgt.doId]
currTgt.battleTrap = trapInfo[0]
targetDead = 0
if currTgt.getHP() > 0:
allTargetsDead = 0
else:
targetDead = 1
if atkTrack != LURE:
for currLastAtk in lastAttacks:
self.__clearTgtDied(currTgt, currLastAtk, attack)
tgtId = currTgt.getDoId()
if tgtId in self.successfulLures and atkTrack == LURE:
lureInfo = self.successfulLures[tgtId]
self.notify.debug('applying lure data: ' + repr(lureInfo))
toonId = lureInfo[0]
lureAtk = self.battle.toonAttacks[toonId]
tgtPos = self.battle.activeSuits.index(currTgt)
if currTgt.doId in self.traps:
trapInfo = self.traps[currTgt.doId]
if trapInfo[0] == UBER_GAG_LEVEL_INDEX:
self.notify.debug('train trap triggered for %d' % currTgt.doId)
self.trainTrapTriggered = True
self.__removeSuitTrap(tgtId)
lureAtk[TOON_KBBONUS_COL][tgtPos] = self.KBBONUS_TGT_LURED
lureAtk[TOON_HP_COL][tgtPos] = lureInfo[3]
elif self.__suitIsLured(tgtId) and atkTrack == DROP:
self.notify.debug('Drop on lured suit, ' + 'indicating with KBBONUS_COL ' + 'flag')
tgtPos = self.battle.activeSuits.index(currTgt)
attack[TOON_KBBONUS_COL][tgtPos] = self.KBBONUS_LURED_FLAG
if targetDead and atkTrack != lastTrack:
tgtPos = self.battle.activeSuits.index(currTgt)
attack[TOON_HP_COL][tgtPos] = 0
attack[TOON_KBBONUS_COL][tgtPos] = -1
if allTargetsDead and atkTrack != lastTrack:
if self.notify.getDebug():
self.notify.debug('all targets of toon attack ' + str(currToonAttack) + ' are dead')
self.__clearAttack(currToonAttack, toon=1)
attack = self.battle.toonAttacks[currToonAttack]
atkTrack, atkLevel = self.__getActualTrackLevel(attack)
damagesDone = self.__applyToonAttackDamages(currToonAttack)
self.__applyToonAttackDamages(currToonAttack, hpbonus=1)
if atkTrack != LURE and atkTrack != DROP and atkTrack != SOUND:
self.__applyToonAttackDamages(currToonAttack, kbbonus=1)
if lastTrack != atkTrack:
lastAttacks = []
lastTrack = atkTrack
lastAttacks.append(attack)
if self.itemIsCredit(atkTrack, atkLevel):
if atkTrack == TRAP or atkTrack == LURE:
pass
elif atkTrack == HEAL:
if damagesDone != 0:
self.__addAttackExp(attack)
else:
self.__addAttackExp(attack)
if self.trainTrapTriggered:
for suit in self.battle.activeSuits:
suitId = suit.doId
self.__removeSuitTrap(suitId)
suit.battleTrap = NO_TRAP
self.notify.debug('train trap triggered, removing trap from %d' % suitId)
if self.notify.getDebug():
for currToonAttack in self.toonAtkOrder:
attack = self.battle.toonAttacks[currToonAttack]
self.notify.debug('Final Toon attack: ' + str(attack))
def __allTargetsDead(self, attackIdx, toon = 1):
allTargetsDead = 1
if toon:
targets = self.__createToonTargetList(attackIdx)
for currTgt in targets:
                if currTgt.getHP() > 0:
allTargetsDead = 0
break
else:
self.notify.warning('__allTargetsDead: suit ver. not implemented!')
return allTargetsDead
def __clearLuredSuitsByAttack(self, toonId, kbBonusReq = 0, targetId = -1):
if self.notify.getDebug():
self.notify.debug('__clearLuredSuitsByAttack')
        if targetId != -1 and self.__suitIsLured(targetId):
            self.__removeLured(targetId)
else:
tgtList = self.__createToonTargetList(toonId)
for t in tgtList:
if self.__suitIsLured(t.getDoId()) and (not kbBonusReq or self.__bonusExists(t, hp=0)):
self.__removeLured(t.getDoId())
if self.notify.getDebug():
self.notify.debug('Suit %d stepping from lured spot' % t.getDoId())
else:
self.notify.debug('Suit ' + str(t.getDoId()) + ' not found in currently lured suits')
def __clearLuredSuitsDelayed(self):
if self.notify.getDebug():
self.notify.debug('__clearLuredSuitsDelayed')
for t in self.delayedUnlures:
if self.__suitIsLured(t):
self.__removeLured(t)
if self.notify.getDebug():
self.notify.debug('Suit %d stepping back from lured spot' % t)
else:
self.notify.debug('Suit ' + str(t) + ' not found in currently lured suits')
self.delayedUnlures = []
def __addLuredSuitsDelayed(self, toonId, targetId = -1, ignoreDamageCheck = False):
if self.notify.getDebug():
self.notify.debug('__addLuredSuitsDelayed')
if targetId != -1:
self.delayedUnlures.append(targetId)
else:
tgtList = self.__createToonTargetList(toonId)
for t in tgtList:
if self.__suitIsLured(t.getDoId()) and t.getDoId() not in self.delayedUnlures and (self.__attackDamageForTgt(self.battle.toonAttacks[toonId], self.battle.activeSuits.index(t), suit=0) > 0 or ignoreDamageCheck):
self.delayedUnlures.append(t.getDoId())
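    # Evaluate every toon attack for the round in toonAtkOrder, tracking the
    # credit level, handling delayed un-lures between track changes, and then
    # processing knock-back and hp bonuses before the post-processing pass.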
def __calculateToonAttacks(self):
self.notify.debug('__calculateToonAttacks()')
self.__clearBonuses(hp=0)
currTrack = None
self.notify.debug('Traps: ' + str(self.traps))
maxSuitLevel = 0
for cog in self.battle.activeSuits:
maxSuitLevel = max(maxSuitLevel, cog.getActualLevel())
self.creditLevel = maxSuitLevel
for toonId in self.toonAtkOrder:
if self.__combatantDead(toonId, toon=1):
if self.notify.getDebug():
self.notify.debug("Toon %d is dead and can't attack" % toonId)
continue
attack = self.battle.toonAttacks[toonId]
atkTrack = self.__getActualTrack(attack)
if atkTrack != NO_ATTACK and atkTrack != SOS and atkTrack != NPCSOS:
if self.notify.getDebug():
self.notify.debug('Calculating attack for toon: %d' % toonId)
if self.SUITS_UNLURED_IMMEDIATELY:
if currTrack and atkTrack != currTrack:
self.__clearLuredSuitsDelayed()
currTrack = atkTrack
self.__calcToonAtkHp(toonId)
attackIdx = self.toonAtkOrder.index(toonId)
self.__handleBonus(attackIdx, hp=0)
self.__handleBonus(attackIdx, hp=1)
lastAttack = self.toonAtkOrder.index(toonId) >= len(self.toonAtkOrder) - 1
unlureAttack = self.__attackHasHit(attack, suit=0) and self.__unlureAtk(toonId, toon=1)
if unlureAttack:
if lastAttack:
self.__clearLuredSuitsByAttack(toonId)
else:
self.__addLuredSuitsDelayed(toonId)
if lastAttack:
self.__clearLuredSuitsDelayed()
self.__processBonuses(hp=0)
self.__processBonuses(hp=1)
self.__postProcessToonAttacks()
return
def __knockBackAtk(self, attackIndex, toon = 1):
if toon and (self.battle.toonAttacks[attackIndex][TOON_TRACK_COL] == THROW or self.battle.toonAttacks[attackIndex][TOON_TRACK_COL] == SQUIRT):
if self.notify.getDebug():
self.notify.debug('attack is a knockback')
return 1
return 0
def __unlureAtk(self, attackIndex, toon = 1):
attack = self.battle.toonAttacks[attackIndex]
track = self.__getActualTrack(attack)
if toon and (track == THROW or track == SQUIRT or track == SOUND):
if self.notify.getDebug():
self.notify.debug('attack is an unlure')
return 1
return 0
def __calcSuitAtkType(self, attackIndex):
theSuit = self.battle.activeSuits[attackIndex]
attacks = SuitBattleGlobals.SuitAttributes[theSuit.dna.name]['attacks']
atk = SuitBattleGlobals.pickSuitAttack(attacks, theSuit.getLevel())
return atk
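    # Suit target selection: 75% of the time pick a toon weighted by the damage
    # it has dealt to this suit, otherwise (or on failure) fall back to a
    # random living toon.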
def __calcSuitTarget(self, attackIndex):
attack = self.battle.suitAttacks[attackIndex]
suitId = attack[SUIT_ID_COL]
if suitId in self.SuitAttackers and random.randint(0, 99) < 75:
totalDamage = 0
for currToon in self.SuitAttackers[suitId].keys():
totalDamage += self.SuitAttackers[suitId][currToon]
dmgs = []
for currToon in self.SuitAttackers[suitId].keys():
                dmgs.append(self.SuitAttackers[suitId][currToon] / float(totalDamage) * 100)
dmgIdx = SuitBattleGlobals.pickFromFreqList(dmgs)
if dmgIdx == None:
toonId = self.__pickRandomToon(suitId)
else:
toonId = self.SuitAttackers[suitId].keys()[dmgIdx]
if toonId == -1 or toonId not in self.battle.activeToons:
return -1
self.notify.debug('Suit attacking back at toon ' + str(toonId))
return self.battle.activeToons.index(toonId)
else:
return self.__pickRandomToon(suitId)
return
def __pickRandomToon(self, suitId):
liveToons = []
for currToon in self.battle.activeToons:
if not self.__combatantDead(currToon, toon=1):
liveToons.append(self.battle.activeToons.index(currToon))
if len(liveToons) == 0:
self.notify.debug('No tgts avail. for suit ' + str(suitId))
return -1
chosen = random.choice(liveToons)
self.notify.debug('Suit randomly attacking toon ' + str(self.battle.activeToons[chosen]))
return chosen
def __suitAtkHit(self, attackIndex):
if self.suitsAlwaysHit:
return 1
elif self.suitsAlwaysMiss:
return 0
theSuit = self.battle.activeSuits[attackIndex]
atkType = self.battle.suitAttacks[attackIndex][SUIT_ATK_COL]
atkInfo = SuitBattleGlobals.getSuitAttack(theSuit.dna.name, theSuit.getLevel(), atkType)
atkAcc = atkInfo['acc']
suitAcc = SuitBattleGlobals.SuitAttributes[theSuit.dna.name]['acc'][theSuit.getLevel()]
acc = atkAcc
randChoice = random.randint(0, 99)
if self.notify.getDebug():
self.notify.debug('Suit attack rolled ' + str(randChoice) + ' to hit with an accuracy of ' + str(acc) + ' (attackAcc: ' + str(atkAcc) + ' suitAcc: ' + str(suitAcc) + ')')
if randChoice < acc:
return 1
return 0
def __suitAtkAffectsGroup(self, attack):
atkType = attack[SUIT_ATK_COL]
theSuit = self.battle.findSuit(attack[SUIT_ID_COL])
atkInfo = SuitBattleGlobals.getSuitAttack(theSuit.dna.name, theSuit.getLevel(), atkType)
return atkInfo['group'] != SuitBattleGlobals.ATK_TGT_SINGLE
def __createSuitTargetList(self, attackIndex):
attack = self.battle.suitAttacks[attackIndex]
targetList = []
if attack[SUIT_ATK_COL] == NO_ATTACK:
self.notify.debug('No attack, no targets')
return targetList
debug = self.notify.getDebug()
if not self.__suitAtkAffectsGroup(attack):
targetList.append(self.battle.activeToons[attack[SUIT_TGT_COL]])
if debug:
self.notify.debug('Suit attack is single target')
else:
if debug:
self.notify.debug('Suit attack is group target')
for currToon in self.battle.activeToons:
if debug:
self.notify.debug('Suit attack will target toon' + str(currToon))
targetList.append(currToon)
return targetList
def __calcSuitAtkHp(self, attackIndex):
targetList = self.__createSuitTargetList(attackIndex)
attack = self.battle.suitAttacks[attackIndex]
for currTarget in xrange(len(targetList)):
toonId = targetList[currTarget]
toon = self.battle.getToon(toonId)
result = 0
if toon and toon.immortalMode:
result = 1
elif self.TOONS_TAKE_NO_DAMAGE:
result = 0
elif self.__suitAtkHit(attackIndex):
atkType = attack[SUIT_ATK_COL]
theSuit = self.battle.findSuit(attack[SUIT_ID_COL])
atkInfo = SuitBattleGlobals.getSuitAttack(theSuit.dna.name, theSuit.getLevel(), atkType)
result = atkInfo['hp']
targetIndex = self.battle.activeToons.index(toonId)
attack[SUIT_HP_COL][targetIndex] = result
def __getToonHp(self, toonDoId):
handle = self.battle.getToon(toonDoId)
if handle != None and toonDoId in self.toonHPAdjusts:
return handle.hp + self.toonHPAdjusts[toonDoId]
else:
return 0
return
def __getToonMaxHp(self, toonDoId):
handle = self.battle.getToon(toonDoId)
if handle != None:
return handle.maxHp
else:
return 0
return
def __applySuitAttackDamages(self, attackIndex):
attack = self.battle.suitAttacks[attackIndex]
if self.APPLY_HEALTH_ADJUSTMENTS:
for t in self.battle.activeToons:
position = self.battle.activeToons.index(t)
if attack[SUIT_HP_COL][position] <= 0:
continue
toonHp = self.__getToonHp(t)
if toonHp - attack[SUIT_HP_COL][position] <= 0:
if self.notify.getDebug():
self.notify.debug('Toon %d has died, removing' % t)
self.toonLeftBattle(t)
attack[TOON_DIED_COL] = attack[TOON_DIED_COL] | 1 << position
if self.notify.getDebug():
self.notify.debug('Toon ' + str(t) + ' takes ' + str(attack[SUIT_HP_COL][position]) + ' damage')
self.toonHPAdjusts[t] -= attack[SUIT_HP_COL][position]
self.notify.debug('Toon ' + str(t) + ' now has ' + str(self.__getToonHp(t)) + ' health')
def __suitCanAttack(self, suitId):
if self.__combatantDead(suitId, toon=0) or self.__suitIsLured(suitId) or self.__combatantJustRevived(suitId):
return 0
return 1
def __updateSuitAtkStat(self, toonId):
if toonId in self.suitAtkStats:
self.suitAtkStats[toonId] += 1
else:
self.suitAtkStats[toonId] = 1
def __printSuitAtkStats(self):
self.notify.debug('Suit Atk Stats:')
for currTgt in self.suitAtkStats.keys():
if currTgt not in self.battle.activeToons:
continue
tgtPos = self.battle.activeToons.index(currTgt)
self.notify.debug(' toon ' + str(currTgt) + ' at position ' + str(tgtPos) + ' was attacked ' + str(self.suitAtkStats[currTgt]) + ' times')
self.notify.debug('\n')
def __calculateSuitAttacks(self):
for i in xrange(len(self.battle.suitAttacks)):
if i < len(self.battle.activeSuits):
suitId = self.battle.activeSuits[i].doId
self.battle.suitAttacks[i][SUIT_ID_COL] = suitId
if not self.__suitCanAttack(suitId):
if self.notify.getDebug():
self.notify.debug("Suit %d can't attack" % suitId)
continue
if self.battle.pendingSuits.count(self.battle.activeSuits[i]) > 0 or self.battle.joiningSuits.count(self.battle.activeSuits[i]) > 0:
continue
attack = self.battle.suitAttacks[i]
attack[SUIT_ID_COL] = self.battle.activeSuits[i].doId
attack[SUIT_ATK_COL] = self.__calcSuitAtkType(i)
attack[SUIT_TGT_COL] = self.__calcSuitTarget(i)
if attack[SUIT_TGT_COL] == -1:
self.battle.suitAttacks[i] = getDefaultSuitAttack()
attack = self.battle.suitAttacks[i]
self.notify.debug('clearing suit attack, no avail targets')
self.__calcSuitAtkHp(i)
if attack[SUIT_ATK_COL] != NO_ATTACK:
if self.__suitAtkAffectsGroup(attack):
for currTgt in self.battle.activeToons:
self.__updateSuitAtkStat(currTgt)
else:
tgtId = self.battle.activeToons[attack[SUIT_TGT_COL]]
self.__updateSuitAtkStat(tgtId)
targets = self.__createSuitTargetList(i)
allTargetsDead = 1
for currTgt in targets:
if self.__getToonHp(currTgt) > 0:
allTargetsDead = 0
break
if allTargetsDead:
self.battle.suitAttacks[i] = getDefaultSuitAttack()
if self.notify.getDebug():
self.notify.debug('clearing suit attack, targets dead')
self.notify.debug('suit attack is now ' + repr(self.battle.suitAttacks[i]))
self.notify.debug('all attacks: ' + repr(self.battle.suitAttacks))
attack = self.battle.suitAttacks[i]
if self.__attackHasHit(attack, suit=1):
self.__applySuitAttackDamages(i)
if self.notify.getDebug():
self.notify.debug('Suit attack: ' + str(self.battle.suitAttacks[i]))
attack[SUIT_BEFORE_TOONS_COL] = 0
def __updateLureTimeouts(self):
if self.notify.getDebug():
self.notify.debug('__updateLureTimeouts()')
self.notify.debug('Lured suits: ' + str(self.currentlyLuredSuits))
noLongerLured = []
for currLuredSuit in self.currentlyLuredSuits.keys():
self.__incLuredCurrRound(currLuredSuit)
if self.__luredMaxRoundsReached(currLuredSuit) or self.__luredWakeupTime(currLuredSuit):
noLongerLured.append(currLuredSuit)
for currLuredSuit in noLongerLured:
self.__removeLured(currLuredSuit)
if self.notify.getDebug():
self.notify.debug('Lured suits: ' + str(self.currentlyLuredSuits))
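    # Build the toon attack order for the round: pet tricks first, then Fire,
    # then the normal tracks from HEAL through DROP (toon-thrown traps ahead of
    # NPC traps), and apply any NPC SOS hit/miss overrides for this round.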
def __initRound(self):
if self.CLEAR_SUIT_ATTACKERS:
self.SuitAttackers = {}
self.toonAtkOrder = []
attacks = findToonAttack(self.battle.activeToons, self.battle.toonAttacks, PETSOS)
for atk in attacks:
self.toonAtkOrder.append(atk[TOON_ID_COL])
attacks = findToonAttack(self.battle.activeToons, self.battle.toonAttacks, FIRE)
for atk in attacks:
self.toonAtkOrder.append(atk[TOON_ID_COL])
for track in xrange(HEAL, DROP + 1):
attacks = findToonAttack(self.battle.activeToons, self.battle.toonAttacks, track)
if track == TRAP:
sortedTraps = []
for atk in attacks:
if atk[TOON_TRACK_COL] == TRAP:
sortedTraps.append(atk)
for atk in attacks:
if atk[TOON_TRACK_COL] == NPCSOS:
sortedTraps.append(atk)
attacks = sortedTraps
for atk in attacks:
self.toonAtkOrder.append(atk[TOON_ID_COL])
specials = findToonAttack(self.battle.activeToons, self.battle.toonAttacks, NPCSOS)
toonsHit = 0
cogsMiss = 0
for special in specials:
npc_track = NPCToons.getNPCTrack(special[TOON_TGT_COL])
if npc_track == NPC_TOONS_HIT:
BattleCalculatorAI.toonsAlwaysHit = 1
toonsHit = 1
elif npc_track == NPC_COGS_MISS:
BattleCalculatorAI.suitsAlwaysMiss = 1
cogsMiss = 1
if self.notify.getDebug():
self.notify.debug('Toon attack order: ' + str(self.toonAtkOrder))
self.notify.debug('Active toons: ' + str(self.battle.activeToons))
self.notify.debug('Toon attacks: ' + str(self.battle.toonAttacks))
self.notify.debug('Active suits: ' + str(self.battle.activeSuits))
self.notify.debug('Suit attacks: ' + str(self.battle.suitAttacks))
self.toonHPAdjusts = {}
for t in self.battle.activeToons:
self.toonHPAdjusts[t] = 0
self.__clearBonuses()
self.__updateActiveToons()
self.delayedUnlures = []
self.__initTraps()
self.successfulLures = {}
return (toonsHit, cogsMiss)
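    # Per-round entry point: pad the attack tables, run toon attacks, age the
    # lure timers, run suit attacks, then restore any NPC SOS overrides.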
def calculateRound(self):
longest = max(len(self.battle.activeToons), len(self.battle.activeSuits))
for t in self.battle.activeToons:
for j in xrange(longest):
self.battle.toonAttacks[t][TOON_HP_COL].append(-1)
self.battle.toonAttacks[t][TOON_KBBONUS_COL].append(-1)
for i in xrange(4):
for j in xrange(len(self.battle.activeToons)):
self.battle.suitAttacks[i][SUIT_HP_COL].append(-1)
toonsHit, cogsMiss = self.__initRound()
for suit in self.battle.activeSuits:
if suit.isGenerated():
suit.b_setHP(suit.getHP())
for suit in self.battle.activeSuits:
if not hasattr(suit, 'dna'):
self.notify.warning('a removed suit is in this battle!')
return None
self.__calculateToonAttacks()
self.__updateLureTimeouts()
self.__calculateSuitAttacks()
if toonsHit == 1:
BattleCalculatorAI.toonsAlwaysHit = 0
if cogsMiss == 1:
BattleCalculatorAI.suitsAlwaysMiss = 0
if self.notify.getDebug():
self.notify.debug('Toon skills gained after this round: ' + repr(self.toonSkillPtsGained))
self.__printSuitAtkStats()
return None
    def __calculateFiredCogs(self):
import pdb
pdb.set_trace()
def toonLeftBattle(self, toonId):
if self.notify.getDebug():
self.notify.debug('toonLeftBattle()' + str(toonId))
if toonId in self.toonSkillPtsGained:
del self.toonSkillPtsGained[toonId]
if toonId in self.suitAtkStats:
del self.suitAtkStats[toonId]
if not self.CLEAR_SUIT_ATTACKERS:
oldSuitIds = []
for s in self.SuitAttackers.keys():
if toonId in self.SuitAttackers[s]:
del self.SuitAttackers[s][toonId]
if len(self.SuitAttackers[s]) == 0:
oldSuitIds.append(s)
for oldSuitId in oldSuitIds:
del self.SuitAttackers[oldSuitId]
self.__clearTrapCreator(toonId)
self.__clearLurer(toonId)
def suitLeftBattle(self, suitId):
if self.notify.getDebug():
self.notify.debug('suitLeftBattle(): ' + str(suitId))
self.__removeLured(suitId)
if suitId in self.SuitAttackers:
del self.SuitAttackers[suitId]
self.__removeSuitTrap(suitId)
def __updateActiveToons(self):
if self.notify.getDebug():
self.notify.debug('updateActiveToons()')
if not self.CLEAR_SUIT_ATTACKERS:
oldSuitIds = []
for s in self.SuitAttackers.keys():
for t in self.SuitAttackers[s].keys():
if t not in self.battle.activeToons:
del self.SuitAttackers[s][t]
if len(self.SuitAttackers[s]) == 0:
oldSuitIds.append(s)
for oldSuitId in oldSuitIds:
del self.SuitAttackers[oldSuitId]
for trap in self.traps.keys():
if self.traps[trap][1] not in self.battle.activeToons:
self.notify.debug('Trap for toon ' + str(self.traps[trap][1]) + ' will no longer give exp')
self.traps[trap][1] = 0
def getSkillGained(self, toonId, track):
return BattleExperienceAI.getSkillGained(self.toonSkillPtsGained, toonId, track)
def getLuredSuits(self):
luredSuits = self.currentlyLuredSuits.keys()
self.notify.debug('Lured suits reported to battle: ' + repr(luredSuits))
return luredSuits
def __suitIsLured(self, suitId, prevRound = 0):
inList = suitId in self.currentlyLuredSuits
if prevRound:
return inList and self.currentlyLuredSuits[suitId][0] != -1
return inList
def __findAvailLureId(self, lurerId):
luredSuits = self.currentlyLuredSuits.keys()
lureIds = []
for currLured in luredSuits:
lurerInfo = self.currentlyLuredSuits[currLured][3]
lurers = lurerInfo.keys()
for currLurer in lurers:
currId = lurerInfo[currLurer][1]
if currLurer == lurerId and currId not in lureIds:
lureIds.append(currId)
lureIds.sort()
currId = 1
for currLureId in lureIds:
if currLureId != currId:
return currId
currId += 1
return currId
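    # Lure bookkeeping: currentlyLuredSuits[suitId] holds
    # [currRounds, maxRounds, wakeupChance, {lurerId: [lureLvl, lureId, credit]}];
    # re-luring an already lured suit extends maxRounds and keeps the lowest
    # wake-up chance.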
def __addLuredSuitInfo(self, suitId, currRounds, maxRounds, wakeChance, lurer, lureLvl, lureId = -1, npc = 0):
if lureId == -1:
availLureId = self.__findAvailLureId(lurer)
else:
availLureId = lureId
if npc == 1:
credit = 0
else:
credit = self.itemIsCredit(LURE, lureLvl)
if suitId in self.currentlyLuredSuits:
lureInfo = self.currentlyLuredSuits[suitId]
if not lurer in lureInfo[3]:
lureInfo[1] += maxRounds
if wakeChance < lureInfo[2]:
lureInfo[2] = wakeChance
lureInfo[3][lurer] = [lureLvl, availLureId, credit]
else:
lurerInfo = {lurer: [lureLvl, availLureId, credit]}
self.currentlyLuredSuits[suitId] = [currRounds,
maxRounds,
wakeChance,
lurerInfo]
self.notify.debug('__addLuredSuitInfo: currLuredSuits -> %s' % repr(self.currentlyLuredSuits))
return availLureId
def __getLurers(self, suitId):
if self.__suitIsLured(suitId):
return self.currentlyLuredSuits[suitId][3].keys()
return []
def __getLuredExpInfo(self, suitId):
returnInfo = []
lurers = self.__getLurers(suitId)
if len(lurers) == 0:
return returnInfo
lurerInfo = self.currentlyLuredSuits[suitId][3]
for currLurer in lurers:
returnInfo.append([currLurer,
lurerInfo[currLurer][0],
lurerInfo[currLurer][1],
lurerInfo[currLurer][2]])
return returnInfo
def __clearLurer(self, lurerId, lureId = -1):
luredSuits = self.currentlyLuredSuits.keys()
for currLured in luredSuits:
lurerInfo = self.currentlyLuredSuits[currLured][3]
lurers = lurerInfo.keys()
for currLurer in lurers:
if currLurer == lurerId and (lureId == -1 or lureId == lurerInfo[currLurer][1]):
del lurerInfo[currLurer]
def __setLuredMaxRounds(self, suitId, rounds):
if self.__suitIsLured(suitId):
self.currentlyLuredSuits[suitId][1] = rounds
def __setLuredWakeChance(self, suitId, chance):
if self.__suitIsLured(suitId):
self.currentlyLuredSuits[suitId][2] = chance
def __incLuredCurrRound(self, suitId):
if self.__suitIsLured(suitId):
self.currentlyLuredSuits[suitId][0] += 1
def __removeLured(self, suitId):
if self.__suitIsLured(suitId):
del self.currentlyLuredSuits[suitId]
def __luredMaxRoundsReached(self, suitId):
return self.__suitIsLured(suitId) and self.currentlyLuredSuits[suitId][0] >= self.currentlyLuredSuits[suitId][1]
def __luredWakeupTime(self, suitId):
return self.__suitIsLured(suitId) and self.currentlyLuredSuits[suitId][0] > 0 and random.randint(0, 99) < self.currentlyLuredSuits[suitId][2]
def itemIsCredit(self, track, level):
if track == PETSOS:
return 0
return level < self.creditLevel
def __getActualTrack(self, toonAttack):
if toonAttack[TOON_TRACK_COL] == NPCSOS:
track = NPCToons.getNPCTrack(toonAttack[TOON_TGT_COL])
if track != None:
return track
else:
self.notify.warning('No NPC with id: %d' % toonAttack[TOON_TGT_COL])
return toonAttack[TOON_TRACK_COL]
def __getActualTrackLevel(self, toonAttack):
if toonAttack[TOON_TRACK_COL] == NPCSOS:
track, level, hp = NPCToons.getNPCTrackLevelHp(toonAttack[TOON_TGT_COL])
if track != None:
return (track, level)
else:
self.notify.warning('No NPC with id: %d' % toonAttack[TOON_TGT_COL])
return (toonAttack[TOON_TRACK_COL], toonAttack[TOON_LVL_COL])
def __getActualTrackLevelHp(self, toonAttack):
if toonAttack[TOON_TRACK_COL] == NPCSOS:
track, level, hp = NPCToons.getNPCTrackLevelHp(toonAttack[TOON_TGT_COL])
if track != None:
return (track, level, hp)
else:
self.notify.warning('No NPC with id: %d' % toonAttack[TOON_TGT_COL])
elif toonAttack[TOON_TRACK_COL] == PETSOS:
trick = toonAttack[TOON_LVL_COL]
petProxyId = toonAttack[TOON_TGT_COL]
trickId = toonAttack[TOON_LVL_COL]
healRange = PetTricks.TrickHeals[trickId]
hp = 0
if petProxyId in simbase.air.doId2do:
petProxy = simbase.air.doId2do[petProxyId]
if trickId < len(petProxy.trickAptitudes):
aptitude = petProxy.trickAptitudes[trickId]
hp = int(lerp(healRange[0], healRange[1], aptitude))
else:
self.notify.warning('pet proxy: %d not in doId2do!' % petProxyId)
return (toonAttack[TOON_TRACK_COL], toonAttack[TOON_LVL_COL], hp)
return (toonAttack[TOON_TRACK_COL], toonAttack[TOON_LVL_COL], 0)
def __calculatePetTrickSuccess(self, toonAttack):
petProxyId = toonAttack[TOON_TGT_COL]
if not petProxyId in simbase.air.doId2do:
self.notify.warning('pet proxy %d not in doId2do!' % petProxyId)
toonAttack[TOON_ACCBONUS_COL] = 1
return (0, 0)
petProxy = simbase.air.doId2do[petProxyId]
trickId = toonAttack[TOON_LVL_COL]
toonAttack[TOON_ACCBONUS_COL] = petProxy.attemptBattleTrick(trickId)
if toonAttack[TOON_ACCBONUS_COL] == 1:
return (0, 0)
else:
return (1, 100)
| {
"content_hash": "21ab5790f06c3d6fcabeec2cfe6f48b6",
"timestamp": "",
"source": "github",
"line_count": 1619,
"max_line_length": 272,
"avg_line_length": 45.18282890673255,
"alnum_prop": 0.552063539801233,
"repo_name": "Spiderlover/Toontown",
"id": "d3cce0e70b8fad5c1f28c304abb0c62cc382e76d",
"size": "73151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/battle/BattleCalculatorAI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7774"
},
{
"name": "Python",
"bytes": "17241353"
},
{
"name": "Shell",
"bytes": "7699"
}
],
"symlink_target": ""
} |
'''
MFEM example 8p
See c++ version in the MFEM library for more detail
How to run:
mpirun -np 2 python <arguments>
Example of arguments:
ex8p.py -m square-disc.mesh
ex8p.py -m star.mesh
ex8p.py -m escher.mesh
ex8p.py -m fichera.mesh
ex8p.py -m square-disc-p3.mesh
ex8p.py -m star-surf.mesh -o 2
'''
import sys
from os.path import expanduser, join, dirname
import numpy as np
from numpy import sin, cos, exp, sqrt
from mfem.common.arg_parser import ArgParser
import mfem.par as mfem
from mpi4py import MPI
num_procs = MPI.COMM_WORLD.size
myid = MPI.COMM_WORLD.rank
parser = ArgParser(description='Ex8p')
parser.add_argument('-m', '--mesh',
default='star.mesh',
action='store', type=str,
help='Mesh file to use.')
parser.add_argument('-o', '--order',
action='store', default=1, type=int,
help="Finite element order (polynomial degree)")
parser.add_argument('-vis', '--visualization',
action='store_true', default=True,
help='Enable GLVis visualization')
args = parser.parse_args()
device = mfem.Device('cpu')
if myid == 0:
device.Print()
order = args.order
visualization = args.visualization
if myid == 0:
parser.print_options(args)
# 3. Read the (serial) mesh from the given mesh file on all processors. We
# can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
# and volume meshes with the same code.
meshfile = expanduser(join(dirname(__file__), '..', 'data', args.mesh))
mesh = mfem.Mesh(meshfile, 1, 1)
dim = mesh.Dimension()
# 4. Refine the serial mesh on all processors to increase the resolution. In
# this example we do 'ref_levels' of uniform refinement. We choose
# 'ref_levels' to be the largest number that gives a final mesh with no
# more than 10,000 elements.
ref_levels = int(np.floor(np.log(10000./mesh.GetNE())/np.log(2.)/dim))
for x in range(ref_levels):
mesh.UniformRefinement()
# 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
# this mesh further in parallel to increase the resolution. Once the
# parallel mesh is defined, the serial mesh can be deleted.
pmesh = mfem.ParMesh(MPI.COMM_WORLD, mesh)
del mesh
par_ref_levels = 1
for l in range(par_ref_levels):
pmesh.UniformRefinement()
pmesh.ReorientTetMesh()
# 6. Define the trial, interfacial (trace) and test DPG spaces:
# - The trial space, x0_space, contains the non-interfacial unknowns and
# has the essential BC.
# - The interfacial space, xhat_space, contains the interfacial unknowns
# and does not have essential BC.
# - The test space, test_space, is an enriched space where the enrichment
# degree may depend on the spatial dimension of the domain, the type of
# the mesh and the trial space order.
trial_order = order
trace_order = order - 1
test_order = order # reduced order, full order is (order + dim - 1)
if (dim == 2 and (order % 2 == 0 or (pmesh.MeshGenerator() & 2 and order > 1))):
test_order = test_order + 1
if (test_order < trial_order):
if myid == 0:
print("Warning, test space not enriched enough to handle primal trial space")
x0_fec = mfem.H1_FECollection(trial_order, dim)
xhat_fec = mfem.RT_Trace_FECollection(trace_order, dim)
test_fec = mfem.L2_FECollection(test_order, dim)
x0_space = mfem.ParFiniteElementSpace(pmesh, x0_fec)
xhat_space = mfem.ParFiniteElementSpace(pmesh, xhat_fec)
test_space = mfem.ParFiniteElementSpace(pmesh, test_fec)
glob_true_s0 = x0_space.GlobalTrueVSize()
glob_true_s1 = xhat_space.GlobalTrueVSize()
glob_true_s_test = test_space.GlobalTrueVSize()
if myid == 0:
print('\n'.join(["nNumber of Unknowns",
" Trial space, X0 : " + str(glob_true_s0) +
" (order " + str(trial_order) + ")",
" Interface space, Xhat : " + str(glob_true_s1) +
" (order " + str(trace_order) + ")",
" Test space, Y : " + str(glob_true_s_test) +
" (order " + str(test_order) + ")"]))
# 7. Set up the linear form F(.) which corresponds to the right-hand side of
# the FEM linear system, which in this case is (f,phi_i) where f=1.0 and
# phi_i are the basis functions in the test finite element fespace.
one = mfem.ConstantCoefficient(1.0)
F = mfem.ParLinearForm(test_space)
F.AddDomainIntegrator(mfem.DomainLFIntegrator(one))
F.Assemble()
x0 = mfem.ParGridFunction(x0_space)
x0.Assign(0.0)
# 8. Set up the mixed bilinear form for the primal trial unknowns, B0,
# the mixed bilinear form for the interfacial unknowns, Bhat,
# the inverse stiffness matrix on the discontinuous test space, Sinv,
# and the stiffness matrix on the continuous trial space, S0.
ess_bdr = mfem.intArray(pmesh.bdr_attributes.Max())
ess_bdr.Assign(1)
ess_dof = mfem.intArray()
x0_space.GetEssentialVDofs(ess_bdr, ess_dof)
B0 = mfem.ParMixedBilinearForm(x0_space, test_space)
B0.AddDomainIntegrator(mfem.DiffusionIntegrator(one))
B0.Assemble()
B0.EliminateEssentialBCFromTrialDofs(ess_dof, x0, F)
B0.Finalize()
Bhat = mfem.ParMixedBilinearForm(xhat_space, test_space)
Bhat.AddTraceFaceIntegrator(mfem.TraceJumpIntegrator())
Bhat.Assemble()
Bhat.Finalize()
Sinv = mfem.ParBilinearForm(test_space)
Sum = mfem.SumIntegrator()
Sum.AddIntegrator(mfem.DiffusionIntegrator(one))
Sum.AddIntegrator(mfem.MassIntegrator(one))
Sinv.AddDomainIntegrator(mfem.InverseIntegrator(Sum))
Sinv.Assemble()
Sinv.Finalize()
S0 = mfem.ParBilinearForm(x0_space)
S0.AddDomainIntegrator(mfem.DiffusionIntegrator(one))
S0.Assemble()
S0.EliminateEssentialBC(ess_bdr)
S0.Finalize()
matB0 = B0.ParallelAssemble()
del B0
matBhat = Bhat.ParallelAssemble()
del Bhat
matSinv = Sinv.ParallelAssemble()
del Sinv
matS0 = S0.ParallelAssemble()
del S0
# 9. Define the block structure of the problem, by creating the offset
# variables. Also allocate two BlockVector objects to store the solution
# and rhs.
x0_var = 0
xhat_var = 1
NVAR = 2 # enum in C
true_s0 = x0_space.TrueVSize()
true_s1 = xhat_space.TrueVSize()
true_s_test = test_space.TrueVSize()
true_offsets = mfem.intArray([0, true_s0, true_s0+true_s1])
true_offsets_test = mfem.intArray([0, true_s_test])
x = mfem.BlockVector(true_offsets)
b = mfem.BlockVector(true_offsets)
x.Assign(0.0)
b.Assign(0.0)
# 10. Set up the 1x2 block Least Squares DPG operator, B = [B0 Bhat],
# the normal equation operator, A = B^t Sinv B, and
# the normal equation right-hand-size, b = B^t Sinv F.
B = mfem.BlockOperator(true_offsets_test, true_offsets)
B.SetBlock(0, 0, matB0)
B.SetBlock(0, 1, matBhat)
A = mfem.RAPOperator(B, matSinv, B)
trueF = F.ParallelAssemble()
SinvF = mfem.HypreParVector(test_space)
matSinv.Mult(trueF, SinvF)
B.MultTranspose(SinvF, b)
# 11. Set up a block-diagonal preconditioner for the 2x2 normal equation
#
# [ S0^{-1} 0 ]
# [ 0 Shat^{-1} ] Shat = (Bhat^T Sinv Bhat)
#
# corresponding to the primal (x0) and interfacial (xhat) unknowns.
# Since the Shat operator is equivalent to an H(div) matrix reduced to
# the interfacial skeleton, we approximate its inverse with one V-cycle
# of the ADS preconditioner from the hypre library (in 2D we use AMS for
# the rotated H(curl) problem).
S0inv = mfem.HypreBoomerAMG(matS0)
S0inv.SetPrintLevel(0)
Shat = mfem.RAP(matSinv, matBhat)
if (dim == 2):
Shatinv = mfem.HypreAMS(Shat, xhat_space)
else:
Shatinv = mfem.HypreADS(Shat, xhat_space)
P = mfem.BlockDiagonalPreconditioner(true_offsets)
P.SetDiagonalBlock(0, S0inv)
P.SetDiagonalBlock(1, Shatinv)
# 12. Solve the normal equation system using the PCG iterative solver.
# Check the weighted norm of residual for the DPG least square problem.
# Wrap the primal variable in a GridFunction for visualization purposes.
pcg = mfem.CGSolver(MPI.COMM_WORLD)
pcg.SetOperator(A)
pcg.SetPreconditioner(P)
pcg.SetRelTol(1e-6)
pcg.SetMaxIter(200)
pcg.SetPrintLevel(1)
pcg.Mult(b, x)
LSres = mfem.HypreParVector(test_space)
tmp = mfem.HypreParVector(test_space)
B.Mult(x, LSres)
LSres -= trueF
matSinv.Mult(LSres, tmp)
res = sqrt(mfem.InnerProduct(LSres, tmp))
if (myid == 0):
print("\n|| B0*x0 + Bhat*xhat - F ||_{S^-1} = " + str(res))
x0.Distribute(x.GetBlock(x0_var))
# 13. Save the refined mesh and the solution in parallel. This output can
# be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
smyid = '{:0>6d}'.format(myid)
mesh_name = "mesh."+smyid
sol_name = "sol."+smyid
pmesh.Print(mesh_name, 8)
x0.Save(sol_name, 8)
# 14. Send the solution by socket to a GLVis server.
if visualization:
sol_sock = mfem.socketstream("localhost", 19916)
sol_sock.send_text("parallel " + str(num_procs) + " " + str(myid))
sol_sock.precision(8)
sol_sock.send_solution(pmesh, x0)
| {
"content_hash": "9b07ed2616a04329a9eb79499c095faf",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 85,
"avg_line_length": 32.87822878228782,
"alnum_prop": 0.6894500561167228,
"repo_name": "mfem/PyMFEM",
"id": "dc432ae9d223493c61254024528e983af956925d",
"size": "8910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ex8p.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "179682"
},
{
"name": "Grammatical Framework",
"bytes": "18800"
},
{
"name": "Makefile",
"bytes": "1055"
},
{
"name": "Python",
"bytes": "265160"
},
{
"name": "SWIG",
"bytes": "371435"
},
{
"name": "Shell",
"bytes": "1650"
}
],
"symlink_target": ""
} |
class BuiltIn(object):
def __init__(self):
self.code = []
self.methods = []
self.messages = []
self.defaults = {}
self.constant_names = ['False', 'True', 'Less', 'Equal', 'Greater', 'Empty', 'BuiltIn']
self.opaque_names = ['Constant', 'Small-Integer']
self.pointer_names = ['String', 'Array', 'Large-Integer']
class BuiltInMethod(object):
def __init__(self, tag_name, symbol, arg_names, sent_messages, code):
self.tag_name = tag_name
self.symbol = symbol
self.arg_names = arg_names
self.sent_messages = sent_messages
self.code = code
def generate_target_code(self, label, target):
return target.generate_builtin_method(label, self.arg_names, self.code)
def __repr__(self):
return 'BuiltInMethod(%r, %r, %r, %r, %r)' % (self.tag_name, self.symbol, self.arg_names, self.sent_messages, self.code)
class TraceBackInfo(object):
def __init__(self, index, method_name, stream_name, source_line, line_number, column, underline):
self.index = index
self.method_name = method_name
self.stream_name = stream_name
self.source_line = source_line
self.line_number = line_number
self.column = column
self.underline = underline
class CompileOptions(object):
def __init__(self):
self.verbose = False
self.traceback = True
self.source_traceback = True
| {
"content_hash": "0233935df577cf9e8303c0f867c132e3",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 128,
"avg_line_length": 37.30769230769231,
"alnum_prop": 0.6096219931271478,
"repo_name": "shaurz/ome",
"id": "9d71ef383788915fba20232178abe3c3347b8b3a",
"size": "1552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ome/ome_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "59102"
},
{
"name": "Python",
"bytes": "136748"
}
],
"symlink_target": ""
} |
import time
# ============= local library imports ==========================
from pychron.core.helpers.strtools import csv_to_floats
from pychron.lasers.laser_managers.serial_laser_manager import SerialLaserManager
class ReadPositionError(BaseException):
def __init__(self, xyz):
self._msg = "ReadPosition error. Laser responded={}".format(xyz)
def __str__(self):
return self._msg
def __repr__(self):
return self._msg
class AblationCO2Manager(SerialLaserManager):
stage_manager_id = "ablation.pychron"
configuration_dir_name = "ablation"
read_delay = 25
def set_tray(self, t):
if self.stage_manager:
self.stage_manager.stage_map_name = t
def _test_connection_hook(self):
i = 0
n = 3
while 1:
re = self._ask("GetVersion")
if re:
self.connected = True
return
elif i > n:
self.connected = False
return
time.sleep(1)
i += 1
def end_extract(self, *args, **kw):
self.info("ending extraction. set laser power to 0")
self.set_laser_power(0)
if self._patterning:
self.stop_pattern()
self.disable_laser()
def fire_laser(self):
self.info("fire laser")
self._ask("SetLaserOn 1")
def extract(self, value, units=None, tol=0.1, fire_laser=True, **kw):
if units is None:
units = "watts"
self.info("set laser output to {} {}".format(value, units))
if units == "watts":
ovalue = value
value = self.calculate_calibrated_power(value)
if value < 0:
self.warning(
"Consider changing you calibration curve. "
"{} watts converted to {}%. % must be positive".format(
ovalue, value
)
)
value = 0
resp = self.set_laser_power(value)
if fire_laser:
time.sleep(1)
self.fire_laser()
try:
return abs(float(resp) - value) < tol
except BaseException:
pass
def set_laser_power(self, v):
self.debug("setting laser output to {}".format(v))
return self._ask("SetLaserOutput {}".format(v))
def enable_laser(self, **kw):
# self._ask('laser.enable ON')
self.info("enabling laser")
self._ask("SetLaserFireMode 3") # 3= continuous wave
# self._ask('SetLaserOn 1')
self.enabled = True
def disable_laser(self):
self.info("disabling laser")
self.set_laser_power(0)
self._ask("SetLaserOn 0")
self.enabled = False
def get_position(self, retry=True):
x, y, z = self._x, self._y, self._z
xyz = self._ask("ReadPosition")
if xyz:
try:
x, y, z = [float(v) for v in xyz.split(",")]
if self.stage_manager.use_sign_position_correction:
x = x * self.stage_manager.x_sign
y = y * self.stage_manager.y_sign
z = z * self.stage_manager.z_sign
except ValueError:
self.warning("failed parsing position: {}".format(xyz))
if retry:
time.sleep(0.5)
x, y, z = self.get_position(retry=False)
else:
raise ReadPositionError(xyz)
return x, y, z
def _ask(self, cmd, retry=3):
resp = super(AblationCO2Manager, self)._ask(cmd)
if not resp or (resp and resp.strip().startswith("ERROR")):
if retry:
resp = self._ask(cmd, retry - 1)
return resp
def linear_move(self, x, y, block=False, *args, **kw):
self._move_to_position((x, y), block=block)
def stop(self):
self.warning_dialog(
"The Laser Ablation software does not allow remote stopping of the laser motion"
)
# self._ask('stage.stop')
# self._is_moving = False
# self.update_position()
# private
def _stage_stop_button_fired(self):
self.stop()
def _fire_laser_button_fired(self):
# if self._firing:
# cmd = 0
# else:
# cmd = 1
self._firing = not self._firing
self._ask("SetLaserOn {}".format(int(self._firing)))
def _output_power_changed(self, new):
self.extract(new, self.units, fire_laser=False)
def _set_x(self, v):
if self._move_enabled and v != self._x:
self._is_moving = True
self._ask("SetPosition {:0.3f},{:0.3f},{:0.3f}".format(v, self._y, self._z))
self._single_axis_moving(v, 0)
def _set_y(self, v):
if self._move_enabled and v != self._y:
self._is_moving = True
self._ask("SetPosition {:0.3f},{:0.3f},{:0.3f}".format(self._x, v, self._z))
self._single_axis_moving(v, 1)
def _set_z(self, v):
if self._move_enabled and v != self._z:
self._is_moving = True
self._ask("SetPosition {:0.3f},{:0.3f},{:0.3f}".format(self._x, self._y, v))
self._single_axis_moving(v, 2)
def _single_axis_moving(self, v, axis):
def cmpfunc(xyz):
try:
if not self._is_moving:
return True
# pos =[float(p) for p in xyz.split(','))[axis]
pos = float(xyz.split(",")[axis])
return abs(pos - v) > 2
# print map(lambda ab: abs(ab[0] - ab[1]) <= 2,
# zip(map(float, xyz.split(',')),
# (xm, ym, zm)))
# return not all(map(lambda ab: abs(ab[0] - ab[1]) <= 2,
# zip(map(float, xyz.split(',')),
# (xm, ym, zm))))
except ValueError as e:
print("_moving exception {}".format(e))
self._block(cmd="ReadPosition", cmpfunc=cmpfunc)
time.sleep(0.25)
self._is_moving = False
self.update_position()
def _move_to_position(self, pos, autocenter=False, block=True, *args, **kw):
sm = self.stage_manager
try:
x, y = self._get_hole_xy(pos)
except ValueError:
return
z = self._z
# xs = 5000
# ys = 5000
# zs = 100
self._is_moving = True
self.debug("pos={}, x={}, y={}".format(pos, x, y))
if sm.use_sign_position_correction:
x *= sm.x_sign
y *= sm.y_sign
z *= sm.z_sign
cmd = "SetPosition {:0.3f},{:0.3f},{:0.3f}".format(x, y, z)
self.info("sending {}".format(cmd))
self._ask(cmd)
time.sleep(1)
return self._moving(x, y, z, block)
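    # Optionally block until ReadPosition reports the commanded x/y/z; the
    # cmpfunc handed to _block returns True while any axis is still more than
    # 0.01 away from its target.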
def _moving(self, xm, ym, zm, block=True):
r = True
if block:
time.sleep(0.5)
def cmpfunc(xyz):
try:
if not self._is_moving:
return True
# ps = [float(p) for p in xyz.split(',')]
ps = csv_to_floats(xyz)
# return not all([abs(ab[0] - ab[1]) <= 2 for ab in zip(list(map(float, xyz.split(','))),
# (xm, ym, zm))])
return not all(abs(a - b) <= 0.01 for a, b in zip(ps, (xm, ym, zm)))
except ValueError as e:
print("_moving exception {}".format(e))
r = self._block(cmd="ReadPosition", cmpfunc=cmpfunc, period=1)
self._is_moving = False
time.sleep(0.5)
self.update_position()
return r
def _stage_manager_default(self):
name = "ablation"
args = dict(
name="stage",
configuration_name="stage",
configuration_dir_name=name,
parent=self,
)
return self._stage_manager_factory(args)
def _stage_manager_factory(self, args):
from pychron.lasers.stage_managers.ablation_stage_manager import (
AblationStageManager,
)
self.stage_args = args
klass = AblationStageManager
sm = klass(**args)
sm.id = self.stage_manager_id
return sm
def _pattern_executor_default(self):
from pychron.lasers.pattern.pattern_executor import PatternExecutor
pm = PatternExecutor(
application=self.application, controller=self, laser_manager=self
)
return pm
# ============= EOF =============================================
| {
"content_hash": "bf44f6ab0bef03b04d1d4352c1c8f4af",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 109,
"avg_line_length": 30.90459363957597,
"alnum_prop": 0.49485479076149097,
"repo_name": "NMGRL/pychron",
"id": "f10c4d9598b40f94fc5891c5b729f38b844b2364",
"size": "9612",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/lasers/laser_managers/ablation_laser_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
} |
database(
thermoLibraries = ['primaryThermoLibrary'],
reactionLibraries = [],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = ['!Intra_Disproportionation','!Substitution_O'],
kineticsEstimator = 'rate rules',
)
# List of species
species(
label='ethane',
reactive=True,
structure=SMILES("CC"),
)
# Reaction systems
simpleReactor(
temperature=(1350,'K'),
pressure=(1.0,'bar'),
initialMoleFractions={
"ethane": 1.0,
},
terminationConversion={
'ethane': 0.9,
},
terminationTime=(1e6,'s'),
sensitivity=['ethane'],
sensitivityThreshold=0.01,
)
simulator(
atol=1e-16,
rtol=1e-8,
sens_atol=1e-6,
sens_rtol=1e-4,
)
model(
toleranceKeepInEdge=0.0,
toleranceMoveToCore=0.1,
toleranceInterruptSimulation=0.1,
maximumEdgeSpecies=100000
)
options(
units='si',
saveRestartPeriod=None,
saveSimulationProfiles=True,
drawMolecules=False,
generatePlots=False,
)
| {
"content_hash": "fd7c7180b6c77dcbd5e505a45a9d04a7",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 71,
"avg_line_length": 19.557692307692307,
"alnum_prop": 0.6460176991150443,
"repo_name": "enochd/RMG-Py",
"id": "1bf80ef09e70a9645ec76339de3d4e0b475ed181",
"size": "1032",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/sensitivity/input.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3650"
},
{
"name": "Makefile",
"bytes": "3781"
},
{
"name": "Python",
"bytes": "3139323"
},
{
"name": "Shell",
"bytes": "8634"
}
],
"symlink_target": ""
} |
import warnings
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..utils.metaestimators import available_if
from ..utils.validation import (
_allclose_dense_sparse,
_check_feature_names_in,
check_array,
)
from ..utils._param_validation import StrOptions
def _identity(X):
"""The identity function."""
return X
class FunctionTransformer(TransformerMixin, BaseEstimator):
"""Constructs a transformer from an arbitrary callable.
A FunctionTransformer forwards its X (and optionally y) arguments to a
user-defined function or function object and returns the result of this
function. This is useful for stateless transformations such as taking the
log of frequencies, doing custom scaling, etc.
Note: If a lambda is used as the function, then the resulting
transformer will not be pickleable.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <function_transformer>`.
Parameters
----------
func : callable, default=None
The callable to use for the transformation. This will be passed
the same arguments as transform, with args and kwargs forwarded.
If func is None, then func will be the identity function.
inverse_func : callable, default=None
The callable to use for the inverse transformation. This will be
passed the same arguments as inverse transform, with args and
kwargs forwarded. If inverse_func is None, then inverse_func
will be the identity function.
validate : bool, default=False
Indicate that the input X array should be checked before calling
``func``. The possibilities are:
- If False, there is no input validation.
- If True, then X will be converted to a 2-dimensional NumPy array or
sparse matrix. If the conversion is not possible an exception is
raised.
.. versionchanged:: 0.22
The default of ``validate`` changed from True to False.
accept_sparse : bool, default=False
Indicate that func accepts a sparse matrix as input. If validate is
False, this has no effect. Otherwise, if accept_sparse is false,
sparse matrix inputs will cause an exception to be raised.
check_inverse : bool, default=True
        Whether to check that ``func`` followed by ``inverse_func`` leads to
the original inputs. It can be used for a sanity check, raising a
warning when the condition is not fulfilled.
.. versionadded:: 0.20
feature_names_out : callable, 'one-to-one' or None, default=None
Determines the list of feature names that will be returned by the
`get_feature_names_out` method. If it is 'one-to-one', then the output
feature names will be equal to the input feature names. If it is a
callable, then it must take two positional arguments: this
`FunctionTransformer` (`self`) and an array-like of input feature names
(`input_features`). It must return an array-like of output feature
names. The `get_feature_names_out` method is only defined if
`feature_names_out` is not None.
See ``get_feature_names_out`` for more details.
.. versionadded:: 1.1
kw_args : dict, default=None
Dictionary of additional keyword arguments to pass to func.
.. versionadded:: 0.18
inv_kw_args : dict, default=None
Dictionary of additional keyword arguments to pass to inverse_func.
.. versionadded:: 0.18
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 1.0
See Also
--------
MaxAbsScaler : Scale each feature by its maximum absolute value.
StandardScaler : Standardize features by removing the mean and
scaling to unit variance.
LabelBinarizer : Binarize labels in a one-vs-all fashion.
MultiLabelBinarizer : Transform between iterable of iterables
and a multilabel format.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import FunctionTransformer
>>> transformer = FunctionTransformer(np.log1p)
>>> X = np.array([[0, 1], [2, 3]])
>>> transformer.transform(X)
array([[0. , 0.6931...],
[1.0986..., 1.3862...]])
"""
_parameter_constraints: dict = {
"func": [callable, None],
"inverse_func": [callable, None],
"validate": ["boolean"],
"accept_sparse": ["boolean"],
"check_inverse": ["boolean"],
"feature_names_out": [callable, StrOptions({"one-to-one"}), None],
"kw_args": [dict, None],
"inv_kw_args": [dict, None],
}
def __init__(
self,
func=None,
inverse_func=None,
*,
validate=False,
accept_sparse=False,
check_inverse=True,
feature_names_out=None,
kw_args=None,
inv_kw_args=None,
):
self.func = func
self.inverse_func = inverse_func
self.validate = validate
self.accept_sparse = accept_sparse
self.check_inverse = check_inverse
self.feature_names_out = feature_names_out
self.kw_args = kw_args
self.inv_kw_args = inv_kw_args
def _check_input(self, X, *, reset):
if self.validate:
return self._validate_data(X, accept_sparse=self.accept_sparse, reset=reset)
elif reset:
# Set feature_names_in_ and n_features_in_ even if validate=False
# We run this only when reset==True to store the attributes but not
# validate them, because validate=False
self._check_n_features(X, reset=reset)
self._check_feature_names(X, reset=reset)
return X
def _check_inverse_transform(self, X):
"""Check that func and inverse_func are the inverse."""
idx_selected = slice(None, None, max(1, X.shape[0] // 100))
X_round_trip = self.inverse_transform(self.transform(X[idx_selected]))
if not np.issubdtype(X.dtype, np.number):
raise ValueError(
"'check_inverse' is only supported when all the elements in `X` is"
" numerical."
)
if not _allclose_dense_sparse(X[idx_selected], X_round_trip):
warnings.warn(
"The provided functions are not strictly"
" inverse of each other. If you are sure you"
" want to proceed regardless, set"
" 'check_inverse=False'.",
UserWarning,
)
def fit(self, X, y=None):
"""Fit transformer by checking X.
If ``validate`` is ``True``, ``X`` will be checked.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input array.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
FunctionTransformer class instance.
"""
self._validate_params()
X = self._check_input(X, reset=True)
if self.check_inverse and not (self.func is None or self.inverse_func is None):
self._check_inverse_transform(X)
return self
def transform(self, X):
"""Transform X using the forward function.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input array.
Returns
-------
X_out : array-like, shape (n_samples, n_features)
Transformed input.
"""
X = self._check_input(X, reset=False)
return self._transform(X, func=self.func, kw_args=self.kw_args)
def inverse_transform(self, X):
"""Transform X using the inverse function.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input array.
Returns
-------
X_out : array-like, shape (n_samples, n_features)
Transformed input.
"""
if self.validate:
X = check_array(X, accept_sparse=self.accept_sparse)
return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args)
@available_if(lambda self: self.feature_names_out is not None)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
This method is only defined if `feature_names_out` is not None.
Parameters
----------
input_features : array-like of str or None, default=None
Input feature names.
- If `input_features` is None, then `feature_names_in_` is
used as the input feature names. If `feature_names_in_` is not
defined, then names are generated:
`[x0, x1, ..., x(n_features_in_ - 1)]`.
- If `input_features` is array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
- If `feature_names_out` is 'one-to-one', the input feature names
are returned (see `input_features` above). This requires
`feature_names_in_` and/or `n_features_in_` to be defined, which
is done automatically if `validate=True`. Alternatively, you can
set them in `func`.
- If `feature_names_out` is a callable, then it is called with two
arguments, `self` and `input_features`, and its return value is
returned by this method.
"""
if hasattr(self, "n_features_in_") or input_features is not None:
input_features = _check_feature_names_in(self, input_features)
if self.feature_names_out == "one-to-one":
names_out = input_features
elif callable(self.feature_names_out):
names_out = self.feature_names_out(self, input_features)
else:
raise ValueError(
f"feature_names_out={self.feature_names_out!r} is invalid. "
'It must either be "one-to-one" or a callable with two '
"arguments: the function transformer and an array-like of "
"input feature names. The callable must return an array-like "
"of output feature names."
)
return np.asarray(names_out, dtype=object)
def _transform(self, X, func=None, kw_args=None):
if func is None:
func = _identity
return func(X, **(kw_args if kw_args else {}))
def __sklearn_is_fitted__(self):
"""Return True since FunctionTransfomer is stateless."""
return True
def _more_tags(self):
return {"no_validation": not self.validate, "stateless": True}
def set_output(self, *, transform=None):
"""Set output container.
See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
for an example on how to use the API.
Parameters
----------
transform : {"default", "pandas"}, default=None
Configure output of `transform` and `fit_transform`.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `None`: Transform configuration is unchanged
Returns
-------
self : estimator instance
Estimator instance.
"""
if hasattr(super(), "set_output"):
return super().set_output(transform=transform)
if transform == "pandas" and self.feature_names_out is None:
warnings.warn(
'With transform="pandas", `func` should return a DataFrame to follow'
" the set_output API."
)
return self
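# --- Usage sketch (editor's addition, not part of scikit-learn) ---
# Exercises the behaviour documented above using the class defined in this module
# (copied elsewhere, import it as ``from sklearn.preprocessing import FunctionTransformer``);
# the __main__ guard keeps it inert when the module is imported as part of the package.
if __name__ == "__main__":  # pragma: no cover
    t = FunctionTransformer(
        np.log1p,
        inverse_func=np.expm1,
        validate=True,
        feature_names_out="one-to-one",
    )
    X_demo = np.array([[0.0, 1.0], [2.0, 3.0]])
    Xt = t.fit_transform(X_demo)  # element-wise log1p; the round-trip check passes silently
    assert np.allclose(t.inverse_transform(Xt), X_demo)
    print(t.get_feature_names_out())  # ['x0' 'x1'] because validate=True records the input width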
| {
"content_hash": "ac11c8ca07c36d55dad8b573127874d3",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 88,
"avg_line_length": 35.84365781710915,
"alnum_prop": 0.6006913011274793,
"repo_name": "betatim/scikit-learn",
"id": "d4c2cf6de7af2163f29eeda2d5b261924427e1f6",
"size": "12151",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "sklearn/preprocessing/_function_transformer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "668499"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10504881"
},
{
"name": "Shell",
"bytes": "41551"
}
],
"symlink_target": ""
} |
def extractKysqiWordpressCom(item):
'''
Parser for 'kysqi.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
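# Illustration (editor's note, not from the original source): an ``item`` whose
# title parses to a volume/chapter (e.g. "Foo - Chapter 5") and whose tags include
# 'PRC' is emitted as a translated release for 'PRC'; items with no volume/chapter
# info, or with "preview" in the title, are skipped (None), and unmatched tags
# fall through to False.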
| {
"content_hash": "6c201c49e771c66d0ee8a72b9087c9ef",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 26.142857142857142,
"alnum_prop": 0.6284153005464481,
"repo_name": "fake-name/ReadableWebProxy",
"id": "6b70057fc5edb708e0f2bcbcdb6ad38a20566880",
"size": "550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractKysqiWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
import torch
from fairseq.optim.amp_optimizer import AMPOptimizer
from fairseq.tasks import register_task
from fairseq.tasks.speech_to_text import SpeechToTextTask
from .data.speech_to_text_dataset_with_domain import SpeechToTextDatasetCreatorWithDomain
from .loss.attention_head_selection import HeadSelectionLoss
@register_task("speech_to_text_head_selection")
class SpeechToTextHeadSelectionTask(SpeechToTextTask):
@classmethod
def add_args(cls, parser):
SpeechToTextTask.add_args(parser)
parser.add_argument(
"--task-type",
type=str,
default="lang",
help="task type for head selection, lang or domain"
)
parser.add_argument(
"--kl-weight",
type=float,
default=0.0,
help="the weight of KL loss"
)
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.task_type = args.task_type
assert self.task_type in ["lang", "domain"], "invalid task_type: {}, should be either lang or domain".format(self.task_type)
self.map_task_to_id(args.train_subset)
        self.encoder_head_prior = float(args.encoder_attention_heads) / args.total_encoder_attention_heads
        self.decoder_head_prior = float(args.decoder_attention_heads) / args.total_decoder_attention_heads
self.kl_loss = HeadSelectionLoss(args)
def map_task_to_id(self, train_subset):
src_lang_set, tgt_lang_set, domain_set = set(), set(), set()
for split in train_subset.split(","):
seq = split.split("_")
assert len(seq) == 4, "subset {} should be in the format of train_src_tgt_domain".format(split)
_, src_lang, tgt_lang, domain = seq
src_lang_set.add(src_lang)
tgt_lang_set.add(tgt_lang)
domain_set.add(domain)
src_langs = sorted(src_lang_set)
tgt_langs = sorted(tgt_lang_set)
domains = sorted(domain_set)
self.src_lang_map = {src_lang: i for (i, src_lang) in enumerate(src_langs)}
self.tgt_lang_map = {tgt_lang: i for (i, tgt_lang) in enumerate(tgt_langs)}
self.domain_map = {domain: i for (i, domain) in enumerate(domains)}
if self.task_type == "lang":
self.encoder_tasks = len(self.src_lang_map)
self.decoder_tasks = len(self.tgt_lang_map)
elif self.task_type == "domain":
self.encoder_tasks = len(self.domain_map)
self.decoder_tasks = len(self.domain_map)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
self.datasets[split] = SpeechToTextDatasetCreatorWithDomain.from_tsv(
self.args.data,
self.data_cfg,
split,
self.tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split=is_train_split,
epoch=epoch,
seed=self.args.seed,
src_lang_map=self.src_lang_map,
tgt_lang_map=self.tgt_lang_map,
domain_map=self.domain_map,
speaker_to_id=self.speaker_to_id
)
def build_model(self, args):
args.encoder_tasks = self.encoder_tasks
args.decoder_tasks = self.decoder_tasks
return super(SpeechToTextHeadSelectionTask, self).build_model(args)
def get_sample_sizes(self, sample, task_ids, num_tasks):
"""
task_ids: (bsz,)
get sample sizes for each task
"""
bsz = task_ids.size(0)
mat = torch.zeros((num_tasks, bsz), device=task_ids.device)
mat[task_ids, torch.arange(bsz)] = 1.0
ntokens = torch.sum(sample['target'] != 1, dim=-1)
sample_sizes = torch.matmul(mat, ntokens.float())
return sample_sizes
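    # Worked example (editor's note; the numbers are illustrative only): with
    # num_tasks=2, task_ids=[0, 1, 0] and per-sample target-token counts
    # ntokens=[5, 7, 3], ``mat`` is the 2x3 one-hot assignment matrix, so
    # matmul(mat, ntokens) yields sample_sizes=[8., 7.].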
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
model.set_num_updates(update_num)
# task ids
if self.task_type == "lang":
encoder_task_ids = sample["src_lang_ids"]
decoder_task_ids = sample["tgt_lang_ids"]
elif self.task_type == "domain":
encoder_task_ids = sample["domain_ids"]
decoder_task_ids = sample["domain_ids"]
model.encoder.set_task_ids(encoder_task_ids)
model.decoder.set_task_ids(decoder_task_ids)
with torch.autograd.profiler.record_function("forward"):
with torch.cuda.amp.autocast(enabled=(isinstance(optimizer, AMPOptimizer))):
loss, sample_size, logging_output = criterion(model, sample)
# KL loss
if self.args.encoder_attn_head_select:
sample_sizes = self.get_sample_sizes(sample, encoder_task_ids, self.encoder_tasks)
loss += self.kl_loss(
model.encoder.attn_head_selector.head_samples,
sample_sizes,
self.encoder_head_prior
)
if self.args.decoder_self_attn_head_select:
sample_sizes = self.get_sample_sizes(sample, decoder_task_ids, self.decoder_tasks)
loss += self.kl_loss(
model.decoder.self_attn_head_selector.head_samples,
sample_sizes,
self.decoder_head_prior
)
if self.args.dec_enc_attn_head_select:
sample_sizes = self.get_sample_sizes(sample, decoder_task_ids, self.decoder_tasks)
loss += self.kl_loss(
                        model.decoder.enc_attn_head_selector.head_samples,
sample_sizes,
self.decoder_head_prior
)
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
# task ids
if self.task_type == "lang":
encoder_task_ids = sample["src_lang_ids"]
decoder_task_ids = sample["tgt_lang_ids"]
elif self.task_type == "domain":
encoder_task_ids = sample["domain_ids"]
decoder_task_ids = sample["domain_ids"]
model.encoder.set_task_ids(encoder_task_ids)
model.decoder.set_task_ids(decoder_task_ids)
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
# task ids
if self.task_type == "lang":
encoder_task_ids = sample["src_lang_ids"][:1]
decoder_task_ids = sample["tgt_lang_ids"][:1]
elif self.task_type == "domain":
encoder_task_ids = sample["domain_ids"][:1]
decoder_task_ids = sample["domain_ids"][:1]
for model in models:
model.encoder.set_task_ids(encoder_task_ids)
model.decoder.set_task_ids(decoder_task_ids)
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, constraints=constraints
)
| {
"content_hash": "f98678d2f682cd8f1bfabade36553480",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 132,
"avg_line_length": 43.137142857142855,
"alnum_prop": 0.5788846204795337,
"repo_name": "pytorch/fairseq",
"id": "6e0ce11d6307493b30da865ba23adf23d0015b7c",
"size": "7727",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/attention_head_selection/src/speech_to_text_head_selection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21106"
},
{
"name": "Cuda",
"bytes": "38166"
},
{
"name": "Cython",
"bytes": "13294"
},
{
"name": "Lua",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "3699357"
},
{
"name": "Shell",
"bytes": "2182"
}
],
"symlink_target": ""
} |
def foo(some_pa<caret>ram: str):
pass | {
"content_hash": "3fece53c695081387e9ba090fc3f314c",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 32,
"avg_line_length": 20.5,
"alnum_prop": 0.6585365853658537,
"repo_name": "siosio/intellij-community",
"id": "70e1da13e07e268bd7b70917d66290f638745660",
"size": "41",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "python/testData/intentions/SpecifyTypeInPy3AnnotationsIntentionTest/annotatedParameterNoIntention.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from unittest import TestCase
import klineyes
from klineyes.util.test_data import load_test_data
class TestEyes(TestCase):
def test_get_dates_pattern(self):
        '''
        Smoke test: get_dates_pattern should run over the bundled test data
        for the 'hammer', 'line' and 'star' pattern types.
        '''
klineyes.get_dates_pattern(input_data=load_test_data(), ptypes=['hammer', 'line', 'star']) | {
"content_hash": "070a93e5a7085313ab7669a68f3fceb9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 98,
"avg_line_length": 22.857142857142858,
"alnum_prop": 0.64375,
"repo_name": "tenstone/klineyes",
"id": "fb3fd86292dca0483f7cd4c678cca8ff70d6b9ec",
"size": "320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_klineyes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16279"
}
],
"symlink_target": ""
} |
"""
Wind Turbine Company - 2013
Author: Stephan Rayner
Email: [email protected]
"""
from HIL.controller.Base_Controller import Base_Controller
from Yaw_Generation import Yaw_Generation
Yaw_Generation = Yaw_Generation()
class Yaw_Manual(Base_Controller):
"""docstring for Yaw_Manual"""
def __init__(self):
super(Yaw_Manual, self).__init__()
self.controler = "mcc"
self.readVariable = ['@GV.DO_YawPin1', '@GV.DO_YawPin2']
self.writeVariable = ["@GV.HWB_YawDriveCWDemand", "@GV.HWB_YawDriveCCWDemand"]
def write(self, direction):
assert(direction in ["cw", "ccw", "clockwise", "counterclockwise"])
if(direction in["cw", "clockwise"]):
self.mcc.raw_write(self.mccip, self.writeVariable[0], "1")
self.mcc.raw_write(self.mccip, self.writeVariable[1], "0")
pass
else:
self.mcc.raw_write(self.mccip, self.writeVariable[0], "0")
self.mcc.raw_write(self.mccip, self.writeVariable[1], "1")
def read(self):
self.generation = Yaw_Generation.read()
[Temp1, Temp2] = self.mcc.read(self.readVariable).items()
Yaw_Pins = [Temp1[1], Temp2[1]]
if self.generation == '1':
if Yaw_Pins == ['1', '0']:
return 'counterclockwise'
elif Yaw_Pins == ['0', '1']:
return 'clockwise'
else:
return 'Neither counterclockwise nor clockwise'
if self.generation == '2':
if Yaw_Pins == ['1', '1']:
return 'clockwise'
elif Yaw_Pins == ['1', '0']:
return 'counterclockwise'
else:
return 'Neither counterclockwise nor clockwise'
def reset(self):
for variable in self.writeVariable:
self.mcc.raw_write(self.mccip, variable, '0')
def help(self):
return "Help string for Yaw_Manual"
| {
"content_hash": "0b171d2649f2e76555a047e566c2e3da",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 86,
"avg_line_length": 36.39622641509434,
"alnum_prop": 0.5759460860549508,
"repo_name": "stephan-rayner/HIL-TestStation",
"id": "bc96c7d3608c6635bf6269b3bc13c8fc9a6121a2",
"size": "1929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HIL/controller/e3120/Yaw_Manual.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95483"
}
],
"symlink_target": ""
} |
"""The type mappings for the ``simplejson``-like API.
In particular, this module provides the extension to native Python data types with
particulars of the Ion data model.
"""
# Python 2/3 compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from decimal import Decimal
from collections import MutableMapping
import six
from amazon.ion.symbols import SymbolToken
from .core import TIMESTAMP_PRECISION_FIELD
from .core import Multimap, Timestamp, IonEvent, IonType, TIMESTAMP_FRACTION_PRECISION_FIELD, TimestampPrecision, \
MICROSECOND_PRECISION, TIMESTAMP_FRACTIONAL_SECONDS_FIELD
class _IonNature(object):
"""Mix-in for Ion related properties.
Attributes:
ion_event (Optional[IonEvent]): The event, if any associated with the value.
ion_type (IonType): The Ion type for the value.
ion_annotations (Sequence[unicode]): The annotations associated with the value.
Notes:
There is no ``field_name`` attribute as that is generally modeled as a property of the
container.
The ``ion_event`` field is only provided if the value was derived from a low-level event.
User constructed values will generally not set this field.
"""
def __init__(self, *args, **kwargs):
self.ion_type = None
self.ion_annotations = ()
def _copy(self):
"""Copies this instance. Its IonEvent (if any) is not preserved.
Keeping this protected until/unless we decide there's use for it publicly.
"""
args, kwargs = self._to_constructor_args(self)
value = self.__class__(*args, **kwargs)
value.ion_type = self.ion_type
value.ion_annotations = self.ion_annotations
return value
@staticmethod
def _to_constructor_args(value):
return (value, ), {}
@classmethod
def from_event(cls, ion_event):
"""Constructs the given native extension from the properties of an event.
Args:
ion_event (IonEvent): The event to construct the native value from.
"""
if ion_event.value is not None:
args, kwargs = cls._to_constructor_args(ion_event.value)
else:
# if value is None (i.e. this is a container event), args must be empty or initialization of the
# underlying container will fail.
args, kwargs = (), {}
value = cls(*args, **kwargs)
value.ion_type = ion_event.ion_type
value.ion_annotations = ion_event.annotations
return value
@classmethod
def from_value(cls, ion_type, value, annotations=()):
"""Constructs a value as a copy with an associated Ion type and annotations.
Args:
ion_type (IonType): The associated Ion type.
value (Any): The value to construct from, generally of type ``cls``.
            annotations (Sequence[unicode]): The sequence of Unicode strings decorating this value.
"""
if value is None:
value = IonPyNull()
else:
args, kwargs = cls._to_constructor_args(value)
value = cls(*args, **kwargs)
value.ion_type = ion_type
value.ion_annotations = annotations
return value
def to_event(self, event_type, field_name=None, in_struct=False, depth=None):
"""Constructs an IonEvent from this _IonNature value.
Args:
event_type (IonEventType): The type of the resulting event.
field_name (Optional[text]): The field name associated with this value, if any. When ``None``
is specified and ``in_struct`` is ``True``, the returned event's ``field_name`` will
represent symbol zero (a ``SymbolToken`` with text=None and sid=0).
in_struct (Optional[True|False]): When ``True``, indicates the returned event ``field_name``
will be populated. When ``False``, ``field_name`` will be ``None``.
depth (Optional[int]): The depth of this value.
Returns:
An IonEvent with the properties from this value.
"""
value = self
if isinstance(self, IonPyNull) or self.ion_type.is_container:
value = None
if in_struct:
if not isinstance(field_name, SymbolToken):
field_name = SymbolToken(field_name, 0 if field_name is None else None)
else:
field_name = None
return IonEvent(event_type, ion_type=self.ion_type, value=value, field_name=field_name,
annotations=self.ion_annotations, depth=depth)
def _ion_type_for(name, base_cls):
class IonPyValueType(base_cls, _IonNature):
def __init__(self, *args, **kwargs):
super(IonPyValueType, self).__init__(*args, **kwargs)
IonPyValueType.__name__ = name
IonPyValueType.__qualname__ = name
return IonPyValueType
if six.PY2:
IonPyInt = _ion_type_for('IonPyInt', long)
else:
IonPyInt = _ion_type_for('IonPyInt', int)
IonPyBool = IonPyInt
IonPyFloat = _ion_type_for('IonPyFloat', float)
IonPyDecimal = _ion_type_for('IonPyDecimal', Decimal)
IonPyText = _ion_type_for('IonPyText', six.text_type)
IonPyBytes = _ion_type_for('IonPyBytes', six.binary_type)
class IonPySymbol(SymbolToken, _IonNature):
def __init__(self, *args, **kwargs):
super(IonPySymbol, self).__init__(*args, **kwargs)
@staticmethod
def _to_constructor_args(st):
try:
args = (st.text, st.sid, st.location)
except AttributeError:
args = (st, None, None)
kwargs = {}
return args, kwargs
class IonPyTimestamp(Timestamp, _IonNature):
def __init__(self, *args, **kwargs):
super(IonPyTimestamp, self).__init__(*args, **kwargs)
@staticmethod
def _to_constructor_args(ts):
if isinstance(ts, Timestamp):
args = (ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, None, ts.tzinfo)
fractional_seconds = getattr(ts, TIMESTAMP_FRACTIONAL_SECONDS_FIELD, None)
precision = getattr(ts, TIMESTAMP_PRECISION_FIELD, TimestampPrecision.SECOND)
kwargs = {TIMESTAMP_PRECISION_FIELD: precision, TIMESTAMP_FRACTIONAL_SECONDS_FIELD: fractional_seconds}
else:
args = (ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, ts.microsecond, ts.tzinfo)
kwargs = {TIMESTAMP_PRECISION_FIELD: TimestampPrecision.SECOND}
return args, kwargs
class IonPyNull(_IonNature):
"""Representation of ``null``.
Notes:
``None`` is a singleton and cannot be sub-classed, so we have our
own value type for it. The function ``is_null`` is the best way
to test for ``null``-ness or ``None``-ness.
"""
def __init__(self, *args, **kwargs):
super(IonPyNull, self).__init__(*args, **kwargs)
def __nonzero__(self):
return False
def __bool__(self):
return False
@staticmethod
def _to_constructor_args(value):
return (), {}
def is_null(value):
"""A mechanism to determine if a value is ``None`` or an Ion ``null``."""
return value is None or isinstance(value, IonPyNull)
IonPyList = _ion_type_for('IonPyList', list)
IonPyDict = _ion_type_for('IonPyDict', Multimap)
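# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Shows how the extension types carry Ion metadata alongside plain Python values;
# the __main__ guard keeps it inert when the module is imported as part of the package.
if __name__ == '__main__':
    greeting = IonPyText.from_value(IonType.STRING, u'hello', annotations=(u'greeting',))
    print('{} {} {}'.format(greeting, greeting.ion_type, greeting.ion_annotations))
    null_value = IonPyNull.from_value(IonType.NULL, None)
    print('{} {}'.format(is_null(null_value), bool(null_value)))  # True False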
| {
"content_hash": "c945eee0dafd18235b42e8665ee9c161",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 115,
"avg_line_length": 35.78536585365854,
"alnum_prop": 0.629907306434024,
"repo_name": "almann/ion-python",
"id": "9db902da13cc3d7944ff53dcc3aaf033ca784602",
"size": "7904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amazon/ion/simple_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "537096"
}
],
"symlink_target": ""
} |
import mock
from rally.plugins.openstack.context.network import allow_ssh
from tests.unit import fakes
from tests.unit import test
CTX = "rally.plugins.openstack.context.network.allow_ssh"
class AllowSSHContextTestCase(test.TestCase):
def setUp(self):
super(AllowSSHContextTestCase, self).setUp()
self.users = 2
self.secgroup_name = "test-secgroup"
self.ctx_with_secgroup = test.get_test_context()
self.ctx_with_secgroup.update({
"users": [
{
"tenant_id": "uuid1",
"endpoint": "endpoint",
"secgroup": {"id": "secgroup_id", "name": "secgroup"}
}
] * self.users,
"admin": {"tenant_id": "uuid2", "endpoint": "admin_endpoint"},
"tenants": {"uuid1": {"id": "uuid1", "name": "uuid1"}},
})
self.ctx_without_secgroup = test.get_test_context()
self.ctx_without_secgroup.update({
"users": [{"tenant_id": "uuid1",
"endpoint": "endpoint"},
{"tenant_id": "uuid1",
"endpoint": "endpoint"}],
"admin": {"tenant_id": "uuid2", "endpoint": "admin_endpoint"},
"tenants": {"uuid1": {"id": "uuid1", "name": "uuid1"}},
})
@mock.patch("%s.osclients.Clients" % CTX)
def test__prepare_open_secgroup(self, mock_clients):
fake_nova = fakes.FakeNovaClient()
self.assertEqual(len(fake_nova.security_groups.list()), 1)
mock_cl = mock.MagicMock()
mock_cl.nova.return_value = fake_nova
mock_clients.return_value = mock_cl
ret = allow_ssh._prepare_open_secgroup("endpoint", self.secgroup_name)
self.assertEqual(self.secgroup_name, ret["name"])
self.assertEqual(2, len(fake_nova.security_groups.list()))
self.assertIn(
self.secgroup_name,
[sg.name for sg in fake_nova.security_groups.list()])
# run prep again, check that another security group is not created
allow_ssh._prepare_open_secgroup("endpoint", self.secgroup_name)
self.assertEqual(2, len(fake_nova.security_groups.list()))
@mock.patch("%s.osclients.Clients" % CTX)
def test__prepare_open_secgroup_rules(self, mock_clients):
fake_nova = fakes.FakeNovaClient()
# NOTE(hughsaunders) Default security group is precreated
self.assertEqual(1, len(fake_nova.security_groups.list()))
mock_cl = mock.MagicMock()
mock_cl.nova.return_value = fake_nova
mock_clients.return_value = mock_cl
allow_ssh._prepare_open_secgroup("endpoint", self.secgroup_name)
self.assertEqual(2, len(fake_nova.security_groups.list()))
rally_open = fake_nova.security_groups.find(self.secgroup_name)
self.assertEqual(3, len(rally_open.rules))
# run prep again, check that extra rules are not created
allow_ssh._prepare_open_secgroup("endpoint", self.secgroup_name)
rally_open = fake_nova.security_groups.find(self.secgroup_name)
self.assertEqual(3, len(rally_open.rules))
@mock.patch("%s.osclients.Clients" % CTX)
@mock.patch("%s._prepare_open_secgroup" % CTX)
@mock.patch("rally.plugins.openstack.wrappers.network.wrap")
def test_secgroup_setup_cleanup_with_secgroup_supported(
self, mock_network_wrap, mock__prepare_open_secgroup,
mock_clients):
mock_network_wrapper = mock.MagicMock()
mock_network_wrapper.supports_extension.return_value = (
True, "")
mock_network_wrap.return_value = mock_network_wrapper
mock__prepare_open_secgroup.return_value = {
"name": "secgroup",
"id": "secgroup_id"}
mock_clients.return_value = mock.MagicMock()
secgrp_ctx = allow_ssh.AllowSSH(self.ctx_with_secgroup)
secgrp_ctx.setup()
self.assertEqual(self.ctx_with_secgroup, secgrp_ctx.context)
secgrp_ctx.cleanup()
self.assertEqual(
[
mock.call("admin_endpoint"),
mock.call("endpoint"),
mock.call().nova(),
mock.call().nova().security_groups.get("secgroup_id"),
mock.call().nova().security_groups.get().delete()
],
mock_clients.mock_calls)
mock_network_wrap.assert_called_once_with(
mock_clients.return_value, self.ctx_with_secgroup["task"],
config={})
@mock.patch("%s.osclients.Clients" % CTX)
@mock.patch("rally.plugins.openstack.wrappers.network.wrap")
def test_secgroup_setup_with_secgroup_unsupported(
self, mock_network_wrap, mock_clients):
mock_network_wrapper = mock.MagicMock()
mock_network_wrapper.supports_extension.return_value = (
False, "Not supported")
mock_network_wrap.return_value = mock_network_wrapper
mock_clients.return_value = mock.MagicMock()
secgrp_ctx = allow_ssh.AllowSSH(dict(self.ctx_without_secgroup))
secgrp_ctx.setup()
self.assertEqual(self.ctx_without_secgroup, secgrp_ctx.context)
mock_clients.assert_called_once_with("admin_endpoint")
mock_network_wrap.assert_called_once_with(
mock_clients.return_value, self.ctx_without_secgroup["task"],
config={})
| {
"content_hash": "af94c5eab9d90157c968c19cd63b00af",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 78,
"avg_line_length": 40.50375939849624,
"alnum_prop": 0.6047893075923519,
"repo_name": "redhat-openstack/rally",
"id": "8615bc08a1a9a2abf6e50b48d16c0ee6e84722d6",
"size": "6017",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/openstack/context/network/test_allow_ssh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "48863"
},
{
"name": "Python",
"bytes": "2746418"
},
{
"name": "Shell",
"bytes": "43908"
}
],
"symlink_target": ""
} |
import math, random
# Primality test functions. Assume that "n" is odd and greater than 3.
# Tests if 'n' is prime by trial division, by checking if it has a factor
# k (1 < k <= sqrt(n))
def trial_division(n):
return not(any(n % x == 0 for x in range(3, math.floor(math.sqrt(n))+1)))
# Tests if 'n' is a strong pseudoprime using the Miller-Rabin probabilistic algorithm.
#
# If 'n' is prime, then the following properties hold. If one of them fails,
# then we know 'n' is composite.
#
# Randomly picks 'rounds' bases.
# For each base 'a', start with Fermat's little theorem (a^(n-1) = 1 (mod n)).
# Take the square root of 1 (in a finite field Z/nZ, assuming n is prime)
# and verify that every root is either 1 or -1.
# so, (suppose n-1 = 2^s * d (d odd) and n prime), we have EITHER:
# - a^d = 1 (mod n) (no need to continue, it will stay 1 through all 's' squarings)
# - a^(2^r * d) = -1 (mod n), for a certain r in [0,s-1] (all subsequent squarings will
# then give 1, so we can stop there).
# The contrapositive:
# n is composite if we have BOTH:
# - a^d != 1 (mod n)
# - a^(2^r * d) != -1 (mod n), for all r in [0,s-1]
#
# Has a probability of 4^(-'rounds') to return a composite number as a strong pseudoprime.
def miller_rabin(n, rounds = 40):
# get the form n - 1 = 2^s * d
s = 0
d = n-1
while is_even(d):
d //= 2
s += 1
for i in range(rounds):
a = random.randrange(2,n)
x = pow(a, d, n)
if x == 1: # pseudoprime
continue
skip = False
for r in range(s):
if x == n-1: # pseudoprime
skip = True
break
x = pow(x, 2, n)
if not skip:
return False # composite
return True
# Tests if 'n' is probably prime using the Solovay-Strassen probabilistic algorithm.
#
# If 'n' is prime, the Euler's criterion holds for any 'a' (if it fails, 'n' is composite):
# a ^ ( (n-1)/2 ) = Legendre(a, n)
# Where Legendre stands for Legendre's symbol.
# Since we don't know if n is prime, we'll use Jacobi's symbol (a generalization
# of Legendre's symbol).
#
# Randomly pick 'rounds' bases.
# For each base 'a', see if the criterion holds. If not, 'n' is composite.
# If the criterion holds for all bases, 'n' is considered probably prime.
def solovay_strassen(n, rounds = 80):
for i in range(rounds):
a = random.randrange(2,n)
x = pow(a, (n-1)//2, n)
jacobi = jacobi_symbol(a, n)
if jacobi < 0: jacobi += n # we want n-1 instead of -1 (mod n)
if x != jacobi: return False
return True
def is_even(n):
return n & 1 == 0
# Calculates the Jacobi symbol of 'a' and 'n'.
#
# Note: the comments of the form '(i)' indicate what property
# was used from this: https://en.wikipedia.org/wiki/Jacobi_symbol#Properties
def jacobi_symbol(a, n):
j = 1
while a > 0:
while is_even(a):
# extract (2/n) using (4)
a //= 2
# evaluate (2/n) using (8)
if n % 8 == 3 or n % 8 == 5:
j = -j
# swap, by the law of quadratic reciprocity (6)
if n % 4 == 3 and a % 4 == 3:
j = -j
a, n = n, a
a = a % n # can reduce using (2)
# n != 1 means that gcd(a,n) != 1 => jacobi = 0
return j if n == 1 else 0
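# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Cross-checks the three tests on a few odd numbers > 3 (the stated precondition).
# The candidates are the editor's choice and include the Carmichael number 561
# and the composite 104731 = 11 * 9521 next to the prime 104729.
if __name__ == '__main__':
    for candidate in (5, 7, 9, 561, 7919, 104729, 104731):
        print('{}: trial_division={}, miller_rabin={}, solovay_strassen={}'.format(
            candidate, trial_division(candidate), miller_rabin(candidate),
            solovay_strassen(candidate)))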
| {
"content_hash": "6d4ff592915901f3e5adb0bf87c22474",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 93,
"avg_line_length": 33.13861386138614,
"alnum_prop": 0.5760382432028682,
"repo_name": "JesseEmond/benchmarkus-prime",
"id": "7bebaf00c549693467be45d587f5818abd6a2a7b",
"size": "3347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "primes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6183"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import sys
from PySide import QtGui
from PySide import QtCore
class Ejemplo11(QtGui.QWidget):
def __init__(self):
super(Ejemplo11, self).__init__()
self.WidgetsParte2_4()
def WidgetsParte2_4(self):
self.boton = QtGui.QPushButton('Abrir imagen')
self.boton.clicked.connect(self.abrir_imagen)
self.imagen_etiqueta = QtGui.QLabel()
self.imagen_etiqueta.setBackgroundRole(QtGui.QPalette.Base)
self.imagen_etiqueta.setSizePolicy(QtGui.QSizePolicy.Ignored,
QtGui.QSizePolicy.Ignored)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addWidget(self.imagen_etiqueta)
self.vbox.addWidget(self.boton)
self.setLayout(self.vbox)
self.setGeometry(300, 50, 500, 650)
self.setWindowTitle('Ejemplo de widget N°4')
self.show()
def abrir_imagen(self):
imagen_abierta = QtGui.QFileDialog.getOpenFileName(self,
'Abrir imagen', QtCore.QDir.currentPath(),
'Archivos de imagen (*.jpg *.png *gif)')
if imagen_abierta:
imagen = QtGui.QImage(imagen_abierta[0])
            if imagen.isNull():
                # The file could not be loaded as an image: report it and skip the update.
                print 'Could not load image:', imagen_abierta[0]
                return
self.imagen_etiqueta.setPixmap(
QtGui.QPixmap.fromImage(imagen))
self.imagen_etiqueta.setScaledContents(True)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
ej11 = Ejemplo11()
sys.exit(app.exec_())
| {
"content_hash": "b353953d9eeeea6e5614535c228344f8",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 69,
"avg_line_length": 31.93617021276596,
"alnum_prop": 0.6295802798134577,
"repo_name": "DoctorMalboro/CharlaPySidePyDayLujan2014",
"id": "798059fad1cc20011719935fe431a61385f1ceae",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ejemplos/ejemplo-11.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "123520"
},
{
"name": "JavaScript",
"bytes": "132187"
},
{
"name": "Python",
"bytes": "18931"
}
],
"symlink_target": ""
} |
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import traceback
import sys
import functools
import asyncio
import simplejson as json
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.exc import NoResultFound
import tornado.ioloop
import tornado.web
from keylime import config
from keylime.agentstates import AgentAttestStates
from keylime.common import states
from keylime.db.verifier_db import VerfierMain
from keylime.db.verifier_db import VerifierAllowlist
from keylime.db.keylime_db import DBEngineManager, SessionManager
from keylime import keylime_logging
from keylime import cloud_verifier_common
from keylime import revocation_notifier
from keylime import tornado_requests
from keylime import api_version as keylime_api_version
from keylime.ima_ast import START_HASH
logger = keylime_logging.init_logging('cloudverifier')
try:
engine = DBEngineManager().make_engine('cloud_verifier')
except SQLAlchemyError as err:
logger.error('Error creating SQL engine or session: %s', err)
sys.exit(1)
def get_session():
return SessionManager().make_session(engine)
def get_AgentAttestStates():
return AgentAttestStates.get_instance()
# The "exclude_db" dict values are removed from the response before adding the dict to the DB
# This is because we want these values to remain ephemeral and not stored in the database.
exclude_db = {
'registrar_data': '',
'nonce': '',
'b64_encrypted_V': '',
'provide_V': True,
'num_retries': 0,
'pending_event': None,
'first_verified': False,
# the following 3 items are updated to VerifierDB only when the AgentState is stored
'boottime': '',
'ima_pcrs': [],
'pcr10': '',
'next_ima_ml_entry': 0
}
def _from_db_obj(agent_db_obj):
fields = [ 'agent_id', \
'v', \
'ip', \
'port', \
'operational_state', \
'public_key', \
'tpm_policy', \
'vtpm_policy', \
'meta_data', \
'mb_refstate', \
'allowlist', \
'ima_sign_verification_keys', \
'revocation_key', \
'accept_tpm_hash_algs', \
'accept_tpm_encryption_algs', \
'accept_tpm_signing_algs', \
'hash_alg', \
'enc_alg', \
'sign_alg', \
'boottime', \
'ima_pcrs', \
'pcr10', \
'next_ima_ml_entry']
agent_dict = {}
for field in fields:
agent_dict[field] = getattr(agent_db_obj, field, None)
return agent_dict
def verifier_db_delete_agent(session, agent_id):
get_AgentAttestStates().delete_by_agent_id(agent_id)
session.query(VerfierMain).filter_by(
agent_id=agent_id).delete()
session.commit()
def store_attestation_state(agentAttestState):
# Only store if IMA log was evaluated
if len(agentAttestState.get_ima_pcrs()):
session = get_session()
try:
update_agent = session.query(VerfierMain).get(agentAttestState.get_agent_id())
update_agent.boottime = agentAttestState.get_boottime()
update_agent.next_ima_ml_entry = agentAttestState.get_next_ima_ml_entry()
ima_pcrs_dict = agentAttestState.get_ima_pcrs()
update_agent.ima_pcrs = list(ima_pcrs_dict.keys())
for pcr_num, value in ima_pcrs_dict.items():
setattr(update_agent, 'pcr%d' % pcr_num, value)
try:
session.add(update_agent)
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error on storing attestation state: %s', e)
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error on storing attestation state: %s', e)
class BaseHandler(tornado.web.RequestHandler):
def prepare(self): # pylint: disable=W0235
super().prepare()
def write_error(self, status_code, **kwargs):
self.set_header('Content-Type', 'text/json')
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
lines = []
for line in traceback.format_exception(*kwargs["exc_info"]):
lines.append(line)
self.finish(json.dumps({
'code': status_code,
'status': self._reason,
'traceback': lines,
'results': {},
}))
else:
self.finish(json.dumps({
'code': status_code,
'status': self._reason,
'results': {},
}))
def data_received(self, chunk):
raise NotImplementedError()
class MainHandler(tornado.web.RequestHandler):
def head(self):
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface instead")
def get(self):
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface instead")
def delete(self):
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface instead")
def post(self):
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface instead")
def put(self):
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface instead")
def data_received(self, chunk):
raise NotImplementedError()
class VersionHandler(BaseHandler):
def head(self):
config.echo_json_response(
self, 405, "Not Implemented: Use GET interface instead")
def get(self):
rest_params = config.get_restful_params(self.request.uri)
if rest_params is None:
config.echo_json_response(self, 405, "Not Implemented")
return
if "version" not in rest_params:
config.echo_json_response(self, 400, "URI not supported")
logger.warning('GET returning 400 response. URI not supported: %s', self.request.path)
return
version_info = {
"current_version": keylime_api_version.current_version(),
"supported_versions": keylime_api_version.all_versions(),
}
config.echo_json_response(self, 200, "Success", version_info)
def delete(self):
config.echo_json_response(
self, 405, "Not Implemented: Use GET interface instead")
def post(self):
config.echo_json_response(
self, 405, "Not Implemented: Use GET interface instead")
def put(self):
config.echo_json_response(
self, 405, "Not Implemented: Use GET interface instead")
def data_received(self, chunk):
raise NotImplementedError()
class AgentsHandler(BaseHandler):
def head(self):
"""HEAD not supported"""
config.echo_json_response(self, 405, "HEAD not supported")
def get(self):
"""This method handles the GET requests to retrieve status on agents from the Cloud Verifier.
Currently, only agents resources are available for GETing, i.e. /agents. All other GET uri's
will return errors. Agents requests require a single agent_id parameter which identifies the
agent to be returned. If the agent_id is not found, a 404 response is returned. If the agent_id
was not found, it either completed successfully, or failed. If found, the agent_id is still polling
to contact the Cloud Agent.
"""
session = get_session()
rest_params = config.get_restful_params(self.request.uri)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
config.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "uri not supported")
logger.warning('GET returning 400 response. uri not supported: %s', self.request.path)
return
agent_id = rest_params["agents"]
if (agent_id is not None) and (agent_id != ''):
try:
agent = session.query(VerfierMain).filter_by(
agent_id=agent_id).one_or_none()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
if agent is not None:
response = cloud_verifier_common.process_get_status(agent)
config.echo_json_response(self, 200, "Success", response)
else:
config.echo_json_response(self, 404, "agent id not found")
else:
json_response = None
if "bulk" in rest_params.keys():
agent_list = None
if ("verifier" in rest_params.keys()) and (rest_params["verifier"] != ''):
agent_list = session.query(VerfierMain).filter_by(verifier_id=rest_params["verifier"]).all()
else:
agent_list = session.query(VerfierMain).all()
json_response = {}
for agent in agent_list:
json_response[agent.agent_id] = cloud_verifier_common.process_get_status(agent)
config.echo_json_response(self, 200, "Success", json_response)
else:
if ("verifier" in rest_params.keys()) and (rest_params["verifier"] != ''):
json_response = session.query(VerfierMain.agent_id).filter_by(
verifier_id=rest_params["verifier"]).all()
else:
json_response = session.query(VerfierMain.agent_id).all()
config.echo_json_response(self, 200, "Success", {
'uuids': json_response})
logger.info('GET returning 200 response for agent_id list')
def delete(self):
"""This method handles the DELETE requests to remove agents from the Cloud Verifier.
Currently, only agents resources are available for DELETEing, i.e. /agents. All other DELETE uri's will return errors.
agents requests require a single agent_id parameter which identifies the agent to be deleted.
"""
session = get_session()
rest_params = config.get_restful_params(self.request.uri)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
config.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "uri not supported")
return
agent_id = rest_params["agents"]
if agent_id is None:
config.echo_json_response(self, 400, "uri not supported")
logger.warning('DELETE returning 400 response. uri not supported: %s', self.request.path)
return
try:
agent = session.query(VerfierMain).filter_by(
agent_id=agent_id).first()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
if agent is None:
config.echo_json_response(self, 404, "agent id not found")
logger.info('DELETE returning 404 response. agent id: %s not found.', agent_id)
return
verifier_id = config.get('cloud_verifier', 'cloudverifier_id', cloud_verifier_common.DEFAULT_VERIFIER_ID)
if verifier_id != agent.verifier_id:
config.echo_json_response(self, 404, "agent id associated to this verifier")
logger.info('DELETE returning 404 response. agent id: %s not associated to this verifer.', agent_id)
return
op_state = agent.operational_state
if op_state in (states.SAVED, states.FAILED, states.TERMINATED,
states.TENANT_FAILED, states.INVALID_QUOTE):
try:
verifier_db_delete_agent(session, agent_id)
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
config.echo_json_response(self, 200, "Success")
logger.info('DELETE returning 200 response for agent id: %s', agent_id)
else:
try:
update_agent = session.query(VerfierMain).get(agent_id)
update_agent.operational_state = states.TERMINATED
try:
session.add(update_agent)
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
session.commit()
config.echo_json_response(self, 202, "Accepted")
logger.info('DELETE returning 202 response for agent id: %s', agent_id)
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
def post(self):
"""This method handles the POST requests to add agents to the Cloud Verifier.
Currently, only agents resources are available for POSTing, i.e. /agents. All other POST uri's will return errors.
agents requests require a json block sent in the body
"""
session = get_session()
try:
rest_params = config.get_restful_params(self.request.uri)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
config.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "uri not supported")
logger.warning('POST returning 400 response. uri not supported: %s', self.request.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
content_length = len(self.request.body)
if content_length == 0:
config.echo_json_response(
self, 400, "Expected non zero content length")
logger.warning('POST returning 400 response. Expected non zero content length.')
else:
json_body = json.loads(self.request.body)
agent_data = {}
agent_data['v'] = json_body['v']
agent_data['ip'] = json_body['cloudagent_ip']
agent_data['port'] = int(json_body['cloudagent_port'])
agent_data['operational_state'] = states.START
agent_data['public_key'] = ""
agent_data['tpm_policy'] = json_body['tpm_policy']
agent_data['vtpm_policy'] = json_body['vtpm_policy']
agent_data['meta_data'] = json_body['metadata']
agent_data['allowlist'] = json_body['allowlist']
agent_data['mb_refstate'] = json_body['mb_refstate']
agent_data['ima_sign_verification_keys'] = json_body['ima_sign_verification_keys']
agent_data['revocation_key'] = json_body['revocation_key']
agent_data['accept_tpm_hash_algs'] = json_body['accept_tpm_hash_algs']
agent_data['accept_tpm_encryption_algs'] = json_body['accept_tpm_encryption_algs']
agent_data['accept_tpm_signing_algs'] = json_body['accept_tpm_signing_algs']
agent_data['hash_alg'] = ""
agent_data['enc_alg'] = ""
agent_data['sign_alg'] = ""
agent_data['agent_id'] = agent_id
agent_data['boottime'] = 0
agent_data['ima_pcrs'] = []
agent_data['pcr10'] = START_HASH
agent_data['next_ima_ml_entry'] = 0
agent_data['verifier_id'] = config.get('cloud_verifier', 'cloudverifier_id', cloud_verifier_common.DEFAULT_VERIFIER_ID)
agent_data['verifier_ip'] = config.get('cloud_verifier', 'cloudverifier_ip')
agent_data['verifier_port'] = config.get('cloud_verifier', 'cloudverifier_port')
is_valid, err_msg = cloud_verifier_common.validate_agent_data(agent_data)
if not is_valid:
config.echo_json_response(self, 400, err_msg)
logger.warning(err_msg)
return
try:
new_agent_count = session.query(
VerfierMain).filter_by(agent_id=agent_id).count()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
# don't allow overwriting
if new_agent_count > 0:
config.echo_json_response(
self, 409, "Agent of uuid %s already exists" % (agent_id))
logger.warning("Agent of uuid %s already exists", agent_id)
else:
try:
# Add the agent and data
session.add(VerfierMain(**agent_data))
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
for key in list(exclude_db.keys()):
agent_data[key] = exclude_db[key]
asyncio.ensure_future(
process_agent(agent_data, states.GET_QUOTE))
config.echo_json_response(self, 200, "Success")
logger.info('POST returning 200 response for adding agent id: %s', agent_id)
else:
config.echo_json_response(self, 400, "uri not supported")
logger.warning("POST returning 400 response. uri not supported")
except Exception as e:
config.echo_json_response(self, 400, "Exception error: %s" % e)
logger.warning("POST returning 400 response. Exception error: %s", e)
logger.exception(e)
self.finish()
def put(self):
"""This method handles the PUT requests to add agents to the Cloud Verifier.
Currently, only agents resources are available for PUTing, i.e. /agents. All other PUT uri's will return errors.
agents requests require a json block sent in the body
"""
session = get_session()
try:
rest_params = config.get_restful_params(self.request.uri)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
config.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "uri not supported")
logger.warning('PUT returning 400 response. uri not supported: %s', self.request.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
config.echo_json_response(self, 400, "uri not supported")
logger.warning("PUT returning 400 response. uri not supported")
try:
verifier_id = config.get('cloud_verifier', 'cloudverifier_id', cloud_verifier_common.DEFAULT_VERIFIER_ID)
agent = session.query(VerfierMain).filter_by(
agent_id=agent_id, verifier_id=verifier_id).one()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
if agent is None:
config.echo_json_response(self, 404, "agent id not found")
logger.info('PUT returning 404 response. agent id: %s not found.', agent_id)
return
if "reactivate" in rest_params:
agent.operational_state = states.START
asyncio.ensure_future(
process_agent(agent, states.GET_QUOTE))
config.echo_json_response(self, 200, "Success")
logger.info('PUT returning 200 response for agent id: %s', agent_id)
elif "stop" in rest_params:
# do stuff for terminate
logger.debug("Stopping polling on %s", agent_id)
try:
session.query(VerfierMain).filter(VerfierMain.agent_id == agent_id).update(
{'operational_state': states.TENANT_FAILED})
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
config.echo_json_response(self, 200, "Success")
logger.info('PUT returning 200 response for agent id: %s', agent_id)
else:
config.echo_json_response(self, 400, "uri not supported")
logger.warning("PUT returning 400 response. uri not supported")
except Exception as e:
config.echo_json_response(self, 400, "Exception error: %s" % e)
logger.warning("PUT returning 400 response. Exception error: %s", e)
logger.exception(e)
self.finish()
def data_received(self, chunk):
raise NotImplementedError()
class AllowlistHandler(BaseHandler):
def head(self):
config.echo_json_response(
self, 400, "Allowlist handler: HEAD Not Implemented")
def get(self):
"""Get an allowlist
GET /allowlists/{name}
"""
rest_params = config.get_restful_params(self.request.uri)
if rest_params is None or 'allowlists' not in rest_params:
config.echo_json_response(self, 400, "Invalid URL")
return
if not rest_params["api_version"]:
config.echo_json_response(self, 400, "API Version not supported")
return
allowlist_name = rest_params['allowlists']
if allowlist_name is None:
config.echo_json_response(self, 400, "Invalid URL")
logger.warning(
'GET returning 400 response: ' + self.request.path)
return
session = get_session()
try:
allowlist = session.query(VerifierAllowlist).filter_by(
name=allowlist_name).one()
except NoResultFound:
config.echo_json_response(self, 404, "Allowlist %s not found" % allowlist_name)
return
except SQLAlchemyError as e:
logger.error(f'SQLAlchemy Error: {e}')
config.echo_json_response(self, 500, "Failed to get allowlist")
raise
response = {}
for field in ('name', 'tpm_policy', 'vtpm_policy', 'ima_policy'):
response[field] = getattr(allowlist, field, None)
config.echo_json_response(self, 200, 'Success', response)
def delete(self):
"""Delete an allowlist
DELETE /allowlists/{name}
"""
rest_params = config.get_restful_params(self.request.uri)
if rest_params is None or 'allowlists' not in rest_params:
config.echo_json_response(self, 400, "Invalid URL")
return
if not rest_params["api_version"]:
config.echo_json_response(self, 400, "API Version not supported")
return
allowlist_name = rest_params['allowlists']
if allowlist_name is None:
config.echo_json_response(self, 400, "Invalid URL")
logger.warning(
'DELETE returning 400 response: ' + self.request.path)
return
session = get_session()
try:
session.query(VerifierAllowlist).filter_by(
name=allowlist_name).one()
except NoResultFound:
config.echo_json_response(self, 404, "Allowlist %s not found" % allowlist_name)
return
except SQLAlchemyError as e:
logger.error(f'SQLAlchemy Error: {e}')
config.echo_json_response(self, 500, "Failed to get allowlist")
raise
try:
session.query(VerifierAllowlist).filter_by(
name=allowlist_name).delete()
session.commit()
except SQLAlchemyError as e:
logger.error(f'SQLAlchemy Error: {e}')
config.echo_json_response(self, 500, "Failed to get allowlist")
raise
# NOTE(kaifeng) 204 Can not have response body, but current helper
# doesn't support this case.
self.set_status(204)
self.set_header('Content-Type', 'application/json')
self.finish()
logger.info(
'DELETE returning 204 response for allowlist: ' + allowlist_name)
def post(self):
"""Create an allowlist
POST /allowlists/{name}
body: {"tpm_policy": {..}, "vtpm_policy": {..}
"""
rest_params = config.get_restful_params(self.request.uri)
if rest_params is None or 'allowlists' not in rest_params:
config.echo_json_response(self, 400, "Invalid URL")
return
if not rest_params["api_version"]:
config.echo_json_response(self, 400, "API Version not supported")
return
allowlist_name = rest_params['allowlists']
if allowlist_name is None:
config.echo_json_response(self, 400, "Invalid URL")
return
content_length = len(self.request.body)
if content_length == 0:
config.echo_json_response(
self, 400, "Expected non zero content length")
logger.warning(
'POST returning 400 response. Expected non zero content length.')
return
allowlist = {}
json_body = json.loads(self.request.body)
allowlist['name'] = allowlist_name
tpm_policy = json_body.get('tpm_policy')
if tpm_policy:
allowlist['tpm_policy'] = tpm_policy
vtpm_policy = json_body.get('vtpm_policy')
if vtpm_policy:
allowlist['vtpm_policy'] = vtpm_policy
ima_policy = json_body.get('ima_policy')
if ima_policy:
allowlist['ima_policy'] = ima_policy
session = get_session()
        # don't allow overwriting an existing allowlist
try:
al_count = session.query(
VerifierAllowlist).filter_by(name=allowlist_name).count()
if al_count > 0:
config.echo_json_response(
self, 409, "Allowlist with name %s already exists" % allowlist_name)
logger.warning(
"Allowlist with name %s already exists" % allowlist_name)
return
except SQLAlchemyError as e:
logger.error(f'SQLAlchemy Error: {e}')
raise
try:
# Add the agent and data
session.add(VerifierAllowlist(**allowlist))
session.commit()
except SQLAlchemyError as e:
logger.error(f'SQLAlchemy Error: {e}')
raise
config.echo_json_response(self, 201)
logger.info('POST returning 201')
def put(self):
config.echo_json_response(
self, 400, "Allowlist handler: PUT Not Implemented")
def data_received(self, chunk):
raise NotImplementedError()
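# Illustrative sketch (assumptions: a verifier on localhost:8881, API version
# v1.0, TLS verification disabled for brevity; the policy values are
# placeholders): exercising the allowlist endpoints above with `requests`.
#
#   import json, requests
#
#   url = "https://localhost:8881/v1.0/allowlists/mylist"
#   body = {"tpm_policy": {"mask": "0x408000"}, "ima_policy": ""}
#   requests.post(url, data=json.dumps(body), verify=False)    # 201 created
#   requests.get(url, verify=False)                            # 200 + policies
#   requests.delete(url, verify=False)                         # 204 deleted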
async def invoke_get_quote(agent, need_pubkey):
if agent is None:
raise Exception("agent deleted while being processed")
params = cloud_verifier_common.prepare_get_quote(agent)
partial_req = "1"
if need_pubkey:
partial_req = "0"
version = keylime_api_version.current_version()
res = tornado_requests.request("GET",
"http://%s:%d/v%s/quotes/integrity?nonce=%s&mask=%s&vmask=%s&partial=%s&ima_ml_entry=%d" %
(agent['ip'], agent['port'], version, params["nonce"], params["mask"], params['vmask'], partial_req, params['ima_ml_entry']), context=None)
response = await res
if response.status_code != 200:
# this is a connection error, retry get quote
if response.status_code == 599:
asyncio.ensure_future(process_agent(
agent, states.GET_QUOTE_RETRY))
else:
# catastrophic error, do not continue
logger.critical("Unexpected Get Quote response error for cloud agent %s, Error: %s", agent['agent_id'], response.status_code)
asyncio.ensure_future(process_agent(agent, states.FAILED))
else:
try:
json_response = json.loads(response.body)
# validate the cloud agent response
            if 'provide_V' not in agent:
agent['provide_V'] = True
agentAttestState = get_AgentAttestStates().get_by_agent_id(agent['agent_id'])
if cloud_verifier_common.process_quote_response(agent, json_response['results'], agentAttestState):
if agent['provide_V']:
asyncio.ensure_future(process_agent(agent, states.PROVIDE_V))
else:
asyncio.ensure_future(process_agent(agent, states.GET_QUOTE))
else:
asyncio.ensure_future(process_agent(agent, states.INVALID_QUOTE))
# store the attestation state
store_attestation_state(agentAttestState)
except Exception as e:
logger.exception(e)
async def invoke_provide_v(agent):
if agent is None:
raise Exception("Agent deleted while being processed")
try:
if agent['pending_event'] is not None:
agent['pending_event'] = None
except KeyError:
pass
v_json_message = cloud_verifier_common.prepare_v(agent)
version = keylime_api_version.current_version()
res = tornado_requests.request(
"POST", "http://%s:%d/%s/keys/vkey" % (agent['ip'], agent['port'], version), data=v_json_message)
response = await res
if response.status_code != 200:
if response.status_code == 599:
asyncio.ensure_future(
process_agent(agent, states.PROVIDE_V_RETRY))
else:
# catastrophic error, do not continue
logger.critical("Unexpected Provide V response error for cloud agent %s, Error: %s", agent['agent_id'], response.error)
asyncio.ensure_future(process_agent(agent, states.FAILED))
else:
asyncio.ensure_future(process_agent(agent, states.GET_QUOTE))
async def process_agent(agent, new_operational_state):
# Convert to dict if the agent arg is a db object
if not isinstance(agent, dict):
agent = _from_db_obj(agent)
session = get_session()
try:
main_agent_operational_state = agent['operational_state']
try:
stored_agent = session.query(VerfierMain).filter_by(
agent_id=str(agent['agent_id'])).first()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
        # if the user terminated this agent
if stored_agent.operational_state == states.TERMINATED:
logger.warning("Agent %s terminated by user.", agent['agent_id'])
if agent['pending_event'] is not None:
tornado.ioloop.IOLoop.current().remove_timeout(
agent['pending_event'])
verifier_db_delete_agent(session, agent['agent_id'])
return
# if the user tells us to stop polling because the tenant quote check failed
if stored_agent.operational_state == states.TENANT_FAILED:
logger.warning("Agent %s has failed tenant quote. Stopping polling", agent['agent_id'])
if agent['pending_event'] is not None:
tornado.ioloop.IOLoop.current().remove_timeout(
agent['pending_event'])
return
# If failed during processing, log regardless and drop it on the floor
# The administration application (tenant) can GET the status and act accordingly (delete/retry/etc).
if new_operational_state in (states.FAILED, states.INVALID_QUOTE):
agent['operational_state'] = new_operational_state
# issue notification for invalid quotes
if new_operational_state == states.INVALID_QUOTE:
cloud_verifier_common.notify_error(agent)
if agent['pending_event'] is not None:
tornado.ioloop.IOLoop.current().remove_timeout(
agent['pending_event'])
for key in exclude_db:
if key in agent:
del agent[key]
session.query(VerfierMain).filter_by(
agent_id=agent['agent_id']).update(agent)
session.commit()
logger.warning("Agent %s failed, stopping polling", agent['agent_id'])
return
        # propagate all state, but remove non-DB keys first (using exclude_db)
try:
agent_db = dict(agent)
for key in exclude_db:
if key in agent_db:
del agent_db[key]
session.query(VerfierMain).filter_by(
agent_id=agent_db['agent_id']).update(agent_db)
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
# if new, get a quote
if (main_agent_operational_state == states.START and
new_operational_state == states.GET_QUOTE):
agent['num_retries'] = 0
agent['operational_state'] = states.GET_QUOTE
await invoke_get_quote(agent, True)
return
if (main_agent_operational_state == states.GET_QUOTE and
new_operational_state == states.PROVIDE_V):
agent['num_retries'] = 0
agent['operational_state'] = states.PROVIDE_V
await invoke_provide_v(agent)
return
if (main_agent_operational_state in (states.PROVIDE_V, states.GET_QUOTE) and
new_operational_state == states.GET_QUOTE):
agent['num_retries'] = 0
interval = config.getfloat('cloud_verifier', 'quote_interval')
agent['operational_state'] = states.GET_QUOTE
if interval == 0:
await invoke_get_quote(agent, False)
else:
logger.debug("Setting up callback to check again in %f seconds", interval)
# set up a call back to check again
cb = functools.partial(invoke_get_quote, agent, False)
pending = tornado.ioloop.IOLoop.current().call_later(interval, cb)
agent['pending_event'] = pending
return
maxr = config.getint('cloud_verifier', 'max_retries')
retry = config.getfloat('cloud_verifier', 'retry_interval')
if (main_agent_operational_state == states.GET_QUOTE and
new_operational_state == states.GET_QUOTE_RETRY):
if agent['num_retries'] >= maxr:
logger.warning("Agent %s was not reachable for quote in %d tries, setting state to FAILED", agent['agent_id'], maxr)
if agent['first_verified']: # only notify on previously good agents
cloud_verifier_common.notify_error(
agent, msgtype='comm_error')
else:
logger.debug("Communication error for new agent. No notification will be sent")
await process_agent(agent, states.FAILED)
else:
agent['operational_state'] = states.GET_QUOTE
cb = functools.partial(invoke_get_quote, agent, True)
agent['num_retries'] += 1
logger.info("Connection to %s refused after %d/%d tries, trying again in %f seconds", agent['ip'], agent['num_retries'], maxr, retry)
tornado.ioloop.IOLoop.current().call_later(retry, cb)
return
if (main_agent_operational_state == states.PROVIDE_V and
new_operational_state == states.PROVIDE_V_RETRY):
if agent['num_retries'] >= maxr:
logger.warning("Agent %s was not reachable to provide v in %d tries, setting state to FAILED", agent['agent_id'], maxr)
cloud_verifier_common.notify_error(
agent, msgtype='comm_error')
await process_agent(agent, states.FAILED)
else:
agent['operational_state'] = states.PROVIDE_V
cb = functools.partial(invoke_provide_v, agent)
agent['num_retries'] += 1
logger.info("Connection to %s refused after %d/%d tries, trying again in %f seconds", agent['ip'], agent['num_retries'], maxr, retry)
tornado.ioloop.IOLoop.current().call_later(retry, cb)
return
raise Exception("nothing should ever fall out of this!")
except Exception as e:
logger.error("Polling thread error: %s", e)
logger.exception(e)
async def activate_agents(verifier_id, verifier_ip, verifier_port):
session = get_session()
aas = get_AgentAttestStates()
try:
agents = session.query(VerfierMain).filter_by(
verifier_id=verifier_id).all()
for agent in agents:
agent.verifier_ip = verifier_ip
            agent.verifier_port = verifier_port
if agent.operational_state == states.START:
asyncio.ensure_future(process_agent(agent, states.GET_QUOTE))
if agent.boottime:
ima_pcrs_dict = {}
for pcr_num in agent.ima_pcrs:
ima_pcrs_dict[pcr_num] = getattr(agent, 'pcr%d' % pcr_num)
aas.add(agent.agent_id, agent.boottime, ima_pcrs_dict, agent.next_ima_ml_entry)
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
def start_tornado(tornado_server, port):
tornado_server.listen(port)
print("Starting Torando on port " + str(port))
tornado.ioloop.IOLoop.instance().start()
print("Tornado finished")
def main():
"""Main method of the Cloud Verifier Server. This method is encapsulated in a function for packaging to allow it to be
called as a function by an external program."""
cloudverifier_port = config.get('cloud_verifier', 'cloudverifier_port')
cloudverifier_host = config.get('cloud_verifier', 'cloudverifier_ip')
cloudverifier_id = config.get('cloud_verifier', 'cloudverifier_id', cloud_verifier_common.DEFAULT_VERIFIER_ID)
# allow tornado's max upload size to be configurable
max_upload_size = None
if config.has_option('cloud_verifier', 'max_upload_size'):
max_upload_size = int(config.get('cloud_verifier', 'max_upload_size'))
VerfierMain.metadata.create_all(engine, checkfirst=True)
session = get_session()
try:
query_all = session.query(VerfierMain).all()
for row in query_all:
if row.operational_state in states.APPROVED_REACTIVATE_STATES:
row.operational_state = states.START
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
num = session.query(VerfierMain.agent_id).count()
if num > 0:
agent_ids = session.query(VerfierMain.agent_id).all()
logger.info("Agent ids in db loaded from file: %s", agent_ids)
logger.info('Starting Cloud Verifier (tornado) on port %s, use <Ctrl-C> to stop', cloudverifier_port)
# print out API versions we support
keylime_api_version.log_api_versions(logger)
app = tornado.web.Application([
(r"/v?[0-9]+(?:\.[0-9]+)?/agents/.*", AgentsHandler),
(r"/v?[0-9]+(?:\.[0-9]+)?/allowlists/.*", AllowlistHandler),
(r"/versions?", VersionHandler),
(r".*", MainHandler),
])
context = cloud_verifier_common.init_mtls()
# after TLS is up, start revocation notifier
if config.getboolean('cloud_verifier', 'revocation_notifier'):
logger.info("Starting service for revocation notifications on port %s", config.getint('cloud_verifier', 'revocation_notifier_port'))
revocation_notifier.start_broker()
sockets = tornado.netutil.bind_sockets(
int(cloudverifier_port), address=cloudverifier_host)
task_id = tornado.process.fork_processes(config.getint(
'cloud_verifier', 'multiprocessing_pool_num_workers'))
asyncio.set_event_loop(asyncio.new_event_loop())
# Auto reactivate agent
if task_id == 0:
asyncio.ensure_future(activate_agents(cloudverifier_id, cloudverifier_host, cloudverifier_port))
server = tornado.httpserver.HTTPServer(app, ssl_options=context, max_buffer_size=max_upload_size)
server.add_sockets(sockets)
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
tornado.ioloop.IOLoop.instance().stop()
if config.getboolean('cloud_verifier', 'revocation_notifier'):
revocation_notifier.stop_broker()
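# Illustrative sketch of the [cloud_verifier] configuration options read above
# (the option names come from the config.get*/getboolean calls in this module;
# the values shown are placeholders, not recommended settings):
#
#   [cloud_verifier]
#   cloudverifier_ip = 127.0.0.1
#   cloudverifier_port = 8881
#   revocation_notifier = True
#   revocation_notifier_port = 8992
#   multiprocessing_pool_num_workers = 4
#   quote_interval = 2
#   max_retries = 10
#   retry_interval = 1
#   max_upload_size = 104857600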
| {
"content_hash": "7c5f3a581c61338faa9b12797e66b7cf",
"timestamp": "",
"source": "github",
"line_count": 1006,
"max_line_length": 174,
"avg_line_length": 41.35487077534791,
"alnum_prop": 0.5814484532365455,
"repo_name": "mit-ll/python-keylime",
"id": "cfcb59f556094b20442dbc68eda467acad8028cf",
"size": "41622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keylime/cloud_verifier_tornado.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "3128"
},
{
"name": "CSS",
"bytes": "4767"
},
{
"name": "JavaScript",
"bytes": "18188"
},
{
"name": "Python",
"bytes": "617887"
},
{
"name": "Shell",
"bytes": "51983"
}
],
"symlink_target": ""
} |
import os
import setuptools
VERSION = '0.1.1'
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
setuptools.setup(
name='pyglib',
author='Benjamin Staffin',
author_email='[email protected]',
url='https://github.com/benley/pyglib',
install_requires=[
'python-gflags',
'glog>=0.3',
],
description='Opinionated but handy app startup wrapper.',
long_description=README,
packages=['pyglib'],
license='BSD',
version=VERSION,
classifiers=[
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: System :: Logging',
'Topic :: Software Development :: Libraries :: Python Modules',
],
platforms='any',
)
| {
"content_hash": "3d1fd1ab3db07978603657e1fcb09315",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 27.129032258064516,
"alnum_prop": 0.6076099881093936,
"repo_name": "benley/pyglib",
"id": "43cb28b47c5faf4602af4057e03636bafa1ffbb9",
"size": "864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6126"
}
],
"symlink_target": ""
} |
"""
Note: Django 1.4 support was dropped in #107
https://github.com/pydanny/dj-stripe/pull/107
"""
from django.contrib import admin
from .models import Event, EventProcessingException, Transfer, Charge  # , Plan
from .models import Invoice, InvoiceItem, CurrentSubscription, Customer
class CustomerHasCardListFilter(admin.SimpleListFilter):
title = "card presence"
parameter_name = "has_card"
def lookups(self, request, model_admin):
return [
["yes", "Has Card"],
["no", "Does Not Have a Card"]
]
def queryset(self, request, queryset):
if self.value() == "yes":
return queryset.exclude(card_fingerprint="")
if self.value() == "no":
return queryset.filter(card_fingerprint="")
class InvoiceCustomerHasCardListFilter(admin.SimpleListFilter):
title = "card presence"
parameter_name = "has_card"
def lookups(self, request, model_admin):
return [
["yes", "Has Card"],
["no", "Does Not Have a Card"]
]
def queryset(self, request, queryset):
if self.value() == "yes":
return queryset.exclude(customer__card_fingerprint="")
if self.value() == "no":
return queryset.filter(customer__card_fingerprint="")
class CustomerSubscriptionStatusListFilter(admin.SimpleListFilter):
title = "subscription status"
parameter_name = "sub_status"
def lookups(self, request, model_admin):
statuses = [
[x, x.replace("_", " ").title()]
for x in CurrentSubscription.objects.all().values_list(
"status",
flat=True
).distinct()
]
statuses.append(["none", "No Subscription"])
return statuses
def queryset(self, request, queryset):
if self.value() is None:
return queryset.all()
else:
return queryset.filter(current_subscription__status=self.value())
def send_charge_receipt(modeladmin, request, queryset):
"""
Function for sending receipts from the admin if a receipt is not sent for
a specific charge.
"""
for charge in queryset:
charge.send_receipt()
admin.site.register(
Charge,
readonly_fields=('created',),
list_display=[
"stripe_id",
"customer",
"amount",
"description",
"paid",
"disputed",
"refunded",
"fee",
"receipt_sent",
"created"
],
search_fields=[
"stripe_id",
"customer__stripe_id",
"customer__subscriber__email",
"card_last_4",
"invoice__stripe_id"
],
list_filter=[
"paid",
"disputed",
"refunded",
"card_kind",
"created"
],
raw_id_fields=[
"customer",
"invoice"
],
actions=(send_charge_receipt,),
)
admin.site.register(
EventProcessingException,
readonly_fields=('created',),
list_display=[
"message",
"event",
"created"
],
search_fields=[
"message",
"traceback",
"data"
],
)
admin.site.register(
Event,
raw_id_fields=["customer"],
readonly_fields=('created',),
list_display=[
"stripe_id",
"kind",
"livemode",
"valid",
"processed",
"created"
],
list_filter=[
"kind",
"created",
"valid",
"processed"
],
search_fields=[
"stripe_id",
"customer__stripe_id",
"customer__subscriber__email",
"validated_message"
],
)
class CurrentSubscriptionInline(admin.TabularInline):
model = CurrentSubscription
def subscription_status(obj):
return obj.current_subscription.status
subscription_status.short_description = "Subscription Status"
admin.site.register(
Customer,
#raw_id_fields=["subscriber"],
readonly_fields=('created',),
list_display=[
"stripe_id",
#"subscriber",
"card_kind",
"card_last_4",
subscription_status,
"created"
],
list_filter=[
"card_kind",
CustomerHasCardListFilter,
CustomerSubscriptionStatusListFilter
],
search_fields=[
"stripe_id",
"subscriber__email"
],
inlines=[CurrentSubscriptionInline]
)
class InvoiceItemInline(admin.TabularInline):
model = InvoiceItem
def customer_has_card(obj):
""" Returns True if the customer has a card attached to its account."""
return obj.customer.card_fingerprint != ""
customer_has_card.short_description = "Customer Has Card"
def customer_email(obj):
""" Returns a string representation of the customer's email."""
return str(obj.customer.subscriber.email)
customer_email.short_description = "Customer"
admin.site.register(
Invoice,
raw_id_fields=["customer"],
readonly_fields=('created',),
list_display=[
"stripe_id",
"paid",
"closed",
customer_email,
customer_has_card,
"period_start",
"period_end",
"subtotal",
"total",
"created"
],
search_fields=[
"stripe_id",
"customer__stripe_id",
"customer__subscriber__email"
],
list_filter=[
InvoiceCustomerHasCardListFilter,
"paid",
"closed",
"attempted",
"attempts",
"created",
"date",
"period_end",
"total"
],
inlines=[InvoiceItemInline]
)
admin.site.register(
Transfer,
raw_id_fields=["event"],
readonly_fields=('created',),
list_display=[
"stripe_id",
"amount",
"status",
"date",
"description",
"created"
],
search_fields=[
"stripe_id",
"event__stripe_id"
]
)
# class PlanAdmin(admin.ModelAdmin):
#
# def save_model(self, request, obj, form, change):
# """Update or create objects using our custom methods that
# sync with Stripe."""
#
# if change:
# obj.update_name()
#
# else:
# Plan.get_or_create(**form.cleaned_data)
#
# def get_readonly_fields(self, request, obj=None):
# readonly_fields = list(self.readonly_fields)
# if obj:
# readonly_fields.extend([
# 'stripe_id',
# 'amount',
# 'currency',
# 'interval',
# 'interval_count',
# 'trial_period_days'])
#
# return readonly_fields
#
# admin.site.register(Plan, PlanAdmin)
| {
"content_hash": "a230291a54f32fba6c0b35b48725504c",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 77,
"avg_line_length": 23.23859649122807,
"alnum_prop": 0.5582062509436811,
"repo_name": "rawjam/dj-stripe",
"id": "fbedf2d89b79450e7ba0ed8e37ee0b81065db280",
"size": "6647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djstripe/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "22023"
},
{
"name": "Makefile",
"bytes": "226"
},
{
"name": "Python",
"bytes": "189353"
}
],
"symlink_target": ""
} |
from google.cloud import container_v1
async def sample_set_addons_config():
# Create a client
client = container_v1.ClusterManagerAsyncClient()
# Initialize request argument(s)
request = container_v1.SetAddonsConfigRequest(
)
# Make the request
response = await client.set_addons_config(request=request)
# Handle the response
print(response)
# [END container_v1_generated_ClusterManager_SetAddonsConfig_async]
| {
"content_hash": "aca4257155809b1d5e4c03533ba22870",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 67,
"avg_line_length": 25.11111111111111,
"alnum_prop": 0.7323008849557522,
"repo_name": "googleapis/python-container",
"id": "fd9de008a6cdc80fa06d1bff067b7a989bc8e54a",
"size": "1847",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "samples/generated_samples/container_v1_generated_cluster_manager_set_addons_config_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2480646"
},
{
"name": "Shell",
"bytes": "30669"
}
],
"symlink_target": ""
} |
"""Tests for API client and approvals-related API calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import threading
import time
from absl import app
from grr_response_core.lib.util import compatibility
from grr_response_server.gui import api_auth_manager
from grr_response_server.gui import api_call_router_with_approval_checks
from grr_response_server.gui import api_integration_test_lib
from grr.test_lib import hunt_test_lib
from grr.test_lib import test_lib
class ApiClientLibApprovalsTest(api_integration_test_lib.ApiIntegrationTest,
hunt_test_lib.StandardHuntTestMixin):
def setUp(self):
super(ApiClientLibApprovalsTest, self).setUp()
cls = api_call_router_with_approval_checks.ApiCallRouterWithApprovalChecks
cls.ClearCache()
config_overrider = test_lib.ConfigOverrider(
{"API.DefaultRouter": compatibility.GetName(cls)})
config_overrider.Start()
self.addCleanup(config_overrider.Stop)
# Force creation of new APIAuthorizationManager, so that configuration
# changes are picked up.
api_auth_manager.InitializeApiAuthManager()
def testCreateClientApproval(self):
client_id = self.SetupClient(0)
approval = self.api.Client(client_id).CreateApproval(
reason="blah", notified_users=[u"foo"])
self.assertEqual(approval.client_id, client_id)
self.assertEqual(approval.data.subject.client_id, client_id)
self.assertEqual(approval.data.reason, "blah")
self.assertFalse(approval.data.is_valid)
def testWaitUntilClientApprovalValid(self):
client_id = self.SetupClient(0)
approval = self.api.Client(client_id).CreateApproval(
reason="blah", notified_users=[u"foo"])
self.assertFalse(approval.data.is_valid)
def ProcessApproval():
time.sleep(1)
self.GrantClientApproval(
client_id,
requestor=self.token.username,
approval_id=approval.approval_id,
approver=u"foo")
thread = threading.Thread(name="ProcessApprover", target=ProcessApproval)
thread.start()
try:
result_approval = approval.WaitUntilValid()
self.assertTrue(result_approval.data.is_valid)
finally:
thread.join()
def testCreateHuntApproval(self):
h_id = self.StartHunt()
approval = self.api.Hunt(h_id).CreateApproval(
reason="blah", notified_users=[u"foo"])
self.assertEqual(approval.hunt_id, h_id)
self.assertEqual(approval.data.subject.hunt_id, h_id)
self.assertEqual(approval.data.reason, "blah")
self.assertFalse(approval.data.is_valid)
def testWaitUntilHuntApprovalValid(self):
h_id = self.StartHunt()
approval = self.api.Hunt(h_id).CreateApproval(
reason="blah", notified_users=[u"approver"])
self.assertFalse(approval.data.is_valid)
def ProcessApproval():
time.sleep(1)
self.GrantHuntApproval(
h_id,
requestor=self.token.username,
approval_id=approval.approval_id,
approver=u"approver")
thread = threading.Thread(name="HuntApprover", target=ProcessApproval)
thread.start()
try:
result_approval = approval.WaitUntilValid()
self.assertTrue(result_approval.data.is_valid)
finally:
thread.join()
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| {
"content_hash": "d259b136359af786f7d35075ac7751a1",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 78,
"avg_line_length": 31.118181818181817,
"alnum_prop": 0.7028921998247152,
"repo_name": "demonchild2112/travis-test",
"id": "252fa9510b263c17699823876c82a25658adfcb7",
"size": "3445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/server/grr_response_server/gui/api_integration_tests/approvals_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3446"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "35549"
},
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HCL",
"bytes": "7208"
},
{
"name": "HTML",
"bytes": "190212"
},
{
"name": "JavaScript",
"bytes": "11691"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7213255"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "48882"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "51"
}
],
"symlink_target": ""
} |
"""Generates training data for learning/updating MentorNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import itertools
import os
import pickle
import models
import numpy as np
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_string('outdir', '', 'Directory to the save training data.')
flags.DEFINE_string('vstar_fn', '', 'the vstar function to use.')
flags.DEFINE_string('vstar_gamma', '', 'the hyper_parameter for the vstar_fn')
flags.DEFINE_integer('sample_size', 100000,
'size to of the total generated data set.')
flags.DEFINE_string('input_csv_filename', '', 'input_csv_filename')
FLAGS = flags.FLAGS
tf.logging.set_verbosity(tf.logging.INFO)
def generate_pretrain_defined(vstar_fn, outdir, sample_size):
"""Generates a trainable dataset given a vstar_fn.
Args:
    vstar_fn: the v-star function used to compute the target v* values.
outdir: directory to save the training data.
sample_size: size of the sample.
"""
batch_l = np.concatenate((np.arange(0, 10, 0.1), np.arange(10, 30, 1)))
batch_diff = np.arange(-5, 5, 0.1)
batch_y = np.array([0])
batch_e = np.arange(0, 100, 1)
data = []
for t in itertools.product(batch_l, batch_diff, batch_y, batch_e):
data.append(t)
data = np.array(data)
v = vstar_fn(data)
v = v.reshape([-1, 1])
data = np.hstack((data, v))
perm = np.arange(data.shape[0])
np.random.shuffle(perm)
data = data[perm[0:min(sample_size, len(perm))],]
tr_size = int(data.shape[0] * 0.8)
tr = data[0:tr_size]
ts = data[(tr_size + 1):data.shape[0]]
if not os.path.exists(outdir):
os.makedirs(outdir)
print('training_shape={} test_shape={}'.format(tr.shape, ts.shape))
with open(os.path.join(outdir, 'tr.p'), 'wb') as outfile:
pickle.dump(tr, outfile)
with open(os.path.join(outdir, 'ts.p'), 'wb') as outfile:
pickle.dump(ts, outfile)
def generate_data_driven(input_csv_filename,
outdir,
percentile_range='40,50,60,75,80,90'):
"""Generates a data-driven trainable dataset, given a CSV.
Refer to README.md for details on how to format the CSV.
Args:
input_csv_filename: the path of the CSV file. The csv file format
0: epoch_percentage
1: noisy label
2: clean label
3: loss
outdir: directory to save the training data.
percentile_range: the percentiles used to compute the moving average.
"""
raw = read_from_csv(input_csv_filename)
  raw = np.array(list(raw.values()))
dataset_name = os.path.splitext(os.path.basename(input_csv_filename))[0]
percentile_range = percentile_range.split(',')
percentile_range = [int(x) for x in percentile_range]
for percentile in percentile_range:
percentile = int(percentile)
p_perncentile = np.percentile(raw[:, 3], percentile)
v_star = np.float32(raw[:, 1] == raw[:, 2])
l = raw[:, 3]
diff = raw[:, 3] - p_perncentile
# label not used in the current version.
y = np.array([0] * len(v_star))
epoch_percentage = raw[:, 0]
data = np.vstack((l, diff, y, epoch_percentage, v_star))
data = np.transpose(data)
perm = np.arange(data.shape[0])
np.random.shuffle(perm)
data = data[perm,]
tr_size = int(data.shape[0] * 0.8)
tr = data[0:tr_size]
ts = data[(tr_size + 1):data.shape[0]]
cur_outdir = os.path.join(
outdir, '{}_percentile_{}'.format(dataset_name, percentile))
if not os.path.exists(cur_outdir):
os.makedirs(cur_outdir)
print('training_shape={} test_shape={}'.format(tr.shape, ts.shape))
print(cur_outdir)
with open(os.path.join(cur_outdir, 'tr.p'), 'wb') as outfile:
pickle.dump(tr, outfile)
with open(os.path.join(cur_outdir, 'ts.p'), 'wb') as outfile:
pickle.dump(ts, outfile)
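# Illustrative CSV record (values are made up): each cell is expected to be a
# space-separated string "<id> <epoch_percentage> <noisy_label> <clean_label>
# <loss>". A row such as
#
#   img_00042 0.25 3 3 1.7342
#
# is parsed by read_from_csv() below into {'img_00042': [0.25, 3.0, 3.0, 1.7342]}.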
def read_from_csv(input_csv_file):
"""Reads Data from an input CSV file.
Args:
input_csv_file: the path of the CSV file.
Returns:
a numpy array with different data at each index:
"""
data = {}
with open(input_csv_file, 'r') as csv_file_in:
reader = csv.reader(csv_file_in)
for row in reader:
for (_, cell) in enumerate(row):
rdata = cell.strip().split(' ')
rid = rdata[0]
rdata = [float(t) for t in rdata[1:]]
data[rid] = rdata
csv_file_in.close()
return data
def main(_):
if FLAGS.vstar_fn == 'data_driven':
generate_data_driven(FLAGS.input_csv_filename, FLAGS.outdir)
elif FLAGS.vstar_fn in dir(models):
generate_pretrain_defined(
getattr(models, FLAGS.vstar_fn), FLAGS.outdir, FLAGS.sample_size)
else:
tf.logging.error('%s is not defined in models.py', FLAGS.vstar_fn)
if __name__ == '__main__':
tf.app.run()
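# Illustrative invocations (paths and the predefined function name are
# placeholders; only --vstar_fn=data_driven is guaranteed by this file):
#
#   # Build data-driven training data from a CSV of per-sample losses:
#   python data_generator.py --vstar_fn=data_driven \
#       --input_csv_filename=/tmp/losses.csv --outdir=/tmp/mentornet_data
#
#   # Use a v-star function exported by models.py:
#   python data_generator.py --vstar_fn=<function_in_models_py> \
#       --outdir=/tmp/mentornet_data --sample_size=100000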
| {
"content_hash": "057dbf01187ad1d3b80f9bdccc39189b",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 78,
"avg_line_length": 28.72289156626506,
"alnum_prop": 0.6424077181208053,
"repo_name": "google/mentornet",
"id": "f4ccd15dfa60acae7876422e59a723e6b89bcea5",
"size": "5447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/training_mentornet/data_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "102112"
},
{
"name": "Shell",
"bytes": "51715"
}
],
"symlink_target": ""
} |
import os
import sys
import yaml
def parse_args(envirnment, arguments):
manifest = envirnment.manifest
try:
with open(manifest, 'r') as stream:
available_arguments = yaml.load(stream)
except:
print('An error occurred while attempting to open the Manifest-file.')
sys.exit()
else:
for argument in arguments:
if available_arguments != None:
if argument in available_arguments:
value = available_arguments[argument]
available_arguments = available_arguments[argument]
else:
# print('This command is not available. Try again.')
# sys.exit()
alternate_parse_args(envirnment, arguments)
return
else:
alternate_parse_args(envirnment, arguments)
return
if type(value) is list:
for v in value:
envirnment.run(v)
elif not type(value) is dict:
envirnment.run(value)
else:
print('This command is not available. Try again.')
sys.exit()
# Look up the command in the configuration from the home directory,
# if one exists.
def alternate_parse_args(envirnment, arguments):
manifest = os.path.join(os.path.expanduser('~'), '.wa')
try:
with open(manifest, 'r') as stream:
available_arguments = yaml.load(stream)
except:
print('An error occurred while attempting to open the Manifest-file.')
sys.exit()
else:
for argument in arguments:
if available_arguments != None:
if argument in available_arguments:
value = available_arguments[argument]
available_arguments = available_arguments[argument]
else:
print('This command is not available. Try again.')
sys.exit()
else:
print('This command is not available. Try again.')
sys.exit()
if type(value) is list:
for v in value:
envirnment.run(v)
elif not type(value) is dict:
envirnment.run(value)
else:
print('This command is not available. Try again.')
sys.exit()
# What if the user's home directory is the root of the file system? Possible bug?
def find_manifest(path=os.getcwd()):
if not os.path.isfile(os.path.join(path, '.wa')) and os.path.dirname(path) != os.path.dirname(os.path.expanduser('~')):
if os.path.dirname(path) == path:
            # You have reached the filesystem root.
            # Works on Windows and *nix paths.
            # Does NOT work on Windows shares (\\server\share).
            # If no .wa was found but one exists in the home directory,
            # the current directory becomes the project root.
if os.path.isfile(os.path.join(os.path.expanduser('~'), '.wa')):
touch('.wa')
return '.'
else:
print(os.path.join(os.path.expanduser('~'), '.wa'))
print('Manifest-file not found')
sys.exit()
path = os.path.abspath(os.path.join(path, os.pardir))
return find_manifest(path)
else:
return path
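# Illustrative ".wa" manifest (hypothetical contents): the file is nested YAML
# whose leaf values are shell commands, either a single string or a list of
# strings. Given
#
#   build:
#     all: make
#     docs:
#       - make html
#       - make pdf
#
# the arguments ["build", "docs"] walk the nested keys and each command in the
# list is passed to envirnment.run() in order.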
def touch(fname):
if os.path.exists(fname):
os.utime(fname, None)
else:
open(fname, 'a').close()
| {
"content_hash": "bf2e90a267fe69448f5991e473fdf954",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 123,
"avg_line_length": 34.707070707070706,
"alnum_prop": 0.5480209545983702,
"repo_name": "char16t/wa",
"id": "9f88fd3bfd2379ec8c2ca5f48640abaca5ab04df",
"size": "3571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wa/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "82"
},
{
"name": "Python",
"bytes": "15421"
}
],
"symlink_target": ""
} |
import os
from django.utils import six
from django.core.files.base import File, ContentFile
from django.core.files.storage import (
default_storage, Storage)
from django.db.models.fields.files import ImageFieldFile, FieldFile
from django.core.files.images import get_image_dimensions
from django.utils.safestring import mark_safe
from django.utils.html import escape
from django.utils import timezone
from easy_thumbnails import engine, exceptions, models, utils, signals, storage
from easy_thumbnails.alias import aliases
from easy_thumbnails.conf import settings
from easy_thumbnails.options import ThumbnailOptions
def get_thumbnailer(obj, relative_name=None):
"""
Get a :class:`Thumbnailer` for a source file.
The ``obj`` argument is usually either one of the following:
* ``FieldFile`` instance (i.e. a model instance file/image field
property).
* A string, which will be used as the relative name (the source will be
set to the default storage).
* ``Storage`` instance - the ``relative_name`` argument must also be
provided.
Or it could be:
* A file-like instance - the ``relative_name`` argument must also be
provided.
In this case, the thumbnailer won't use or create a cached reference
to the thumbnail (i.e. a new thumbnail will be created for every
:meth:`Thumbnailer.get_thumbnail` call).
    If ``obj`` is a ``Thumbnailer`` instance, it will just be returned. If it
    has an ``easy_thumbnails_thumbnailer`` attribute, that attribute is simply
    returned (under the assumption it is a Thumbnailer instance).
"""
if hasattr(obj, 'easy_thumbnails_thumbnailer'):
return obj.easy_thumbnails_thumbnailer
if isinstance(obj, Thumbnailer):
return obj
elif isinstance(obj, FieldFile):
if not relative_name:
relative_name = obj.name
return ThumbnailerFieldFile(obj.instance, obj.field, relative_name)
source_storage = None
if isinstance(obj, six.string_types):
relative_name = obj
obj = None
if not relative_name:
raise ValueError(
"If object is not a FieldFile or Thumbnailer instance, the "
"relative name must be provided")
if isinstance(obj, File):
obj = obj.file
if isinstance(obj, Storage) or obj == default_storage:
source_storage = obj
obj = None
return Thumbnailer(
file=obj, name=relative_name, source_storage=source_storage,
remote_source=obj is not None)
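# Usage sketch (illustrative; ``profile.photo`` stands in for any model
# file/image field, and 'size'/'crop' are standard easy-thumbnails options):
#
#   thumbnailer = get_thumbnailer(profile.photo)
#   thumb = thumbnailer.get_thumbnail({'size': (100, 100), 'crop': True})
#   thumb.url  # URL of the generated (and now cached) thumbnail
#
#   # A plain storage path works too; the default storage is assumed:
#   get_thumbnailer('photos/avatar.jpg').get_thumbnail({'size': (50, 50)})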
def generate_all_aliases(fieldfile, include_global):
"""
Generate all of a file's aliases.
:param fieldfile: A ``FieldFile`` instance.
:param include_global: A boolean which determines whether to generate
thumbnails for project-wide aliases in addition to field, model, and
app specific aliases.
"""
all_options = aliases.all(fieldfile, include_global=include_global)
if all_options:
thumbnailer = get_thumbnailer(fieldfile)
for options in all_options.values():
thumbnailer.get_thumbnail(options)
def database_get_image_dimensions(file, close=False, dimensions=None):
"""
    Returns the (width, height) of an image, given a ThumbnailFile. Set
'close' to True to close the file at the end if it is initially in an open
state.
Will attempt to get the dimensions from the file itself if they aren't
in the db.
"""
storage_hash = utils.get_storage_hash(file.storage)
dimensions = None
dimensions_cache = None
try:
thumbnail = models.Thumbnail.objects.select_related('dimensions').get(
storage_hash=storage_hash, name=file.name)
except models.Thumbnail.DoesNotExist:
thumbnail = None
else:
try:
dimensions_cache = thumbnail.dimensions
except models.ThumbnailDimensions.DoesNotExist:
dimensions_cache = None
if dimensions_cache:
return dimensions_cache.width, dimensions_cache.height
dimensions = get_image_dimensions(file, close=close)
if settings.THUMBNAIL_CACHE_DIMENSIONS and thumbnail:
dimensions_cache = models.ThumbnailDimensions(thumbnail=thumbnail)
dimensions_cache.width, dimensions_cache.height = dimensions
dimensions_cache.save()
return dimensions
class FakeField(object):
name = 'fake'
def __init__(self, storage=None):
if storage is None:
storage = default_storage
self.storage = storage
def generate_filename(self, instance, name, *args, **kwargs):
return name
class FakeInstance(object):
def save(self, *args, **kwargs):
pass
class ThumbnailFile(ImageFieldFile):
"""
A thumbnailed file.
This can be used just like a Django model instance's property for a file
field (i.e. an ``ImageFieldFile`` object).
"""
def __init__(self, name, file=None, storage=None, thumbnail_options=None,
*args, **kwargs):
fake_field = FakeField(storage=storage)
super(ThumbnailFile, self).__init__(
FakeInstance(), fake_field, name, *args, **kwargs)
del self.field
if file:
self.file = file
if thumbnail_options is None:
thumbnail_options = ThumbnailOptions()
elif not isinstance(thumbnail_options, ThumbnailOptions):
thumbnail_options = ThumbnailOptions(thumbnail_options)
self.thumbnail_options = thumbnail_options
def save(self, *args, **kwargs):
# Can't save a ``ThumbnailFile`` directly.
raise NotImplementedError()
def delete(self, *args, **kwargs):
# Can't delete a ``ThumbnailFile`` directly, it doesn't have a
# reference to the source image, so it can't update the cache. If you
# really need to do this, do it with ``self.storage.delete`` directly.
raise NotImplementedError()
    # Be consistent with standard behaviour, even though these methods don't
    # actually alter data any more.
save.alters_data = True
delete.alters_data = True
def _get_image(self):
"""
Get a PIL Image instance of this file.
The image is cached to avoid the file needing to be read again if the
function is called again.
"""
if not hasattr(self, '_image_cache'):
from easy_thumbnails.source_generators import pil_image
self.image = pil_image(self)
return self._image_cache
def _set_image(self, image):
"""
Set the image for this file.
This also caches the dimensions of the image.
"""
if image:
self._image_cache = image
self._dimensions_cache = image.size
else:
if hasattr(self, '_image_cache'):
                del self._image_cache
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
image = property(_get_image, _set_image)
def tag(self, alt='', use_size=None, **attrs):
"""
Return a standard XHTML ``<img ... />`` tag for this field.
:param alt: The ``alt=""`` text for the tag. Defaults to ``''``.
:param use_size: Whether to get the size of the thumbnail image for use
in the tag attributes. If ``None`` (default), the size will only
            be used if it won't result in a remote file retrieval.
All other keyword parameters are added as (properly escaped) extra
attributes to the `img` tag.
"""
if use_size is None:
if getattr(self, '_dimensions_cache', None):
use_size = True
else:
try:
self.storage.path(self.name)
use_size = True
except NotImplementedError:
use_size = False
attrs['alt'] = alt
attrs['src'] = self.url
if use_size:
attrs.update(dict(width=self.width, height=self.height))
attrs = ' '.join(['%s="%s"' % (key, escape(value))
for key, value in sorted(attrs.items())])
return mark_safe('<img %s />' % attrs)
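    # Illustrative output of ``tag()`` (attribute values are made up): calling
    # ``thumb.tag(alt='Avatar', **{'class': 'avatar'})`` on a 100x100 thumbnail
    # renders roughly
    #   <img alt="Avatar" class="avatar" height="100" src="..." width="100" />
    # with width/height included only when they are cheap to determine.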
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, value):
if value is not None and not isinstance(value, File):
value = File(value)
self._file = value
self._committed = False
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def open(self, mode=None, *args, **kwargs):
if self.closed and self.name:
mode = mode or getattr(self, 'mode', None) or 'rb'
self.file = self.storage.open(self.name, mode)
else:
return super(ThumbnailFile, self).open(mode, *args, **kwargs)
def _get_image_dimensions(self):
if not hasattr(self, '_dimensions_cache'):
close = self.closed
self.open()
self._dimensions_cache = database_get_image_dimensions(
self, close=close)
return self._dimensions_cache
def set_image_dimensions(self, thumbnail):
"""
Set image dimensions from the cached dimensions of a ``Thumbnail``
model instance.
"""
try:
dimensions = getattr(thumbnail, 'dimensions', None)
except models.ThumbnailDimensions.DoesNotExist:
dimensions = None
if not dimensions:
return False
self._dimensions_cache = dimensions.size
return self._dimensions_cache
class Thumbnailer(File):
"""
A file-like object which provides some methods to generate thumbnail
images.
You can subclass this object and override the following properties to
change the defaults (pulled from the default settings):
* source_generators
* thumbnail_processors
"""
#: A list of source generators to use. If ``None``, will use the default
#: generators defined in settings.
source_generators = None
#: A list of thumbnail processors. If ``None``, will use the default
#: processors defined in settings.
thumbnail_processors = None
def __init__(self, file=None, name=None, source_storage=None,
thumbnail_storage=None, remote_source=False, generate=True,
*args, **kwargs):
super(Thumbnailer, self).__init__(file, name, *args, **kwargs)
if source_storage is None:
source_storage = default_storage
self.source_storage = source_storage
if thumbnail_storage is None:
thumbnail_storage = storage.thumbnail_default_storage
self.thumbnail_storage = thumbnail_storage
self.remote_source = remote_source
self.alias_target = None
self.generate = generate
        # Set default properties. For backwards compatibility, check to see
# if the attribute exists already (it could be set as a class property
# on a subclass) before getting it from settings.
for default in (
'basedir', 'subdir', 'prefix', 'quality', 'extension',
'preserve_extensions', 'transparency_extension',
'check_cache_miss', 'high_resolution', 'highres_infix',
'namer'):
attr_name = 'thumbnail_%s' % default
if getattr(self, attr_name, None) is None:
value = getattr(settings, attr_name.upper())
setattr(self, attr_name, value)
def __getitem__(self, alias):
"""
Retrieve a thumbnail matching the alias options (or raise a
``KeyError`` if no such alias exists).
"""
options = aliases.get(alias, target=self.alias_target)
if not options:
raise KeyError(alias)
return self.get_thumbnail(options, silent_template_exception=True)
def get_options(self, thumbnail_options, **kwargs):
"""
Get the thumbnail options that includes the default options for this
thumbnailer (and the project-wide default options).
"""
if isinstance(thumbnail_options, ThumbnailOptions):
return thumbnail_options
args = []
if thumbnail_options is not None:
args.append(thumbnail_options)
opts = ThumbnailOptions(*args, **kwargs)
if 'quality' not in thumbnail_options:
opts['quality'] = self.thumbnail_quality
return opts
def generate_thumbnail(self, thumbnail_options, high_resolution=False,
silent_template_exception=False):
"""
Return an unsaved ``ThumbnailFile`` containing a thumbnail image.
The thumbnail image is generated using the ``thumbnail_options``
dictionary.
"""
thumbnail_options = self.get_options(thumbnail_options)
orig_size = thumbnail_options['size'] # remember original size
# Size sanity check.
min_dim, max_dim = 0, 0
for dim in orig_size:
try:
dim = int(dim)
except (TypeError, ValueError):
continue
min_dim, max_dim = min(min_dim, dim), max(max_dim, dim)
if max_dim == 0 or min_dim < 0:
raise exceptions.EasyThumbnailsError(
"The source image is an invalid size (%sx%s)" % orig_size)
if high_resolution:
thumbnail_options['size'] = (orig_size[0] * 2, orig_size[1] * 2)
image = engine.generate_source_image(
self, thumbnail_options, self.source_generators,
fail_silently=silent_template_exception)
if image is None:
raise exceptions.InvalidImageFormatError(
"The source file does not appear to be an image")
thumbnail_image = engine.process_image(image, thumbnail_options,
self.thumbnail_processors)
if high_resolution:
thumbnail_options['size'] = orig_size # restore original size
filename = self.get_thumbnail_name(
thumbnail_options,
transparent=utils.is_transparent(thumbnail_image),
high_resolution=high_resolution)
quality = thumbnail_options['quality']
subsampling = thumbnail_options['subsampling']
img = engine.save_image(
thumbnail_image, filename=filename, quality=quality,
subsampling=subsampling)
data = img.read()
thumbnail = ThumbnailFile(
filename, file=ContentFile(data), storage=self.thumbnail_storage,
thumbnail_options=thumbnail_options)
thumbnail.image = thumbnail_image
thumbnail._committed = False
return thumbnail
def get_thumbnail_name(self, thumbnail_options, transparent=False,
high_resolution=False):
"""
Return a thumbnail filename for the given ``thumbnail_options``
dictionary and ``source_name`` (which defaults to the File's ``name``
if not provided).
"""
thumbnail_options = self.get_options(thumbnail_options)
path, source_filename = os.path.split(self.name)
source_extension = os.path.splitext(source_filename)[1][1:]
preserve_extensions = self.thumbnail_preserve_extensions
if preserve_extensions and (
preserve_extensions is True or
source_extension.lower() in preserve_extensions):
extension = source_extension
elif transparent:
extension = self.thumbnail_transparency_extension
else:
extension = self.thumbnail_extension
extension = extension or 'jpg'
prepared_opts = thumbnail_options.prepared_options()
opts_text = '_'.join(prepared_opts)
data = {'opts': opts_text}
basedir = self.thumbnail_basedir % data
subdir = self.thumbnail_subdir % data
if isinstance(self.thumbnail_namer, six.string_types):
namer_func = utils.dynamic_import(self.thumbnail_namer)
else:
namer_func = self.thumbnail_namer
filename = namer_func(
thumbnailer=self,
source_filename=source_filename,
thumbnail_extension=extension,
thumbnail_options=thumbnail_options,
prepared_options=prepared_opts,
)
if high_resolution:
filename = self.thumbnail_highres_infix.join(
os.path.splitext(filename))
filename = '%s%s' % (self.thumbnail_prefix, filename)
return os.path.join(basedir, path, subdir, filename)
def get_existing_thumbnail(self, thumbnail_options, high_resolution=False):
"""
Return a ``ThumbnailFile`` containing an existing thumbnail for a set
of thumbnail options, or ``None`` if not found.
"""
thumbnail_options = self.get_options(thumbnail_options)
names = [
self.get_thumbnail_name(
thumbnail_options, transparent=False,
high_resolution=high_resolution)]
transparent_name = self.get_thumbnail_name(
thumbnail_options, transparent=True,
high_resolution=high_resolution)
if transparent_name not in names:
names.append(transparent_name)
for filename in names:
exists = self.thumbnail_exists(filename)
if exists:
thumbnail_file = ThumbnailFile(
name=filename, storage=self.thumbnail_storage,
thumbnail_options=thumbnail_options)
if settings.THUMBNAIL_CACHE_DIMENSIONS:
# If this wasn't local storage, exists will be a thumbnail
# instance so we can store the image dimensions now to save
# a future potential query.
thumbnail_file.set_image_dimensions(exists)
return thumbnail_file
def get_thumbnail(self, thumbnail_options, save=True, generate=None,
silent_template_exception=False):
"""
Return a ``ThumbnailFile`` containing a thumbnail.
If a matching thumbnail already exists, it will simply be returned.
        By default (unless the ``Thumbnailer`` was instantiated with
``generate=False``), thumbnails that don't exist are generated.
Otherwise ``None`` is returned.
Force the generation behaviour by setting the ``generate`` param to
either ``True`` or ``False`` as required.
The new thumbnail image is generated using the ``thumbnail_options``
dictionary. If the ``save`` argument is ``True`` (default), the
generated thumbnail will be saved too.
"""
thumbnail_options = self.get_options(thumbnail_options)
if generate is None:
generate = self.generate
thumbnail = self.get_existing_thumbnail(thumbnail_options)
if not thumbnail:
if generate:
thumbnail = self.generate_thumbnail(
thumbnail_options,
silent_template_exception=silent_template_exception)
if save:
self.save_thumbnail(thumbnail)
else:
signals.thumbnail_missed.send(
sender=self, options=thumbnail_options,
high_resolution=False)
if 'HIGH_RESOLUTION' in thumbnail_options:
generate_high_resolution = thumbnail_options.get('HIGH_RESOLUTION')
else:
generate_high_resolution = self.thumbnail_high_resolution
if generate_high_resolution:
thumbnail.high_resolution = self.get_existing_thumbnail(
thumbnail_options, high_resolution=True)
if not thumbnail.high_resolution:
if generate:
thumbnail.high_resolution = self.generate_thumbnail(
thumbnail_options, high_resolution=True,
silent_template_exception=silent_template_exception)
if save:
self.save_thumbnail(thumbnail.high_resolution)
else:
signals.thumbnail_missed.send(
sender=self, options=thumbnail_options,
high_resolution=False)
return thumbnail
def save_thumbnail(self, thumbnail):
"""
Save a thumbnail to the thumbnail_storage.
Also triggers the ``thumbnail_created`` signal and caches the
thumbnail values and dimensions for future lookups.
"""
filename = thumbnail.name
try:
self.thumbnail_storage.delete(filename)
except Exception:
pass
self.thumbnail_storage.save(filename, thumbnail)
thumb_cache = self.get_thumbnail_cache(
thumbnail.name, create=True, update=True)
# Cache thumbnail dimensions.
if settings.THUMBNAIL_CACHE_DIMENSIONS:
dimensions_cache, created = (
models.ThumbnailDimensions.objects.get_or_create(
thumbnail=thumb_cache,
defaults={'width': thumbnail.width,
'height': thumbnail.height}))
if not created:
dimensions_cache.width = thumbnail.width
dimensions_cache.height = thumbnail.height
dimensions_cache.save()
signals.thumbnail_created.send(sender=thumbnail)
def thumbnail_exists(self, thumbnail_name):
"""
Calculate whether the thumbnail already exists and that the source is
not newer than the thumbnail.
If the source and thumbnail file storages are local, their file
modification times are used. Otherwise the database cached modification
times are used.
"""
if self.remote_source:
return False
if utils.is_storage_local(self.source_storage):
source_modtime = utils.get_modified_time(
self.source_storage, self.name)
else:
source = self.get_source_cache()
if not source:
return False
source_modtime = source.modified
if not source_modtime:
return False
local_thumbnails = utils.is_storage_local(self.thumbnail_storage)
if local_thumbnails:
thumbnail_modtime = utils.get_modified_time(
self.thumbnail_storage, thumbnail_name)
if not thumbnail_modtime:
return False
return source_modtime <= thumbnail_modtime
thumbnail = self.get_thumbnail_cache(thumbnail_name)
if not thumbnail:
return False
thumbnail_modtime = thumbnail.modified
if thumbnail.modified and source_modtime <= thumbnail.modified:
return thumbnail
return False
def get_source_cache(self, create=False, update=False):
if self.remote_source:
return None
if hasattr(self, '_source_cache') and not update:
if self._source_cache or not create:
return self._source_cache
update_modified = (update or create) and timezone.now()
self._source_cache = models.Source.objects.get_file(
create=create, update_modified=update_modified,
storage=self.source_storage, name=self.name,
check_cache_miss=self.thumbnail_check_cache_miss)
return self._source_cache
def get_thumbnail_cache(self, thumbnail_name, create=False, update=False):
if self.remote_source:
return None
source = self.get_source_cache(create=True)
update_modified = (update or create) and timezone.now()
return models.Thumbnail.objects.get_file(
create=create, update_modified=update_modified,
storage=self.thumbnail_storage, source=source, name=thumbnail_name,
check_cache_miss=self.thumbnail_check_cache_miss)
def open(self, mode=None):
if self.closed:
mode = mode or getattr(self, 'mode', None) or 'rb'
self.file = self.source_storage.open(self.name, mode)
else:
self.seek(0)
# open() doesn't alter the file's contents, but it does reset the pointer.
open.alters_data = True
class ThumbnailerFieldFile(FieldFile, Thumbnailer):
"""
A field file which provides some methods for generating (and returning)
thumbnail images.
"""
def __init__(self, *args, **kwargs):
super(ThumbnailerFieldFile, self).__init__(*args, **kwargs)
self.source_storage = self.field.storage
thumbnail_storage = getattr(self.field, 'thumbnail_storage', None)
if thumbnail_storage:
self.thumbnail_storage = thumbnail_storage
self.alias_target = self
def save(self, name, content, *args, **kwargs):
"""
Save the file, also saving a reference to the thumbnail cache Source
model.
"""
super(ThumbnailerFieldFile, self).save(name, content, *args, **kwargs)
self.get_source_cache(create=True, update=True)
def delete(self, *args, **kwargs):
"""
Delete the image, along with any generated thumbnails.
"""
source_cache = self.get_source_cache()
# First, delete any related thumbnails.
self.delete_thumbnails(source_cache)
# Next, delete the source image.
super(ThumbnailerFieldFile, self).delete(*args, **kwargs)
# Finally, delete the source cache entry.
if source_cache:
source_cache.delete()
delete.alters_data = True
def delete_thumbnails(self, source_cache=None):
"""
Delete any thumbnails generated from the source image.
:arg source_cache: An optional argument only used for optimisation
where the source cache instance is already known.
:returns: The number of files deleted.
"""
        source_cache = source_cache or self.get_source_cache()
deleted = 0
if source_cache:
thumbnail_storage_hash = utils.get_storage_hash(
self.thumbnail_storage)
for thumbnail_cache in source_cache.thumbnails.all():
# Only attempt to delete the file if it was stored using the
# same storage as is currently used.
if thumbnail_cache.storage_hash == thumbnail_storage_hash:
self.thumbnail_storage.delete(thumbnail_cache.name)
# Delete the cache thumbnail instance too.
thumbnail_cache.delete()
deleted += 1
return deleted
delete_thumbnails.alters_data = True
def get_thumbnails(self, *args, **kwargs):
"""
Return an iterator which returns ThumbnailFile instances.
"""
        # Iterate over any thumbnails generated from the source image.
source_cache = self.get_source_cache()
if source_cache:
thumbnail_storage_hash = utils.get_storage_hash(
self.thumbnail_storage)
for thumbnail_cache in source_cache.thumbnails.all():
# Only iterate files which are stored using the current
# thumbnail storage.
if thumbnail_cache.storage_hash == thumbnail_storage_hash:
yield ThumbnailFile(name=thumbnail_cache.name,
storage=self.thumbnail_storage)
class ThumbnailerImageFieldFile(ImageFieldFile, ThumbnailerFieldFile):
"""
A field file which provides some methods for generating (and returning)
thumbnail images.
"""
def save(self, name, content, *args, **kwargs):
"""
Save the image.
The image will be resized down using a ``ThumbnailField`` if
``resize_source`` (a dictionary of thumbnail options) is provided by
the field.
"""
options = getattr(self.field, 'resize_source', None)
if options:
if 'quality' not in options:
options['quality'] = self.thumbnail_quality
content = Thumbnailer(content, name).generate_thumbnail(options)
# If the generated extension differs from the original, use it
# instead.
orig_name, ext = os.path.splitext(name)
generated_ext = os.path.splitext(content.name)[1]
if generated_ext.lower() != ext.lower():
name = orig_name + generated_ext
super(ThumbnailerImageFieldFile, self).save(name, content, *args,
**kwargs)
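# Illustrative sketch (not part of this module): the ``resize_source`` handling
# above is normally driven from a model field, assuming easy-thumbnails'
# ``ThumbnailerImageField`` is used:
#
#   from django.db import models
#   from easy_thumbnails.fields import ThumbnailerImageField
#
#   class Profile(models.Model):
#       # the uploaded image is resized to fit within 800x800 on save()
#       avatar = ThumbnailerImageField(
#           upload_to='avatars', resize_source=dict(size=(800, 800)))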
| {
"content_hash": "6c7b121d0812d4c8ff6c89f4a2e98ce0",
"timestamp": "",
"source": "github",
"line_count": 753,
"max_line_length": 79,
"avg_line_length": 38.45683930942895,
"alnum_prop": 0.6095379515159887,
"repo_name": "sandow-digital/easy-thumbnails-cropman",
"id": "796527962b8ccf7c380b598f7f7f4b59764256d8",
"size": "28958",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "easy_thumbnails/files.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "257367"
},
{
"name": "Shell",
"bytes": "2975"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
import shutil
import nox
LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core"))
@nox.session(python="3.7")
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", "black", *LOCAL_DEPS)
session.run(
"black",
"--check",
"google",
"tests",
"docs",
)
session.run("flake8", "google", "tests")
@nox.session(python="3.6")
def blacken(session):
"""Run black.
Format code to uniform standard.
This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
That run uses an image that doesn't have 3.6 installed. Before updating this
check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
"""
session.install("black")
session.run(
"black",
"google",
"tests",
"docs",
)
@nox.session(python="3.7")
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
session.install("mock", "pytest", "pytest-cov")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", ".")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=97",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=["2.7", "3.5", "3.6", "3.7"])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=["2.7", "3.7"])
def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", "../test_utils/")
session.install("-e", ".")
# Run py.test against the system tests.
if system_test_exists:
session.run("py.test", "--quiet", system_test_path, *session.posargs)
if system_test_folder_exists:
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
@nox.session(python="3.7")
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
@nox.session(python="3.7")
def docs(session):
"""Build the docs for this library."""
session.install('-e', '.')
session.install('sphinx', 'alabaster', 'recommonmark')
shutil.rmtree(os.path.join('docs', '_build'), ignore_errors=True)
session.run(
'sphinx-build',
'-W', # warnings as errors
'-T', # show full traceback on exception
'-N', # no colors
'-b', 'html',
'-d', os.path.join('docs', '_build', 'doctrees', ''),
os.path.join('docs', ''),
os.path.join('docs', '_build', 'html', ''),
)
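# Illustrative usage (not part of this file): nox exposes the sessions above by
# name, with parametrized Python versions suffixed, so they are typically run as
#
#   nox -s lint
#   nox -s unit-3.7
#   nox -s system-2.7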
| {
"content_hash": "4de065bbffef44d93cd81da0e6470a5f",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 84,
"avg_line_length": 30.07638888888889,
"alnum_prop": 0.612791503117063,
"repo_name": "tswast/google-cloud-python",
"id": "0f528b7f3902a3125b02718b92dd0ccdb51edc45",
"size": "4933",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trace/noxfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
import mock
from rdomanager_oscplugin.tests.v1.overcloud_node import fakes
from rdomanager_oscplugin.v1 import overcloud_node
class TestDeleteNode(fakes.TestDeleteNode):
def setUp(self):
super(TestDeleteNode, self).setUp()
# Get the command object to test
self.cmd = overcloud_node.DeleteNode(self.app, None)
# TODO(someone): This test does not pass with autospec=True, it should
# probably be fixed so that it can pass with that.
@mock.patch('tripleo_common.scale.ScaleManager')
def test_node_delete(self, scale_manager):
argslist = ['instance1', 'instance2', '--plan', 'overcloud',
'--stack', 'overcloud']
verifylist = [
('plan', 'overcloud'),
('stack', 'overcloud'),
('nodes', ['instance1', 'instance2'])
]
parsed_args = self.check_parser(self.cmd, argslist, verifylist)
self.cmd.take_action(parsed_args)
        # take_action is expected to instantiate ScaleManager and scale down
        # the parsed nodes, so assert against the mocked instance.
        scale_manager.return_value.scaledown.assert_called_once_with(
            ['instance1', 'instance2'])
| {
"content_hash": "290e9353022891cb5a2483fcf5332025",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 74,
"avg_line_length": 38.2,
"alnum_prop": 0.6116928446771379,
"repo_name": "rdo-management/python-rdomanager-oscplugin",
"id": "5433a0a62dd611be0b2fe96b90a962085da3fbb1",
"size": "1747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdomanager_oscplugin/tests/v1/overcloud_node/test_overcloud_node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "280242"
}
],
"symlink_target": ""
} |
import requests
__author__ = 'Lei Yu'
# connection wrapper
class Error(Exception):
def __init__(self,message):
self.message=message
def __str__(self):
return repr(self.message)
class Connection:
def __init__(self,host="api.coursera.org/api",version="catalog.v1"):
self.host=host
self.version=version
def get(self,path):
return self._do_call(path,"GET")
def _do_call(self,path,method):
url='https://{0}/{1}{2}'.format(self.host,self.version,path)
response=requests.request(method,url)
if(response.status_code==requests.codes.ok):
json_body=response.json()
return json_body
raise Error(response.json())
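# Illustrative usage (the endpoint path below is hypothetical; consult the
# catalog.v1 API documentation for real paths):
#
#   conn = Connection()
#   try:
#       catalog = conn.get('/courses')
#   except Error as err:
#       print(err)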
| {
"content_hash": "e151bc31e4843c1b9efd250bb2a7b578",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 72,
"avg_line_length": 17.72093023255814,
"alnum_prop": 0.5826771653543307,
"repo_name": "leiyu123/coursera_rest_client",
"id": "ab103b639e9bde1f944acc22c2a786112da0c54d",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coursera_rest_client/connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14934"
}
],
"symlink_target": ""
} |
import abc
class FwaasDriverBase(object, metaclass=abc.ABCMeta):
"""Firewall as a Service Driver base class.
Using FwaasDriver Class, an instance of L3 perimeter Firewall
can be created. The firewall co-exists with the L3 agent.
One instance is created for each tenant. One firewall policy
is associated with each tenant (in the Havana release).
The Firewall can be visualized as having two zones (in Havana
release), trusted and untrusted.
All the 'internal' interfaces of Neutron Router is treated as trusted. The
interface connected to 'external network' is treated as untrusted.
The policy is applied on traffic ingressing/egressing interfaces on
the trusted zone. This implies that policy will be applied for traffic
passing from
- trusted to untrusted zones
- untrusted to trusted zones
- trusted to trusted zones
Policy WILL NOT be applied for traffic from untrusted to untrusted zones.
This is not a problem in Havana release as there is only one interface
connected to external network.
Since the policy is applied on the internal interfaces, the traffic
will be not be NATed to floating IP. For incoming traffic, the
traffic will get NATed to internal IP address before it hits
the firewall rules. So, while writing the rules, care should be
taken if using rules based on floating IP.
The firewall rule addition/deletion/insertion/update are done by the
management console. When the policy is sent to the driver, the complete
policy is sent and the whole policy has to be applied atomically. The
firewall rules will not get updated individually. This is to avoid problems
related to out-of-order notifications or inconsistent behaviour by partial
application of rules. Argument agent_mode indicates the l3 agent in DVR or
DVR_SNAT or LEGACY mode.
"""
    # TODO(Margaret): Remove the first 3 methods and make the second three
    # abstract (decorated with @abc.abstractmethod).
def create_firewall(self, agent_mode, apply_list, firewall):
"""Create the Firewall with default (drop all) policy.
The default policy will be applied on all the interfaces of
trusted zone.
"""
pass
def delete_firewall(self, agent_mode, apply_list, firewall):
"""Delete firewall.
Removes all policies created by this instance and frees up
all the resources.
"""
pass
def update_firewall(self, agent_mode, apply_list, firewall):
"""Apply the policy on all trusted interfaces.
Remove previous policy and apply the new policy on all trusted
interfaces.
"""
pass
def create_firewall_group(self, agent_mode, apply_list, firewall):
"""Create the Firewall with default (drop all) policy.
The default policy will be applied on all the interfaces of
trusted zone.
"""
pass
def delete_firewall_group(self, agent_mode, apply_list, firewall):
"""Delete firewall.
Removes all policies created by this instance and frees up
all the resources.
"""
pass
def update_firewall_group(self, agent_mode, apply_list, firewall):
"""Apply the policy on all trusted interfaces.
Remove previous policy and apply the new policy on all trusted
interfaces.
"""
pass
@abc.abstractmethod
def apply_default_policy(self, agent_mode, apply_list, firewall):
"""Apply the default policy on all trusted interfaces.
Remove current policy and apply the default policy on all trusted
interfaces.
"""
pass
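# Minimal sketch of a concrete driver (illustrative only, not a real backend):
# a subclass must implement at least apply_default_policy(); the remaining
# hooks can be overridden as needed.
#
#   class NoopFwaasDriver(FwaasDriverBase):
#       def apply_default_policy(self, agent_mode, apply_list, firewall):
#           # a real driver would push a drop-all policy to every router
#           # in apply_list here
#           pass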
| {
"content_hash": "54f77130fcfe376623dbcb9d29462983",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 79,
"avg_line_length": 36.13725490196079,
"alnum_prop": 0.6923494302767227,
"repo_name": "openstack/neutron-fwaas",
"id": "afdb7d15dff2d2bf710334fa664166b3d648b1ff",
"size": "4311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron_fwaas/services/firewall/service_drivers/agents/drivers/fwaas_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1053"
},
{
"name": "Python",
"bytes": "921570"
},
{
"name": "Shell",
"bytes": "21966"
}
],
"symlink_target": ""
} |
import os
import config
import web
import app.controllers
urls = (
# front page
'/', 'app.controllers.home.index',
'/hello/', 'app.controllers.hello.index',
)
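# Each entry above maps a URL to a web.py handler class. Illustrative sketch of
# such a controller (the real ones live under app/controllers/):
#
#   class index:
#       def GET(self):
#           return "hello"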
app = web.application(urls, globals())
def notfound():
render = web.template.render(config.global_template_path, base='layout', cache=config.get_cache_config(), globals=globals())
return web.notfound(render.notfound())
def internalerror():
render = web.template.render(config.global_template_path, base='layout', cache=config.get_cache_config(), globals=globals())
    return web.internalerror(render.internalerror())
# Move these two assignments under the else branch below (just above app.wsgifunc())
# if the custom error pages should only be used in production.
app.notfound = notfound
app.internalerror = internalerror
if __name__ == "__main__":
app.run()
else:
# move app.notfound and app.internalerror here if you want to show them only in production envs
current_dir = os.path.dirname(__file__)
session = web.session.Session(app, web.session.DiskStore(os.path.join(current_dir, 'sessions')), )
    application = app.wsgifunc()
| {
"content_hash": "850bfabbb516908a8b7268e1a1332321",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 128,
"avg_line_length": 31.314285714285713,
"alnum_prop": 0.7071167883211679,
"repo_name": "neuweb/mvc-webpy",
"id": "a97b2131ffa4e62d492f6f5c46cd8414d6b6c8e1",
"size": "1145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3998"
}
],
"symlink_target": ""
} |
import time
from openerp.osv import fields, osv
from openerp.osv.orm import browse_record, browse_null
from openerp.tools.translate import _
class purchase_requisition_partner(osv.osv_memory):
_name = "purchase.requisition.partner"
_description = "Purchase Requisition Partner"
_columns = {
'partner_id': fields.many2one('res.partner', 'Supplier', required=True,domain=[('supplier', '=', True)]),
}
def view_init(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
res = super(purchase_requisition_partner, self).view_init(cr, uid, fields_list, context=context)
record_id = context and context.get('active_id', False) or False
tender = self.pool.get('purchase.requisition').browse(cr, uid, record_id, context=context)
if not tender.line_ids:
raise osv.except_osv(_('Error!'), _('No Product in Tender.'))
return res
def create_order(self, cr, uid, ids, context=None):
active_ids = context and context.get('active_ids', [])
data = self.browse(cr, uid, ids, context=context)[0]
self.pool.get('purchase.requisition').make_purchase_order(cr, uid, active_ids, data.partner_id.id, context=context)
return {'type': 'ir.actions.act_window_close'}
purchase_requisition_partner()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| {
"content_hash": "c0592eb2b30afdc67f388e560f8d21b8",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 123,
"avg_line_length": 43.8125,
"alnum_prop": 0.6697574893009985,
"repo_name": "chjw8016/GreenOdoo7-haibao",
"id": "58a72556b221925ed3dfa87cb79a89e316b347ee",
"size": "2381",
"binary": false,
"copies": "49",
"ref": "refs/heads/master",
"path": "openerp/addons/purchase_requisition/wizard/purchase_requisition_partner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "90846"
},
{
"name": "CSS",
"bytes": "384369"
},
{
"name": "JavaScript",
"bytes": "1730589"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9394626"
},
{
"name": "Shell",
"bytes": "5172"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
} |
import re
import json
import dateutil
import logging
import urllib3
import collections
from bson import ObjectId
from urllib.parse import urlparse, parse_qs, parse_qsl
from datetime import date, datetime
import requests
from functools import partial
from time import time, sleep
from slovar.strings import split_strip, str2dt, str2rdt
from slovar.utils import maybe_dotted
from slovar import slovar
from prf.utils.errors import DValueError, DKeyError
# Assumed import: `prf_exc` is referenced by dict2tab and TabRenderer below and
# is expected to provide pyramid-style HTTP errors such as HTTPBadRequest.
import prf.exc as prf_exc
log = logging.getLogger(__name__)
OPERATORS = ['ne', 'lt', 'lte', 'gt', 'gte', 'in', 'all',
'startswith', 'exists', 'range', 'geobb', 'size']
class Params(slovar):
'Subclass of slovar that will raise D* exceptions'
def __init__(self, *arg, **kw):
super().__init__(*arg, **kw)
def bad_value_error_klass(self, e):
return DValueError(e)
def missing_key_error_klass(self, e):
return DKeyError(e)
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (datetime, date)):
return obj.isoformat().split(".")[0]
try:
return super(JSONEncoder, self).default(obj)
except TypeError:
            return str(obj)  # fall back to the string representation
def json_dumps(body):
return json.dumps(body, cls=JSONEncoder)
def process_limit(start, page, limit):
try:
limit = int(limit)
if start is not None and page is not None:
raise DValueError('Can not specify _start and _page at the same time')
if start is not None:
start = int(start)
elif page is not None and limit > 0:
start = int(page) * limit
else:
start = 0
if limit < -1 or start < 0:
raise DValueError('_limit/_page or _limit/_start can not be < 0')
except (ValueError, TypeError) as e:
raise DValueError(e)
except Exception as e: #pragma nocover
raise DValueError('Bad _limit param: %s ' % e)
return start, limit
def snake2camel(text):
    '''turn snake case into camel case: snake_camel -> SnakeCamel'''
return ''.join([a.title() for a in text.split('_')])
def camel2snake(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def urlify(s):
s = re.sub(r"[^\w\s]", '', s)
s = re.sub(r"\s+", '_', s)
return s
def process_key(key, suffix=''):
_key, div, op = key.rpartition('__')
if div and op in OPERATORS:
key = _key
key = key.replace('__', '.')
return ('%s.%s' % (key, suffix) if suffix else key), (op if op in OPERATORS else '')
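# e.g. (derived from the logic above):
#   process_key('price__gte')            -> ('price', 'gte')
#   process_key('address__city')         -> ('address.city', '')
#   process_key('price__gte', 'amount')  -> ('price.amount', 'gte')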
def parse_specials(orig_params):
specials = Params(
_sort=None,
_fields=None,
_count=None,
_start=None,
_limit=None,
_page=None,
_end=None,
_frequencies=None,
_group=None,
_distinct=None,
_scalar=None,
_flat=None,
_flat_sep=None,
_flat_keep_lists=None,
_join=None,
_unwind=None,
_meta=None,
_tr=None
)
def short(name):
_n = name[:2]
if _n in params:
return _n
else:
return name
params = orig_params.copy()
specials._sort = params.aslist(short('_sort'), default=[], pop=True)
specials._fields = params.aslist(short('_fields'), default=[], pop=True)
specials._flat = params.aslist(short('_flat'), default=[], pop=True)
specials._group = params.aslist(short('_group'), default=[], pop=True)
    specials._count = short('_count') in params
    params.pop(short('_count'), False)
params.asint('_start', allow_missing=True)
params.asint('_page', allow_missing=True)
params.asint('_limit', allow_missing=True)
if not specials._count and params.get('_limit'):
specials._start, specials._limit = process_limit(
params.pop('_start', None),
params.pop('_page', None),
params.asint(short('_limit'), pop=True))
specials._end = specials._start+specials._limit\
if specials._limit > -1 else None
specials._flat_keep_lists = params.asbool('_flat_keep_lists', default=False)
specials._flat_sep = params.asstr('_flat_sep', default='.')
specials._asdict = params.pop('_asdict', False)
specials._pop_empty = params.pop('_pop_empty', False)
for each in list(params.keys()):
if each.startswith('_'):
specials[each] = params.pop(each)
if '.' in each and each in params:
params[each.replace('.', '__')] = params.pop(each)
params = typecast(params)
#deduce fields from the query
if 'AUTO' in specials._fields:
for kk in params:
fld, _ = process_key(kk)
specials._fields.append(fld)
specials._meta = specials.get('_meta') or slovar()
specials._tr = specials.aslist('_tr', default=[])
return params, specials
def typecast(params):
params = Params(params)
list_ops = ('in', 'nin', 'all')
int_ops = ('exists', 'size', 'max_distance', 'min_distance', 'empty')
geo_ops = ('near',)
types = ('asbool', 'asint', 'asfloat', 'asstr', 'aslist',
'asset', 'asdt', 'asobj', 'asdtob')
for key in list(params.keys()):
if params[key] == 'null':
params[key] = None
continue
parts = key.split('__')
if len(parts) <= 1:
continue
suf = []
op = ''
for ix in range(len(parts)-1, -1, -1):
part = parts[ix]
if part in list_ops+int_ops+geo_ops+types:
op = part
break
if not op:
continue
new_key = '__'.join([e for e in parts if e != op])
if op in geo_ops:
coords = params.aslist(key)
try:
coords = [float(e) for e in coords]
if len(coords) != 2:
raise ValueError
except ValueError:
raise DValueError('`near` operator takes pair of'
' numeric elements. Got `%s` instead' % coords)
params[key] = coords
continue
if op in list_ops:
new_key = key
op = 'aslist'
if op in int_ops:
new_key = key
op = 'asint'
        if not op.startswith('as') and op not in types:
continue
if op == 'asobj':
params[new_key]=ObjectId(params.pop(key))
continue
try:
method = getattr(params, op)
if isinstance(method, collections.Callable):
params[new_key] = method(key, pop=True)
except (KeyError, AttributeError) as e:
raise DValueError('Unknown typecast operator `%s`' % op)
return params
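# e.g. (a sketch assuming slovar's as* casters behave as their names suggest):
#   typecast(Params({'age__asint': '42'}))  -> {'age': 42}
#   typecast(Params({'status': 'null'}))    -> {'status': None}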
def with_metaclass(meta, *bases):
"""Defines a metaclass.
Creates a dummy class with a dummy metaclass. When subclassed, the dummy
metaclass is used, which has a constructor that instantiates a
new class from the original parent. This ensures that the dummy class and
dummy metaclass are not in the inheritance tree.
Credit to Armin Ronacher.
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
def resolve_host_to(url, newhost):
'''
substitute the host in `url` with `newhost`
if newhost ends with `:` the original port will be preserved.
'''
elements = urlparse(url)
_, _, port = elements.netloc.partition(':')
newhost,newcol,newport=newhost.partition(':')
if newcol:
if not newport:
newport = port
else:
newport = ''
if newport:
newhost = '%s:%s' % (newhost, newport)
return elements._replace(netloc=newhost).geturl()
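# e.g. (derived from the logic above):
#   resolve_host_to('http://example.com:8080/x', 'localhost')
#       -> 'http://localhost/x'
#   resolve_host_to('http://example.com:8080/x', 'localhost:')
#       -> 'http://localhost:8080/x'    # trailing ':' keeps the original port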
def sanitize_url(url, to_remove=None):
if not to_remove:
return urlparse(url)._replace(query='').geturl()
if isinstance(to_remove, str):
to_remove = [to_remove]
elements = urlparse(url)
qs_dict = parse_qs(elements.query)
for rm in to_remove:
qs_dict.pop(rm, None)
return elements._replace(
query=urlencode(qs_dict, True)).geturl()
def to_dunders(d, only=None):
new_d = slovar()
for key in d:
if only and key not in only:
continue
if '__' not in key:
new_d['set__%s'%key.replace('.', '__')] = d[key]
else:
new_d[key] = d[key]
return new_d
def validate_url(url, method='GET'):
from requests import Session, Request
try:
return Session().send(Request(method, url).prepare()).status_code
except Exception:
raise DValueError('URL not reachable `%s`' % url)
def is_url(text, validate=False):
if text.startswith('http'):
if validate:
return validate_url(text)
else:
return True
return False
def chunks(_list, chunk_size):
for ix in range(0, len(_list), chunk_size):
yield _list[ix:ix+chunk_size]
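# e.g. list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]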
def encoded_dict(in_dict):
out_dict = {}
for k, v in list(in_dict.items()):
if isinstance(v, dict):
out_dict[k] = encoded_dict(v)
elif isinstance(v, list):
for ix in range(len(v)):
v[ix] = str(v[ix]).encode('utf-8')
out_dict[k] = v
else:
out_dict[k] = str(v).encode('utf-8')
return out_dict
def urlencode(query, doseq=False):
import urllib.request, urllib.parse, urllib.error
try:
return urllib.parse.urlencode(encoded_dict(query), doseq)
except UnicodeEncodeError as e:
log.error(e)
def pager(start, page, total):
def _pager(start,page,total):
if total != -1:
for each in chunks(list(range(0, total)), page):
_page = len(each)
yield (start, _page)
start += _page
else:
while 1:
yield (start, page)
start += page
return partial(_pager, start, page, total)
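# e.g. pager(0, 10, 25) returns a callable producing (start, page_size) pairs:
#   list(pager(0, 10, 25)()) -> [(0, 10), (10, 10), (20, 5)]
# with total == -1 the generator is endless: (0, 10), (10, 10), ...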
def cleanup_url(url, _raise=True):
if not url:
if _raise:
raise DValueError('bad url `%s`' % url)
return ''
try:
parsed = urllib3.util.parse_url(url)
except Exception as e:
if _raise:
raise e
return ''
host = parsed.host
if not host:
if _raise:
raise DValueError('missing host in %s' % url)
else:
return ''
if host.startswith('www.'):
host = host[4:]
path = (parsed.path or '').strip('/')
return ('%s/%s' % (host, path)).strip('/')
def ld2dl(ld):
dl = {}
for _d in ld:
for kk,vv in _d.items():
if kk in dl:
dl[kk].append(vv)
else:
dl[kk] = [vv]
return dl
def dl2ld(dl):
"dict of lists to list of dicts"
return [{key:value[index] for key, value in list(dl.items())}
for index in range(len(list(dl.values())[0]))]
def ld2dd(ld, key):
'list of dicts to dict of dicts'
return {each[key]:each for each in ld}
def d2inv(d, value_as_list=True):
inv_dict = {}
for kk,vv in d.items():
if value_as_list:
inv_dict[vv] = inv_dict.get(vv, [])
inv_dict[vv].append(kk)
elif vv not in inv_dict:
inv_dict[vv] = kk
return inv_dict
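# e.g. (derived from the converters above):
#   ld2dl([{'a': 1}, {'a': 2, 'b': 3}])      -> {'a': [1, 2], 'b': [3]}
#   dl2ld({'a': [1, 2], 'b': [3, 4]})        -> [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]
#   ld2dd([{'id': 'x'}, {'id': 'y'}], 'id')  -> {'x': {'id': 'x'}, 'y': {'id': 'y'}}
#   d2inv({'a': 1, 'b': 1})                  -> {1: ['a', 'b']}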
def qs2dict(qs):
from urllib.parse import parse_qsl
return slovar(parse_qsl(qs,keep_blank_values=True))
def TODAY(sep='_'):
return datetime.utcnow().strftime(sep.join(['%Y', '%m', '%d']))
def NOW(sep=None):
dnow = datetime.utcnow()
if sep:
return dnow.strftime(sep.join(['%Y', '%m', '%dT%H', '%M', '%S']))
return dnow.strftime('%Y-%m-%dT%H:%M:%S')
def raise_or_log(_raise=False):
if _raise:
import sys
_type, value, _ = sys.exc_info()
raise _type(value)
else:
import traceback
traceback.print_exc()
def join(objects, joinee_itor, join_on, require_match=True, join_ns=None,
join_unwind=True, join_params=None):
'''
require_match = False is equivalent to SQL left join
    require_match = True is equivalent to SQL inner join
'''
join_params = slovar(join_params or {}).flat()
for each in objects:
_d1 = each.to_dict()
join_params.update(_d1.extract(join_on).flat())
if not join_params:
if require_match:
log.warning('empty query for cls2')
continue
else:
yield _d1
continue
join_params.setdefault('_limit', 1)
matched = joinee_itor(**join_params)
if not matched:
if require_match:
continue
elif not join_ns:
yield _d1
continue
else:
matched = [slovar()] #attach empty slovar to results as join_ns
if not join_unwind:
joinee_list = [it.to_dict(join_params.get('_fields', [])) for it in matched]
_d1[join_ns or 'joinee'] = joinee_list
yield _d1
else:
for it in matched:
_d2 = it.to_dict(join_params.get('_fields', []))
if join_ns:
_d2 = slovar({join_ns:_d2})
yield _d1.update_with(_d2)
def rextract(expr, data, delim, _raise=True):
for fld in expr.split(delim)[1::2]:
_d = data.extract(fld)
if _d:
val = typecast(_d.flat())[fld]
if val:
expr = expr.replace('#%s#'%fld, val)
else:
msg = 'missing fields or value None in data.\nfields: %s\ndata keys: %s' % (fld, data.keys())
if _raise:
raise ValueError(msg)
else:
log.warning(msg)
return expr
def get_dt_unique_name(name='', add_seconds=True, only_seconds=False):
now = datetime.utcnow()
seconds_since_midnight = int((now - now.replace(hour=0, minute=0, second=0, microsecond=0))\
.total_seconds())
if only_seconds:
return '%s_%s' % (name, seconds_since_midnight)
if name:
uname = '%s_%s' % (TODAY(), name)
else:
uname = TODAY()
if add_seconds:
uname += '_%s' % seconds_since_midnight
return uname
class Throttler:
def __init__(self, max_counter, period):
self.max_counter = max_counter
self.period = period
self.reset()
def reset(self):
self.stime = time()
self.counter = 0
def pause(self):
time_past = time() - self.stime
sleep_for = self.period-time_past
log.debug('THROTTLE: sleep for %s, with %s' % (sleep_for, self.__dict__))
sleep(sleep_for)
self.reset()
def __call__(self):
self.counter += 1
if time() - self.stime <= self.period and self.counter >= self.max_counter:
self.pause()
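# Illustrative usage (`urls` is a placeholder): the call that brings the count
# up to max_counter within `period` seconds sleeps for the remainder of the
# window and then resets the counter.
#
#   throttle = Throttler(max_counter=5, period=2)
#   for url in urls:
#       throttle()
#       response = requests.get(url)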
def dict2tab(data, fields=None, format_='csv', skip_headers=False):
import tablib
def render(each, key):
val = each.get(key)
if isinstance(val, (datetime, date)):
val = val.strftime('%Y-%m-%dT%H:%M:%SZ') # iso
elif isinstance(val, (list, tuple)):
val = json.dumps(val)
if val is None:
val = ''
return val
data = data or []
if not data:
return None
headers = []
if fields:
for each in split_strip(fields):
aa, _, bb = each.partition('__as__')
name = (bb or aa).split(':')[0]
headers.append(name)
else:
#get the headers from the first item in the data.
#Note, data IS schemaless, so other items could have different fields.
headers = sorted(list(data[0].flat(keep_lists=1).keys()))
        log.warning('Default headers taken from the first item: %s', headers)
tabdata = tablib.Dataset(headers=None if skip_headers else headers)
try:
for each in data:
each = each.extract(headers).flat(keep_lists=1)
row = []
for col in headers:
row.append(render(each, col))
tabdata.append(row)
return getattr(tabdata, format_)
except Exception as e:
log.error('Headers:%s, Format:%s\nData:%s',
tabdata.headers, format_, each)
raise prf_exc.HTTPBadRequest('dict2tab error: %r' % e)
class TabRenderer(object):
def __init__(self, info):
pass
def __call__(self, value, system):
request = system.get('request')
response = request.response
_, specials = system['view'](None, request).process_params(request)
if 'text/csv' in request.accept:
response.content_type = 'text/csv'
_format = 'csv'
elif 'text/xls' in request.accept:
_format = 'xls'
else:
raise prf_exc.HTTPBadRequest(
'Unsupported Accept Header `%s`' % request.accept)
return dict2tab(value.get('data', []),
fields=specials.aslist('_csv_fields', default=[]),
format_=_format)
| {
"content_hash": "baa38fa6c51658a13a1a912b149b2413",
"timestamp": "",
"source": "github",
"line_count": 651,
"max_line_length": 105,
"avg_line_length": 27.043010752688172,
"alnum_prop": 0.5413234876455553,
"repo_name": "vahana/prf",
"id": "6282ea7fbb0af6689b777f13dbe251b583b64d46",
"size": "17605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prf/utils/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "167421"
}
],
"symlink_target": ""
} |
import zlib
import cPickle
import sqlite3
try:
from cyordereddict import OrderedDict
except ImportError:
from collections import OrderedDict
def pack_blob(obj):
return sqlite3.Binary(zdumps(obj))
def unpack_genotype_blob(blob):
return cPickle.loads(zlib.decompress(blob))
def unpack_ordereddict_blob(blob):
blob_val = cPickle.loads(zlib.decompress(blob))
if blob_val is not None:
return OrderedDict(blob_val)
return None
def zdumps(obj):
return zlib.compress(cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL), 9)
def zloads(obj):
return cPickle.loads(zlib.decompress(obj))
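# e.g. round-trip (illustrative):
#   blob = pack_blob({'gts': [0, 1]})  # zlib-compressed pickle for a BLOB column
#   obj = zloads(blob)                 # -> {'gts': [0, 1]}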
| {
"content_hash": "ad930efcd0071e2e3d9e277df6fc2bcd",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 73,
"avg_line_length": 22.48148148148148,
"alnum_prop": 0.7347611202635914,
"repo_name": "bpow/gemini",
"id": "63dbf34f94b2f5a84e8f9ce89d0ea1760c2f1c74",
"size": "607",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gemini/compression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "130"
},
{
"name": "HTML",
"bytes": "54411"
},
{
"name": "Perl",
"bytes": "5684"
},
{
"name": "Python",
"bytes": "575290"
},
{
"name": "Shell",
"bytes": "286535"
}
],
"symlink_target": ""
} |
import collections
import six
import unittest2 as unittest
from fabricio import docker, utils
class OptionsTestCase(unittest.TestCase):
def test_str_version(self):
cases = dict(
empty_options_list=dict(
options=collections.OrderedDict(),
expected_str_version='',
),
with_underscore=dict(
options=collections.OrderedDict(foo_baz='bar'),
expected_str_version='--foo_baz=bar',
),
multiword=dict(
options=collections.OrderedDict(foo='bar baz'),
expected_str_version="--foo='bar baz'",
),
empty=dict(
options=collections.OrderedDict(foo=''),
expected_str_version="--foo=''",
),
str=dict(
options=collections.OrderedDict(foo='bar'),
expected_str_version='--foo=bar',
),
unicode=dict(
options=collections.OrderedDict(foo=u'привет'),
expected_str_version=u"--foo='привет'",
),
integer=dict(
options=collections.OrderedDict(foo=42),
expected_str_version='--foo=42',
),
integer_zero=dict(
options=collections.OrderedDict(foo=0),
expected_str_version='--foo=0',
),
integer_one=dict(
options=collections.OrderedDict(foo=1),
expected_str_version='--foo=1',
),
integer_minus_one=dict(
options=collections.OrderedDict(foo=-1),
expected_str_version='--foo=-1',
),
image=dict(
options=collections.OrderedDict(image=docker.Image('image:tag')),
expected_str_version='--image=image:tag',
),
triple_length=dict(
options=collections.OrderedDict([
('foo', 'foo'),
('bar', 'bar'),
('baz', 'baz'),
]),
expected_str_version='--foo=foo --bar=bar --baz=baz',
),
multi_value_empty=dict(
options=collections.OrderedDict(foo=[]),
expected_str_version='',
),
multi_value=dict(
options=collections.OrderedDict(foo=['bar', 'baz']),
expected_str_version='--foo=bar --foo=baz',
),
multi_value_integer=dict(
options=collections.OrderedDict(foo=[42, 43]),
expected_str_version='--foo=42 --foo=43',
),
boolean_values=dict(
options=collections.OrderedDict(foo=True, bar=False),
expected_str_version='--foo',
),
mix=dict(
options=collections.OrderedDict([
('foo', 'foo'),
('bar', True),
('baz', ['1', 'a']),
]),
expected_str_version='--foo=foo --bar --baz=1 --baz=a',
),
)
for case, params in cases.items():
with self.subTest(case=case):
options = utils.Options(params['options'])
expected_str_version = params['expected_str_version']
self.assertEqual(expected_str_version, six.text_type(options))
| {
"content_hash": "6558588923e1464d5f2163812333df28",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 81,
"avg_line_length": 36.744680851063826,
"alnum_prop": 0.47278517660683267,
"repo_name": "renskiy/fabricio",
"id": "b5ebdd3797ed9725c9b55e053c59bab3a63d7f15",
"size": "3482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "582330"
}
],
"symlink_target": ""
} |
"""The fsl module provides classes for interfacing with the `FSL
<http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This
was written to work with FSL version 4.1.4.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os,shutil
import warnings
from nipype.interfaces.fsl.base import FSLCommand, FSLCommandInputSpec
from nipype.interfaces.base import Bunch, TraitedSpec, isdefined, File,Directory,\
InputMultiPath
import enthought.traits.api as traits
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class DTIFitInputSpec(FSLCommandInputSpec):
dwi = File(exists=True, desc = 'diffusion weighted image data file',
argstr='-k %s', position=0, mandatory=True)
base_name = traits.Str("dtifit_", desc = 'base_name that all output files will start with',
argstr='-o %s', position=1, usedefault=True)
mask = File(exists=True, desc = 'bet binary mask file',
argstr='-m %s', position=2, mandatory=True)
bvecs = File(exists=True, desc = 'b vectors file',
argstr='-r %s', position=3, mandatory=True)
bvals = File(exists=True,desc = 'b values file',
argstr='-b %s', position=4, mandatory=True)
min_z = traits.Int(argstr='-z %d', desc='min z')
max_z = traits.Int(argstr='-Z %d', desc='max z')
min_y = traits.Int(argstr='-y %d', desc='min y')
max_y = traits.Int(argstr='-Y %d', desc='max y')
min_x = traits.Int(argstr='-x %d', desc='min x')
max_x = traits.Int(argstr='-X %d', desc='max x')
save = traits.Bool(desc = 'save the elements of the tensor',
argstr='--save_tensor')
sse = traits.Bool(desc = 'output sum of squared errors', argstr='--sse')
cni = File(exists=True, desc = 'input counfound regressors', argstr='-cni %s')
little_bit = traits.Bool(desc = 'only process small area of brain',
argstr='--littlebit')
class DTIFitOutputSpec(TraitedSpec):
V1 = File(exists = True, desc = 'path/name of file with the 1st eigenvector')
V2 = File(exists = True, desc = 'path/name of file with the 2nd eigenvector')
V3 = File(exists = True, desc = 'path/name of file with the 3rd eigenvector')
L1 = File(exists = True, desc = 'path/name of file with the 1st eigenvalue')
L2 = File(exists = True, desc = 'path/name of file with the 2nd eigenvalue')
L3 = File(exists = True, desc = 'path/name of file with the 3rd eigenvalue')
MD = File(exists = True, desc = 'path/name of file with the mean diffusivity')
FA = File(exists = True, desc = 'path/name of file with the fractional anisotropy')
S0 = File(exists = True, desc = 'path/name of file with the raw T2 signal with no '+
'diffusion weighting')
class DTIFit(FSLCommand):
""" Use FSL dtifit command for fitting a diffusion tensor model at each
voxel
Example
-------
>>> from nipype.interfaces import fsl
>>> dti = fsl.DTIFit()
>>> dti.inputs.dwi = 'diffusion.nii'
>>> dti.inputs.bvecs = 'bvecs'
>>> dti.inputs.bvals = 'bvals'
>>> dti.inputs.base_name = 'TP'
>>> dti.inputs.mask = 'mask.nii'
>>> dti.cmdline
'dtifit -k diffusion.nii -o TP -m mask.nii -r bvecs -b bvals'
"""
_cmd = 'dtifit'
input_spec = DTIFitInputSpec
output_spec = DTIFitOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
for k in outputs.keys():
if k not in ('outputtype','environ','args'):
outputs[k] = self._gen_fname(self.inputs.base_name,suffix = '_'+k)
return outputs
class EddyCorrectInputSpec(FSLCommandInputSpec):
in_file = File(exists=True,desc = '4D input file',argstr='%s', position=0, mandatory=True)
out_file = File(desc = '4D output file',argstr='%s', position=1, genfile=True)
ref_num = traits.Int(argstr='%d', position=2, desc='reference number',mandatory=True)
class EddyCorrectOutputSpec(TraitedSpec):
eddy_corrected = File(exists=True, desc='path/name of 4D eddy corrected output file')
class EddyCorrect(FSLCommand):
""" Use FSL eddy_correct command for correction of eddy current distortion
Example
-------
>>> from nipype.interfaces import fsl
>>> eddyc = fsl.EddyCorrect(in_file='diffusion.nii',out_file="diffusion_edc.nii", ref_num=0)
>>> eddyc.cmdline
'eddy_correct diffusion.nii diffusion_edc.nii 0'
"""
_cmd = 'eddy_correct'
input_spec = EddyCorrectInputSpec
output_spec = EddyCorrectOutputSpec
def _run_interface(self, runtime):
if not isdefined(self.inputs.out_file):
self.inputs.out_file = self._gen_fname(self.inputs.in_file,suffix = '_edc')
runtime = super(EddyCorrect, self)._run_interface(runtime)
if runtime.stderr:
runtime.returncode = 1
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['eddy_corrected'] = self.inputs.out_file
if not isdefined(outputs['eddy_corrected']):
outputs['eddy_corrected'] = self._gen_fname(self.inputs.in_file,suffix = '_edc')
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._list_outputs()['eddy_corrected']
else:
return None
class BEDPOSTXInputSpec(FSLCommandInputSpec):
dwi = File(exists=True, desc = 'diffusion weighted image data file',mandatory=True)
mask = File(exists=True, desc = 'bet binary mask file',mandatory=True)
bvecs = File(exists=True, desc = 'b vectors file',mandatory=True)
bvals = File(exists=True,desc = 'b values file',mandatory=True)
bpx_directory = Directory('bedpostx',argstr='%s',usedefault=True,
                              desc="the name for this subject's bedpostx folder")
fibres = traits.Int(1,argstr='-n %d', desc='number of fibres per voxel')
weight = traits.Float(1.00,argstr='-w %.2f', desc='ARD weight, more weight means less'+
' secondary fibres per voxel')
burn_period = traits.Int(1000,argstr='-b %d', desc='burnin period')
jumps = traits.Int(1250,argstr='-j %d', desc='number of jumps')
sampling = traits.Int(25,argstr='-s %d', desc='sample every')
class BEDPOSTXOutputSpec(TraitedSpec):
bpx_out_directory = Directory(exists=True, field='dir', desc = 'path/name of directory with all '+
'bedpostx output files for this subject')
xfms_directory = Directory(exists=True, field='dir', desc = 'path/name of directory with the '+
'tranformation matrices')
merged_thsamples = traits.List(File, exists=True,
desc='a list of path/name of 4D volume with samples from the distribution on theta')
merged_phsamples = traits.List(File, exists=True,
desc='a list of path/name of file with samples from the distribution on phi')
merged_fsamples = traits.List(File, exists=True,
desc='a list of path/name of 4D volume with samples from the distribution on'+
' anisotropic volume fraction')
mean_thsamples = traits.List(File, exists=True,
desc='a list of path/name of 3D volume with mean of distribution on theta')
mean_phsamples = traits.List(File, exists=True,
desc='a list of path/name of 3D volume with mean of distribution on phi')
mean_fsamples = traits.List(File, exists=True,
desc='a list of path/name of 3D volume with mean of distribution on f anisotropy')
dyads = traits.List(File, exists=True, desc='a list of path/name of mean of PDD distribution in vector form')
class BEDPOSTX(FSLCommand):
""" Use FSL bedpostx command for local modelling of diffusion parameters
Example
-------
>>> from nipype.interfaces import fsl
>>> bedp = fsl.BEDPOSTX(bpx_directory='subjdir', bvecs='bvecs', bvals='bvals', dwi='diffusion.nii', \
mask='mask.nii', fibres=1)
>>> bedp.cmdline
'bedpostx subjdir -n 1'
"""
_cmd = 'bedpostx'
input_spec = BEDPOSTXInputSpec
output_spec = BEDPOSTXOutputSpec
can_resume = True
def _run_interface(self, runtime):
#create the subject specific bpx_directory
bpx_directory = os.path.join(os.getcwd(),self.inputs.bpx_directory)
self.inputs.bpx_directory = bpx_directory
if not os.path.exists(bpx_directory):
os.makedirs(bpx_directory)
# copy the dwi,bvals,bvecs, and mask files to that directory
shutil.copyfile(self.inputs.mask,self._gen_fname('nodif_brain_mask',suffix='',cwd=self.inputs.bpx_directory))
shutil.copyfile(self.inputs.dwi,self._gen_fname('data',suffix='',cwd=self.inputs.bpx_directory))
shutil.copyfile(self.inputs.bvals,os.path.join(self.inputs.bpx_directory,'bvals'))
shutil.copyfile(self.inputs.bvecs,os.path.join(self.inputs.bpx_directory,'bvecs'))
return super(BEDPOSTX, self)._run_interface(runtime)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['bpx_out_directory'] = os.path.join(os.getcwd(),self.inputs.bpx_directory+'.bedpostX')
outputs['xfms_directory'] = os.path.join(os.getcwd(),self.inputs.bpx_directory+'.bedpostX','xfms')
for k in outputs.keys():
if k not in ('outputtype','environ','args','bpx_out_directory','xfms_directory'):
outputs[k]=[]
for n in range(self.inputs.fibres):
outputs['merged_thsamples'].append(self._gen_fname('merged_th'+repr(n+1)+'samples',suffix='',cwd=outputs['bpx_out_directory']))
outputs['merged_phsamples'].append(self._gen_fname('merged_ph'+repr(n+1)+'samples',suffix='',cwd=outputs['bpx_out_directory']))
outputs['merged_fsamples'].append(self._gen_fname('merged_f'+repr(n+1)+'samples',suffix='',cwd=outputs['bpx_out_directory']))
outputs['mean_thsamples'].append(self._gen_fname('mean_th'+repr(n+1)+'samples',suffix='',cwd=outputs['bpx_out_directory']))
outputs['mean_phsamples'].append(self._gen_fname('mean_ph'+repr(n+1)+'samples',suffix='',cwd=outputs['bpx_out_directory']))
outputs['mean_fsamples'].append(self._gen_fname('mean_f'+repr(n+1)+'samples',suffix='',cwd=outputs['bpx_out_directory']))
outputs['dyads'].append(self._gen_fname('dyads'+repr(n+1),suffix='',cwd=outputs['bpx_out_directory']))
return outputs
class TBSS1PreprocInputSpec(FSLCommandInputSpec):
img_list = traits.List(File(exists=True), mandatory=True,
desc = 'list with filenames of the FA images', sep = " ", argstr="%s")
class TBSS1PreprocOutputSpec(TraitedSpec):
tbss_dir = Directory(exists=True, field='dir',
desc='path/name of directory with FA images')
class TBSS1Preproc(FSLCommand):
"""XXX UNSTABLE DO NOT USE
Use FSL TBSS1Preproc for preparing your FA data in your TBSS working
directory in the right format
Example
-------
>>> import nipype.interfaces.fsl.dti as fsl
>>> tbss1 = fsl.TBSS1Preproc(img_list=['functional.nii','functional2.nii','functional3.nii'])
>>> tbss1.cmdline
'tbss_1_preproc functional.nii functional2.nii functional3.nii'
"""
_cmd = 'tbss_1_preproc'
input_spec = TBSS1PreprocInputSpec
output_spec = TBSS1PreprocOutputSpec
def _run_interface(self, runtime):
for n in self.inputs.img_list:
shutil.copyfile(n,os.path.basename(n))
runtime = super(TBSS1Preproc, self)._run_interface(runtime)
if runtime.stderr:
runtime.returncode = 1
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['tbss_dir'] = os.getcwd()
return outputs
def _format_arg(self, name, spec, value):
if name == "img_list":
new_list = [os.path.basename(fname) for fname in self.inputs.img_list]
return super(TBSS1Preproc, self)._format_arg("img_list", spec, new_list)
return super(TBSS1Preproc, self)._format_arg(name, spec, value)
class TBSS2RegInputSpec(FSLCommandInputSpec):
tbss_dir = Directory(exists=True, field='dir',
desc = 'path/name of directory containing the FA and origdata folders '+
'generated by tbss_1_preproc',
mandatory=True)
_xor_inputs = ('FMRIB58FA', 'target_img','find_target')
FMRIB58FA = traits.Bool(desc='use FMRIB58_FA_1mm as target for nonlinear registrations',
argstr='-T', xor=_xor_inputs)
target_img = traits.Str(desc='use given image as target for nonlinear registrations',
argstr='-t %s', xor=_xor_inputs)
find_target = traits.Bool(desc='find best target from all images in FA',
argstr='-n', xor=_xor_inputs)
class TBSS2RegOutputSpec(TraitedSpec):
tbss_dir = Directory(exists=True, field='dir',
desc='path/name of directory containing the FA and origdata folders '+
'generated by tbss_1_preproc')
class TBSS2Reg(FSLCommand):
""" XXX UNSTABLE DO NOT USE
Use FSL TBSS2Reg for applying nonlinear registration of all FA images
into standard space
Example
-------
>>> import nipype.interfaces.fsl.dti as fsl
>>> tbss2 = fsl.TBSS2Reg(tbss_dir=os.getcwd(),FMRIB58FA=True)
>>> tbss2.cmdline
'tbss_2_reg -T'
"""
_cmd = 'tbss_2_reg'
input_spec = TBSS2RegInputSpec
output_spec = TBSS2RegOutputSpec
def _run_interface(self, runtime):
runtime.cwd = self.inputs.tbss_dir
return super(TBSS2Reg, self)._run_interface(runtime)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['tbss_dir'] = self.inputs.tbss_dir
return outputs
class TBSS3PostregInputSpec(FSLCommandInputSpec):
tbss_dir = Directory(exists=True, field='dir',
desc = 'path/name of directory containing the FA and origdata '+
'folders generated by tbss_1_preproc',
mandatory=True)
_xor_inputs = ('subject_mean', 'FMRIB58FA')
subject_mean = traits.Bool(desc='derive mean_FA and mean_FA_skeleton from mean of all subjects in study',
argstr='-S', xor=_xor_inputs)
FMRIB58FA = traits.Bool(desc='use FMRIB58_FA and its skeleton instead of study-derived mean and skeleton',
argstr='-T', xor=_xor_inputs)
class TBSS3PostregOutputSpec(TraitedSpec):
tbss_dir = Directory(exists=True, field='dir',
desc='path/name of directory containing the FA, origdata, and '+
'stats folders generated by tbss_1_preproc and this command')
all_FA = File(exists=True, desc='path/name of 4D volume with all FA images')
mean_FA_skeleton = File(exists=True, desc='path/name of 3D volume with mean FA skeleton')
mean_FA = File(exists=True, desc='path/name of 3D volume with mean FA image')
class TBSS3Postreg(FSLCommand):
""" XXX UNSTABLE DO NOT USE
Use FSL TBSS3Postreg for creating the mean FA image and skeletonise it
Example
-------
>>> import nipype.interfaces.fsl.dti as fsl
>>> tbss3 = fsl.TBSS3Postreg(subject_mean=True, tbss_dir='tbss_dir')
>>> tbss3.cmdline
'tbss_3_postreg -S'
"""
_cmd = 'tbss_3_postreg'
input_spec = TBSS3PostregInputSpec
output_spec = TBSS3PostregOutputSpec
def _run_interface(self, runtime):
runtime.cwd = self.inputs.tbss_dir
runtime = super(TBSS3Postreg, self)._run_interface(runtime)
if runtime.stderr:
runtime.returncode = 1
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['tbss_dir'] = self.inputs.tbss_dir
stats = os.path.join(self.inputs.tbss_dir,'stats')
outputs['all_FA'] = self._gen_fname('all_FA',
cwd=os.path.abspath(stats),suffix='' )
outputs['mean_FA_skeleton'] = self._gen_fname('mean_FA_skeleton',
cwd=os.path.abspath(stats),suffix='' )
outputs['mean_FA'] = self._gen_fname('mean_FA',
cwd=os.path.abspath(stats),suffix='' )
return outputs
class TBSS4PrestatsInputSpec(FSLCommandInputSpec):
tbss_dir = Directory(exists=True, field='dir',
desc = 'path/name of directory containing the FA, origdata, and '+
'stats folders generated by tbss_1_preproc and tbss_3_postreg',
mandatory=True)
threshold = traits.Float(argstr='%.3f', desc='threshold value',mandatory=True)
class TBSS4PrestatsOutputSpec(TraitedSpec):
all_FA_skeletonised = File(exists=True, desc='path/name of 4D volume with all FA images skeletonized')
mean_FA_skeleton_mask = File(exists=True, desc='path/name of mean FA skeleton mask')
tbss_dir = Directory(exists=True, field='dir',
desc = 'path/name of directory containing the FA, origdata, and stats '+
'folders generated by tbss_1_preproc and tbss_3_postreg')
class TBSS4Prestats(FSLCommand):
"""XXX UNSTABLE DO NOT USE
Use FSL TBSS4Prestats thresholds the mean FA skeleton image at the
chosen threshold
Example
-------
>>> import nipype.interfaces.fsl.dti as fsl
>>> tbss4 = fsl.TBSS4Prestats(threshold=0.3, tbss_dir="tbss_dir")
>>> tbss4.cmdline
'tbss_4_prestats 0.300'
"""
_cmd = 'tbss_4_prestats'
input_spec = TBSS4PrestatsInputSpec
output_spec = TBSS4PrestatsOutputSpec
def _run_interface(self, runtime):
runtime.cwd = self.inputs.tbss_dir
return super(TBSS4Prestats, self)._run_interface(runtime)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['tbss_dir'] = self.inputs.tbss_dir
stats = os.path.join(self.inputs.tbss_dir,'stats')
outputs['all_FA_skeletonised'] = self._gen_fname('all_FA_skeletonised',
cwd=os.path.abspath(stats),
suffix='' )
outputs['mean_FA_skeleton_mask'] = self._gen_fname('mean_FA_skeleton_mask',
cwd=os.path.abspath(stats),
suffix='' )
return outputs
class RandomiseInputSpec(FSLCommandInputSpec):
in_file = File(exists=True,desc = '4D input file',argstr='-i %s', position=0, mandatory=True)
base_name = traits.Str('tbss_',desc = 'the rootname that all generated files will have',
argstr='-o %s', position=1, usedefault=True)
design_mat = File(exists=True,desc = 'design matrix file',argstr='-d %s', position=2, mandatory=True)
tcon = File(exists=True,desc = 't contrasts file',argstr='-t %s', position=3, mandatory=True)
fcon = File(exists=True,desc = 'f contrasts file',argstr='-f %s')
mask = File(exists=True,desc = 'mask image',argstr='-m %s')
x_block_labels = File(exists=True,desc = 'exchangeability block labels file',argstr='-e %s')
demean = traits.Bool(desc = 'demean data temporally before model fitting', argstr='-D')
one_sample_group_mean = traits.Bool(desc = 'perform 1-sample group-mean test instead of generic permutation test',
argstr='-l')
show_total_perms = traits.Bool(desc = 'print out how many unique permutations would be generated and exit',
argstr='-q')
show_info_parallel_mode = traits.Bool(desc = 'print out information required for parallel mode and exit',
argstr='-Q')
vox_p_values = traits.Bool(desc = 'output voxelwise (corrected and uncorrected) p-value images',
argstr='-x')
tfce = traits.Bool(desc = 'carry out Threshold-Free Cluster Enhancement', argstr='-T')
tfce2D = traits.Bool(desc = 'carry out Threshold-Free Cluster Enhancement with 2D optimisation',
argstr='--T2')
f_only = traits.Bool(desc = 'calculate f-statistics only', argstr='--f_only')
raw_stats_imgs = traits.Bool(desc = 'output raw ( unpermuted ) statistic images', argstr='-R')
p_vec_n_dist_files = traits.Bool(desc = 'output permutation vector and null distribution text files',
argstr='-P')
num_perm = traits.Int(argstr='-n %d', desc='number of permutations (default 5000, set to 0 for exhaustive)')
seed = traits.Int(argstr='--seed %d', desc='specific integer seed for random number generator')
var_smooth = traits.Int(argstr='-v %d', desc='use variance smoothing (std is in mm)')
c_thresh = traits.Float(argstr='-c %.2f', desc='carry out cluster-based thresholding')
cm_thresh = traits.Float(argstr='-C %.2f', desc='carry out cluster-mass-based thresholding')
f_c_thresh = traits.Float(argstr='-F %.2f', desc='carry out f cluster thresholding')
f_cm_thresh = traits.Float(argstr='-S %.2f', desc='carry out f cluster-mass thresholding')
tfce_H = traits.Float(argstr='--tfce_H %.2f', desc='TFCE height parameter (default=2)')
tfce_E = traits.Float(argstr='--tfce_E %.2f', desc='TFCE extent parameter (default=0.5)')
tfce_C = traits.Float(argstr='--tfce_C %.2f', desc='TFCE connectivity (6 or 26; default=6)')
vxl = traits.List(traits.Int,argstr='--vxl %d', desc='list of numbers indicating voxelwise EVs'+
'position in the design matrix (list order corresponds to files in vxf option)')
vxf = traits.List(traits.Int,argstr='--vxf %d', desc='list of 4D images containing voxelwise EVs'+
'(list order corresponds to numbers in vxl option)')
class RandomiseOutputSpec(TraitedSpec):
tstat1_file = File(exists=True,desc = 'path/name of tstat image corresponding to the first t contrast')
class Randomise(FSLCommand):
"""XXX UNSTABLE DO NOT USE
FSL Randomise: feeds the 4D projected FA data into GLM
modelling and thresholding
in order to find voxels which correlate with your model
Example
-------
>>> import nipype.interfaces.fsl.dti as fsl
>>> rand = fsl.Randomise(in_file='allFA.nii', \
mask = 'mask.nii', \
tcon='design.con', \
design_mat='design.mat')
>>> rand.cmdline
'randomise -i allFA.nii -o tbss_ -d design.mat -t design.con -m mask.nii'
"""
_cmd = 'randomise'
input_spec = RandomiseInputSpec
output_spec = RandomiseOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['tstat1_file'] = self._gen_fname(self.inputs.base_name,suffix='_tstat1')
return outputs
class ProbTrackXInputSpec(FSLCommandInputSpec):
samplesbase_name = traits.Str(desc = 'the rootname/base_name for samples files',argstr='-s %s')
bpx_directory = Directory(exists=True, field='dir', desc = 'path/name of directory with all '+
'bedpostx output files',mandatory=True)
mask = File(exists=True, desc='bet binary mask file in diffusion space',
argstr='-m %s', mandatory=True)
seed_file = File(exists=True, desc='seed volume, or voxel, or ascii file with multiple'+
'volumes, or freesurfer label file',argstr='-x %s', mandatory=True)
mode = traits.Str(desc='options: simple (single seed voxel), seedmask (mask of seed voxels),'+
'twomask_symm (two bet binary masks) ', argstr='--mode=%s')
    target_masks = InputMultiPath(File(exists=True),desc='list of target masks - '+
'required for seeds_to_targets classification', argstr='--targetmasks=%s')
mask2 =File(exists=True,desc='second bet binary mask (in diffusion space) in twomask_symm mode',
argstr='--mask2=%s')
waypoints = File(exists=True, desc='waypoint mask or ascii list of waypoint masks - '+
'only keep paths going through ALL the masks',argstr='--waypoints=%s')
network = traits.Bool(desc='activate network mode - only keep paths going through '+
'at least one seed mask (required if multiple seed masks)',
argstr='--network')
mesh = File(exists=True,desc='Freesurfer-type surface descriptor (in ascii format)',
argstr='--mesh=%s')
seed_ref = File(exists=True, desc='reference vol to define seed space in '+
'simple mode - diffusion space assumed if absent',
argstr='--seedref=%s')
out_dir = Directory(os.getcwd(),exists=True,argstr='--dir=%s',usedefault=True,
desc='directory to put the final volumes in')
force_dir = traits.Bool(desc='use the actual directory name given - i.e. '+
'do not add + to make a new directory',argstr='--forcedir')
opd = traits.Bool(desc='outputs path distributions',argstr='--opd')
correct_path_distribution = traits.Bool(desc='correct path distribution for the length of the pathways',
argstr='--pd')
os2t = traits.Bool(desc='Outputs seeds to targets',argstr='--os2t')
paths_file = File('nipype_fdtpaths',usedefault=True,argstr='--out=%s',
desc='produces an output file (default is fdt_paths)')
avoid_mp = File(exists=True, desc='reject pathways passing through locations given by this mask',
argstr='--avoid=%s')
stop_mask = File(exists=True,argstr='--stop=%s',
desc='stop tracking at locations given by this mask file')
xfm = File(exists=True, argstr='--xfm=%s',
desc='transformation matrix taking seed space to DTI space '+
'(either FLIRT matrix or FNIRT warp_field) - default is identity')
    inv_xfm = File(argstr='--invxfm=%s', desc='transformation matrix taking DTI space to seed'+
' space (compulsory when using a warp_field for seeds_to_dti)')
n_samples = traits.Int(argstr='--nsamples=%d',desc='number of samples - default=5000')
n_steps = traits.Int(argstr='--nsteps=%d',desc='number of steps per sample - default=2000')
dist_thresh = traits.Float(argstr='--distthresh=%.3f',desc='discards samples shorter than '+
'this threshold (in mm - default=0)')
c_thresh = traits.Float(argstr='--cthr=%.3f',desc='curvature threshold - default=0.2')
sample_random_points = traits.Bool(argstr='--sampvox',desc='sample random points within seed voxels')
step_length = traits.Float(argstr='--steplength=%.3f',desc='step_length in mm - default=0.5')
loop_check = traits.Bool(argstr='--loopcheck',desc='perform loop_checks on paths -'+
' slower, but allows lower curvature threshold')
use_anisotropy = traits.Bool(argstr='--usef',desc='use anisotropy to constrain tracking')
rand_fib = traits.Enum(0,1,2,3,argstr='--randfib %d',desc='options: 0 - default, 1 - to randomly sample'+
' initial fibres (with f > fibthresh), 2 - to sample in '+
'proportion fibres (with f>fibthresh) to f, 3 - to sample ALL '+
'populations at random (even if f<fibthresh)')
fibst = traits.Int(argstr='--fibst=%d',desc='force a starting fibre for tracking - '+
'default=1, i.e. first fibre orientation. Only works if randfib==0')
mod_euler = traits.Bool(argstr='--modeuler',desc='use modified euler streamlining')
random_seed = traits.Bool(argstr='--rseed',desc='random seed')
s2tastext = traits.Bool(argstr='--s2tastext',desc='output seed-to-target counts as a'+
' text file (useful when seeding from a mesh)')
class ProbTrackXOutputSpec(TraitedSpec):
log = File(exists=True, desc='path/name of a text record of the command that was run')
fdt_paths = File(exists=True, desc='path/name of a 3D image file containing the output '+
'connectivity distribution to the seed mask')
way_total = File(exists=True, desc='path/name of a text file containing a single number '+
'corresponding to the total number of generated tracts that '+
'have not been rejected by inclusion/exclusion mask criteria')
targets = traits.List(File,exists=True,desc='a list with all generated seeds_to_target files')
class ProbTrackX(FSLCommand):
""" Use FSL probtrackx for tractography on bedpostx results
Examples
--------
>>> from nipype.interfaces import fsl
>>> pbx = fsl.ProbTrackX(samplesbase_name='merged', mask='mask.nii', \
seed_file='MASK_average_thal_right.nii', mode='seedmask', \
xfm='trans.mat', n_samples=3, n_steps=10, force_dir=True, opd=True, os2t=True, \
bpx_directory='bedpostxout', target_masks = ['targets_MASK1.nii','targets_MASK2.nii'], \
paths_file='nipype_fdtpaths', out_dir='.')
>>> pbx.cmdline
'probtrackx --forcedir -m mask.nii --mode=seedmask --nsamples=3 --nsteps=10 --opd --os2t --dir=. --out=nipype_fdtpaths -s merged -x MASK_average_thal_right.nii --targetmasks=targets.txt --xfm=trans.mat'
"""
_cmd = 'probtrackx'
input_spec = ProbTrackXInputSpec
output_spec = ProbTrackXOutputSpec
def _run_interface(self, runtime):
if not isdefined(self.inputs.samplesbase_name):
self.inputs.samplesbase_name = os.path.join(self.inputs.bpx_directory,'merged')
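        # Collate the target mask paths into a plain text file; _format_arg below
        # substitutes this file name as the value of the --targetmasks argument.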
if isdefined(self.inputs.target_masks):
f = open("targets.txt","w")
for target in self.inputs.target_masks:
f.write("%s\n"%target)
f.close()
return super(ProbTrackX, self)._run_interface(runtime)
def _format_arg(self, name, spec, value):
if name == 'target_masks' and isdefined(value):
fname = "targets.txt"
return super(ProbTrackX, self)._format_arg(name, spec, [fname])
else:
return super(ProbTrackX, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['log'] = self._gen_fname('probtrackx',cwd=self.inputs.out_dir,
suffix='.log',change_ext=False)
outputs['way_total'] = self._gen_fname('waytotal',cwd=self.inputs.out_dir,
suffix='',change_ext=False)
outputs['fdt_paths'] = self._gen_fname(self.inputs.paths_file,
cwd=self.inputs.out_dir,suffix='')
# handle seeds-to-target output files
if isdefined(self.inputs.target_masks):
outputs['targets']=[]
for target in self.inputs.target_masks:
outputs['targets'].append(self._gen_fname('seeds_to_'+os.path.split(target)[1],
cwd=self.inputs.out_dir,suffix=''))
return outputs
class VecRegInputSpec(FSLCommandInputSpec):
in_file = File(exists=True,argstr='-i %s',desc='filename for input vector or tensor field',
mandatory=True)
out_file = File(argstr='-o %s',desc='filename for output registered vector or tensor field',
genfile=True)
ref_vol = File(exists=True,argstr='-r %s',desc='filename for reference (target) volume',
mandatory=True)
affine_mat = File(exists=True,argstr='-t %s',desc='filename for affine transformation matrix')
warp_field = File(exists=True,argstr='-w %s',desc='filename for 4D warp field for nonlinear registration')
    rotation_mat = File(exists=True, argstr='--rotmat=%s', desc='filename for secondary affine matrix; '+
                  'if set, this will be used for the rotation of the vector/tensor field')
    rotation_warp = File(exists=True, argstr='--rotwarp=%s', desc='filename for secondary warp field; '+
                   'if set, this will be used for the rotation of the vector/tensor field')
interpolation = traits.Enum("nearestneighbour", "trilinear", "sinc", "spline",argstr='--interp=%s',desc='interpolation method : '+
'nearestneighbour, trilinear (default), sinc or spline')
mask = File(exists=True,argstr='-m %s',desc='brain mask in input space')
ref_mask = File(exists=True,argstr='--refmask=%s',desc='brain mask in output space '+
'(useful for speed up of nonlinear reg)')
class VecRegOutputSpec(TraitedSpec):
out_file = File(exists=True,desc='path/name of filename for the registered vector or tensor field')
class VecReg(FSLCommand):
"""Use FSL vecreg for registering vector data
For complete details, see the `FDT Documentation
<http://www.fmrib.ox.ac.uk/fsl/fdt/fdt_vecreg.html>`_
Example
-------
>>> from nipype.interfaces import fsl
>>> vreg = fsl.VecReg(in_file='diffusion.nii', \
affine_mat='trans.mat', \
ref_vol='mni.nii', \
out_file='diffusion_vreg.nii')
>>> vreg.cmdline
'vecreg -t trans.mat -i diffusion.nii -o diffusion_vreg.nii -r mni.nii'
"""
_cmd = 'vecreg'
input_spec = VecRegInputSpec
output_spec = VecRegOutputSpec
def _run_interface(self, runtime):
if not isdefined(self.inputs.out_file):
pth,base_name = os.path.split(self.inputs.in_file)
self.inputs.out_file = self._gen_fname(base_name,cwd=os.path.abspath(pth),
suffix = '_vreg')
return super(VecReg, self)._run_interface(runtime)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self.inputs.out_file
if not isdefined(outputs['out_file']) and isdefined(self.inputs.in_file):
pth,base_name = os.path.split(self.inputs.in_file)
outputs['out_file'] = self._gen_fname(base_name,cwd=os.path.abspath(pth),
suffix = '_vreg')
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._list_outputs()[name]
else:
return None
class ProjThreshInputSpec(FSLCommandInputSpec):
in_files = traits.List(File,exists=True,argstr='%s',desc='a list of input volumes',
mandatory=True,position=0)
threshold = traits.Int(argstr='%d',desc='threshold indicating minimum '+
'number of seed voxels entering this mask region',
mandatory=True,position=1)
class ProjThreshOuputSpec(TraitedSpec):
out_files = traits.List(File,exists=True,desc='path/name of output volume after thresholding')
class ProjThresh(FSLCommand):
"""Use FSL proj_thresh for thresholding some outputs of probtrack
For complete details, see the `FDT Documentation
<http://www.fmrib.ox.ac.uk/fsl/fdt/fdt_thresh.html>`_
Example
-------
>>> from nipype.interfaces import fsl
>>> ldir = ['seeds_to_M1.nii', 'seeds_to_M2.nii']
>>> pThresh = fsl.ProjThresh(in_files=ldir,threshold=3)
>>> pThresh.cmdline
'proj_thresh seeds_to_M1.nii seeds_to_M2.nii 3'
"""
_cmd = 'proj_thresh'
input_spec = ProjThreshInputSpec
output_spec = ProjThreshOuputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_files'] = []
for name in self.inputs.in_files:
cwd,base_name = os.path.split(name)
outputs['out_files'].append(self._gen_fname(base_name,cwd=cwd,suffix='_proj_seg_thr_'+
repr(self.inputs.threshold)))
return outputs
class FindTheBiggestInputSpec(FSLCommandInputSpec):
    in_files = traits.List(File, exists=True, argstr='%s', desc='a list of input volumes or a single matrix file',
position=0,mandatory=True)
out_file = File(argstr='%s',desc='file with the resulting segmentation',position=2,genfile=True)
class FindTheBiggestOutputSpec(TraitedSpec):
out_file = File(exists=True,argstr='%s',desc='output file indexed in order of input files')
class FindTheBiggest(FSLCommand):
"""
Use FSL find_the_biggest for performing hard segmentation on
the outputs of connectivity-based thresholding in probtrack.
For complete details, see the `FDT
Documentation. <http://www.fmrib.ox.ac.uk/fsl/fdt/fdt_biggest.html>`_
Example
-------
>>> from nipype.interfaces import fsl
>>> ldir = ['seeds_to_M1.nii', 'seeds_to_M2.nii']
>>> fBig = fsl.FindTheBiggest(in_files=ldir, out_file='biggestSegmentation')
>>> fBig.cmdline
'find_the_biggest seeds_to_M1.nii seeds_to_M2.nii biggestSegmentation'
"""
_cmd='find_the_biggest'
input_spec = FindTheBiggestInputSpec
output_spec = FindTheBiggestOutputSpec
def _run_interface(self, runtime):
if not isdefined(self.inputs.out_file):
self.inputs.out_file = self._gen_fname('biggestSegmentation',suffix='')
return super(FindTheBiggest, self)._run_interface(runtime)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self.inputs.out_file
if not isdefined(outputs['out_file']):
outputs['out_file'] = self._gen_fname('biggestSegmentation',suffix = '')
return outputs
def _gen_filename(self, name):
        if name == 'out_file':
return self._list_outputs()[name]
else:
return None
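# Hedged pipeline sketch (illustrative, not part of the module above): ProbTrackX
# seeds-to-target outputs are commonly thresholded with ProjThresh and then
# hard-segmented with FindTheBiggest; the file names are the same example ones
# used in the docstrings.
#   thresh = ProjThresh(in_files=['seeds_to_M1.nii', 'seeds_to_M2.nii'], threshold=3).run()
#   FindTheBiggest(in_files=thresh.outputs.out_files, out_file='biggestSegmentation').run()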
| {
"content_hash": "6cc444438cbd64366033cf10821f3f32",
"timestamp": "",
"source": "github",
"line_count": 779,
"max_line_length": 206,
"avg_line_length": 50.03209242618742,
"alnum_prop": 0.6096215522771007,
"repo_name": "satra/NiPypeold",
"id": "85c59fa61e630dcb3c46e79f0a9c2b2fc34a29cb",
"size": "39089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/interfaces/fsl/dti.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "931"
},
{
"name": "Objective-C",
"bytes": "4736"
},
{
"name": "Python",
"bytes": "1389618"
},
{
"name": "Tcl",
"bytes": "43377"
}
],
"symlink_target": ""
} |
from GitClient import GitClient
from JumpScale import j
class GitFactory:
def __init__(self):
j.logger.consolelogCategories.append("git")
def getClient(self, basedir, remoteUrl="", branchname='master', cleandir=False,login=None,passwd=None):
"""
        return a git client which helps you manipulate a git repository
        @param basedir: directory where the local git repository will be stored
        @param remoteUrl: remote url of the git repository, e.g. https://login:[email protected]/despiegk/ssospecs/ #DO NOT FORGET LOGIN PASSWD
"""
if not isinstance(cleandir, bool):
raise ValueError("cleandir needs to be boolean")
return GitClient(basedir, remoteUrl, branchname, cleandir,login=login,passwd=passwd)
def log(self,msg,category="",level=5):
category="git.%s"%category
category=category.rstrip(".")
j.logger.log(msg,category=category,level=level)
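# Hedged usage sketch (assumes a JumpScale environment where `j` is initialised;
# the path, URL and credentials below are placeholders):
#   factory = GitFactory()
#   client = factory.getClient(basedir='/opt/code/myrepo',
#                              remoteUrl='https://login:[email protected]/despiegk/ssospecs/',
#                              branchname='master', cleandir=False)
#   factory.log('attached git repository at /opt/code/myrepo', category='client')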
| {
"content_hash": "c8c52ec8cd91cb10dbfec8904908cb2b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 132,
"avg_line_length": 42.45454545454545,
"alnum_prop": 0.684154175588865,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "adf8719c51dc3f1650693e0c333702e2399b47d3",
"size": "934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/baselib/git/GitFactory.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
} |
"""Globals"""
from django.conf import settings
KEY_STUBS_OPEN_METEAR_API = "open_metear_api"
URL_STUBS_CHANGE_ROUTE_CONFIG = "%s/stubs/change_route_configuration/" % settings.SITE_URL
| {
"content_hash": "5e1d9c4935bf889d92a01ba7ffefcba7",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 90,
"avg_line_length": 26.714285714285715,
"alnum_prop": 0.7433155080213903,
"repo_name": "timevortexproject/timevortex",
"id": "7140d54dd9362cbc55187737acda63d952f4d32f",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "stubs/utils/globals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45763"
},
{
"name": "Cucumber",
"bytes": "10679"
},
{
"name": "HTML",
"bytes": "714"
},
{
"name": "JavaScript",
"bytes": "88987"
},
{
"name": "Python",
"bytes": "173584"
},
{
"name": "Ruby",
"bytes": "3140"
},
{
"name": "Shell",
"bytes": "511"
}
],
"symlink_target": ""
} |
"""
Support for interfacing with a Harman/Kardon or JBL AVR.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.harman_kardon_avr/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
SUPPORT_TURN_OFF, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP,
SUPPORT_TURN_ON, SUPPORT_SELECT_SOURCE)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, STATE_OFF, STATE_ON)
REQUIREMENTS = ['hkavr==0.0.5']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Harman Kardon AVR'
DEFAULT_PORT = 10025
SUPPORT_HARMAN_KARDON_AVR = SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_OFF | SUPPORT_TURN_ON | \
SUPPORT_SELECT_SOURCE
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
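# Hedged example of the matching configuration.yaml entry (the host value is a
# placeholder; name and port fall back to the defaults above when omitted):
#   media_player:
#     - platform: harman_kardon_avr
#       host: 192.168.1.50
#       name: Living Room AVR
#       port: 10025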
def setup_platform(hass, config, add_entities, discover_info=None):
"""Set up the AVR platform."""
import hkavr
name = config[CONF_NAME]
host = config[CONF_HOST]
port = config[CONF_PORT]
avr = hkavr.HkAVR(host, port, name)
avr_device = HkAvrDevice(avr)
add_entities([avr_device], True)
class HkAvrDevice(MediaPlayerDevice):
"""Representation of a Harman Kardon AVR / JBL AVR TV."""
def __init__(self, avr):
"""Initialize a new HarmanKardonAVR."""
self._avr = avr
self._name = avr.name
self._host = avr.host
self._port = avr.port
self._source_list = avr.sources
self._state = None
self._muted = avr.muted
self._current_source = avr.current_source
def update(self):
"""Update the state of this media_player."""
if self._avr.is_on():
self._state = STATE_ON
elif self._avr.is_off():
self._state = STATE_OFF
else:
self._state = None
self._muted = self._avr.muted
self._current_source = self._avr.current_source
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def is_volume_muted(self):
"""Muted status not available."""
return self._muted
@property
def source(self):
"""Return the current input source."""
return self._current_source
@property
def source_list(self):
"""Available sources."""
return self._source_list
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_HARMAN_KARDON_AVR
def turn_on(self):
"""Turn the AVR on."""
self._avr.power_on()
def turn_off(self):
"""Turn off the AVR."""
self._avr.power_off()
def select_source(self, source):
"""Select input source."""
return self._avr.select_source(source)
def volume_up(self):
"""Volume up the AVR."""
return self._avr.volume_up()
def volume_down(self):
"""Volume down AVR."""
return self._avr.volume_down()
def mute_volume(self, mute):
"""Send mute command."""
return self._avr.mute(mute)
| {
"content_hash": "86c96bd3eb9b7425c9fc12266b153c77",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 74,
"avg_line_length": 27.06766917293233,
"alnum_prop": 0.6191666666666666,
"repo_name": "PetePriority/home-assistant",
"id": "334757c086dbaf670512c8776d55e08dbc1cb8a0",
"size": "3600",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_player/harman_kardon_avr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1073"
},
{
"name": "Python",
"bytes": "13985647"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
} |
"""Common runtime ctypes."""
# pylint: disable=invalid-name
from __future__ import absolute_import
import ctypes
import json
import numpy as np
from .base import _LIB, check_call
from .. import _api_internal
tvm_shape_index_t = ctypes.c_int64
class TypeCode(object):
"""Type code used in API calls"""
INT = 0
UINT = 1
FLOAT = 2
HANDLE = 3
NULL = 4
TVM_TYPE = 5
TVM_CONTEXT = 6
ARRAY_HANDLE = 7
NODE_HANDLE = 8
MODULE_HANDLE = 9
FUNC_HANDLE = 10
STR = 11
BYTES = 12
NDARRAY_CONTAINER = 13
OBJECT = 14
EXT_BEGIN = 15
class TVMByteArray(ctypes.Structure):
"""Temp data structure for byte array."""
_fields_ = [("data", ctypes.POINTER(ctypes.c_byte)),
("size", ctypes.c_size_t)]
class TVMType(ctypes.Structure):
"""TVM datatype structure"""
_fields_ = [("type_code", ctypes.c_uint8),
("bits", ctypes.c_uint8),
("lanes", ctypes.c_uint16)]
CODE2STR = {
0 : 'int',
1 : 'uint',
2 : 'float',
4 : 'handle'
}
def __init__(self, type_str):
super(TVMType, self).__init__()
if isinstance(type_str, np.dtype):
type_str = str(type_str)
if type_str == "bool":
self.bits = 1
self.type_code = 1
self.lanes = 1
return
arr = type_str.split("x")
head = arr[0]
self.lanes = int(arr[1]) if len(arr) > 1 else 1
bits = 32
if head.startswith("int"):
self.type_code = 0
head = head[3:]
elif head.startswith("uint"):
self.type_code = 1
head = head[4:]
elif head.startswith("float"):
self.type_code = 2
head = head[5:]
elif head.startswith("handle"):
self.type_code = 4
bits = 64
head = ""
elif head.startswith("custom"):
low, high = head.find('['), head.find(']')
            if low == -1 or high == -1 or low >= high:
raise ValueError("Badly formatted custom type string %s" % type_str)
type_name = head[low + 1:high]
self.type_code = _api_internal._datatype_get_type_code(type_name)
head = head[high+1:]
else:
raise ValueError("Do not know how to handle type %s" % type_str)
bits = int(head) if head else bits
self.bits = bits
def __repr__(self):
if self.bits == 1 and self.lanes == 1:
return "bool"
if self.type_code in TVMType.CODE2STR:
type_name = TVMType.CODE2STR[self.type_code]
else:
type_name = "custom[%s]" % \
_api_internal._datatype_get_type_name(self.type_code)
x = "%s%d" % (type_name, self.bits)
if self.lanes != 1:
x += "x%d" % self.lanes
return x
def __eq__(self, other):
return (self.bits == other.bits and
self.type_code == other.type_code and
self.lanes == other.lanes)
def __ne__(self, other):
return not self.__eq__(other)
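# Illustrative parses (derived from the TVMType constructor above):
#   TVMType("float32")  -> type_code=2 (float), bits=32, lanes=1
#   TVMType("int8x4")   -> type_code=0 (int),   bits=8,  lanes=4
#   TVMType("bool")     == TVMType("uint1")    (bool is stored as a 1-bit uint)
# Custom strings such as "custom[mytype]32" (placeholder name) resolve their
# type_code through _api_internal._datatype_get_type_code, which expects the
# custom type to have been registered beforehand.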
RPC_SESS_MASK = 128
class TVMContext(ctypes.Structure):
"""TVM context strucure."""
_fields_ = [("device_type", ctypes.c_int),
("device_id", ctypes.c_int)]
MASK2STR = {
1 : 'cpu',
2 : 'gpu',
4 : 'opencl',
5 : 'aocl',
6 : 'sdaccel',
7 : 'vulkan',
8 : 'metal',
9 : 'vpi',
10: 'rocm',
11: 'opengl',
12: 'ext_dev',
}
STR2MASK = {
'llvm': 1,
'stackvm': 1,
'cpu': 1,
'c': 1,
'gpu': 2,
'cuda': 2,
'nvptx': 2,
'cl': 4,
'opencl': 4,
'aocl' : 5,
'aocl_sw_emu' : 5,
'sdaccel': 6,
'vulkan': 7,
'metal': 8,
'vpi': 9,
'rocm': 10,
'opengl': 11,
'ext_dev': 12,
}
def __init__(self, device_type, device_id):
super(TVMContext, self).__init__()
self.device_type = device_type
self.device_id = device_id
@property
def exist(self):
"""Whether this device exist."""
return _api_internal._GetDeviceAttr(
self.device_type, self.device_id, 0) != 0
@property
def max_threads_per_block(self):
"""Maximum number of threads on each block."""
return _api_internal._GetDeviceAttr(
self.device_type, self.device_id, 1)
@property
def warp_size(self):
"""Number of threads that executes in concurrent."""
return _api_internal._GetDeviceAttr(
self.device_type, self.device_id, 2)
@property
def max_shared_memory_per_block(self):
"""Total amount of shared memory per block in bytes."""
return _api_internal._GetDeviceAttr(
self.device_type, self.device_id, 3)
@property
def compute_version(self):
"""Get compute verison number in string.
Currently used to get compute capability of CUDA device.
Returns
-------
version : str
The version string in `major.minor` format.
"""
return _api_internal._GetDeviceAttr(
self.device_type, self.device_id, 4)
@property
def device_name(self):
"""Return the string name of device."""
return _api_internal._GetDeviceAttr(
self.device_type, self.device_id, 5)
@property
def max_clock_rate(self):
"""Return the max clock frequency of device."""
return _api_internal._GetDeviceAttr(
self.device_type, self.device_id, 6)
@property
def multi_processor_count(self):
"""Return the number of compute units of device."""
return _api_internal._GetDeviceAttr(
self.device_type, self.device_id, 7)
@property
def max_thread_dimensions(self):
"""Return the maximum size of each thread axis
Returns
-------
dims: List of int
The maximum length of threadIdx.x, threadIdx.y, threadIdx.z
"""
return json.loads(_api_internal._GetDeviceAttr(
self.device_type, self.device_id, 8))
def sync(self):
"""Synchronize until jobs finished at the context."""
check_call(_LIB.TVMSynchronize(self.device_type, self.device_id, None))
def __eq__(self, other):
return (isinstance(other, TVMContext) and
self.device_id == other.device_id and
self.device_type == other.device_type)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
if self.device_type >= RPC_SESS_MASK:
            tbl_id = self.device_type // RPC_SESS_MASK - 1
dev_type = self.device_type % RPC_SESS_MASK
return "remote[%d]:%s(%d)" % (
tbl_id, TVMContext.MASK2STR[dev_type], self.device_id)
return "%s(%d)" % (
TVMContext.MASK2STR[self.device_type], self.device_id)
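# Hedged usage sketch for the context structure above:
#   cpu0 = TVMContext(1, 0)                            # repr() -> "cpu(0)"
#   gpu1 = TVMContext(TVMContext.STR2MASK['cuda'], 1)  # repr() -> "gpu(1)"
#   cpu0 == TVMContext(1, 0)                           # True (same type and id)
# Device types at or above RPC_SESS_MASK (128) denote devices reached through an
# RPC session and render as "remote[<table-index>]:<device>(<id>)".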
class TVMArray(ctypes.Structure):
"""TVMValue in C API"""
_fields_ = [("data", ctypes.c_void_p),
("ctx", TVMContext),
("ndim", ctypes.c_int),
("dtype", TVMType),
("shape", ctypes.POINTER(tvm_shape_index_t)),
("strides", ctypes.POINTER(tvm_shape_index_t)),
("byte_offset", ctypes.c_uint64)]
TVMArrayHandle = ctypes.POINTER(TVMArray)
class TVMNDArrayContainer(ctypes.Structure):
"""TVM NDArray::Container"""
_fields_ = [("dl_tensor", TVMArray),
("manager_ctx", ctypes.c_void_p),
("deleter", ctypes.c_void_p),
("array_type_info", ctypes.c_int32)]
TVMNDArrayContainerHandle = ctypes.POINTER(TVMNDArrayContainer)
| {
"content_hash": "c95cba26a0e819cec737938519d9cecd",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 84,
"avg_line_length": 29.528301886792452,
"alnum_prop": 0.5304792332268371,
"repo_name": "mlperf/training_results_v0.7",
"id": "72cff1a10eadbd7d75c23276b043e47cf2d88d16",
"size": "8610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/python/tvm/_ffi/runtime_ctypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
} |
import Adafruit_Nokia_LCD as LCD
import Adafruit_GPIO.SPI as SPI
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from re import sub
# Edison software SPI config:
SCLK = 35 # 10
DIN = 26 # 11
DC = 25 # 32
RST = 45 # 46
CS = 31 # 23
disp = LCD.PCD8544(DC, RST, SCLK, DIN, CS)
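# contrast.txt is expected to contain a single integer: the LCD contrast value.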
with open('contrast.txt', "r") as f:
contrast = int(sub('\\n', '', f.read()))
disp.begin(contrast = contrast)
image = Image.open('splash.png').resize((LCD.LCDWIDTH, LCD.LCDHEIGHT), Image.ANTIALIAS).convert('1')
draw = ImageDraw.Draw(image)
disp.image(image)
disp.display()
| {
"content_hash": "31e987504572a5bdce4921282070cd81",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 100,
"avg_line_length": 24.375,
"alnum_prop": 0.6905982905982906,
"repo_name": "projectbuendia/server-status",
"id": "01b78579525ad7dec2895be9788bea08c2cdf83c",
"size": "608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "splash_screen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "195458"
},
{
"name": "R",
"bytes": "1111"
},
{
"name": "Shell",
"bytes": "6172"
}
],
"symlink_target": ""
} |
"""A library containing exception types used by Endpoints ProtoRPC services."""
import httplib
from protorpc import remote
class ServiceException(remote.ApplicationError):
"""Base class for request/service exceptions in Endpoints."""
def __init__(self, message=None):
super(ServiceException, self).__init__(message,
httplib.responses[self.http_status])
class BadRequestException(ServiceException):
"""Bad request exception that is mapped to a 400 response."""
http_status = httplib.BAD_REQUEST
class UnauthorizedException(ServiceException):
"""Unauthorized exception that is mapped to a 401 response."""
http_status = httplib.UNAUTHORIZED
class ForbiddenException(ServiceException):
"""Forbidden exception that is mapped to a 403 response."""
http_status = httplib.FORBIDDEN
class NotFoundException(ServiceException):
"""Not found exception that is mapped to a 404 response."""
http_status = httplib.NOT_FOUND
class ConflictException(ServiceException):
"""Conflict exception that is mapped to a 409 response."""
http_status = httplib.CONFLICT
class GoneException(ServiceException):
"""Resource Gone exception that is mapped to a 410 response."""
http_status = httplib.GONE
class PreconditionFailedException(ServiceException):
"""Precondition Failed exception that is mapped to a 412 response."""
http_status = httplib.PRECONDITION_FAILED
class RequestEntityTooLargeException(ServiceException):
"""Request entity too large exception that is mapped to a 413 response."""
http_status = httplib.REQUEST_ENTITY_TOO_LARGE
class InternalServerErrorException(ServiceException):
"""Internal server exception that is mapped to a 500 response."""
http_status = httplib.INTERNAL_SERVER_ERROR
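# Hedged usage sketch (hypothetical endpoints method, not part of this module):
# raising one of the classes above lets the framework translate the error into
# the matching HTTP status, e.g.
#   def get_item(self, request):
#       item = _lookup(request.id)         # _lookup is a placeholder helper
#       if item is None:
#           raise NotFoundException('Item %s was not found' % request.id)
#       return item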
| {
"content_hash": "4ccfbdaa72df80bd448e08a2579bc29c",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 79,
"avg_line_length": 28.93548387096774,
"alnum_prop": 0.750278706800446,
"repo_name": "justingrayston/gae-python-endpoints-patch",
"id": "25c6513ff33ff705ac1ab4c191fee7a0d0700848",
"size": "2395",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "endpoints-1.0/endpoints/api_exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "135881"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "indicator.gauge.axis"
_path_str = "indicator.gauge.axis.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
# dtickrange
# ----------
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
# enabled
# -------
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# value
# -----
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.indicator.gaug
e.axis.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super(Tickformatstop, self).__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.indicator.gauge.axis.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.gauge.axis.Tickformatstop`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("dtickrange", None)
_v = dtickrange if dtickrange is not None else _v
if _v is not None:
self["dtickrange"] = _v
_v = arg.pop("enabled", None)
_v = enabled if enabled is not None else _v
if _v is not None:
self["enabled"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
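# Hedged construction sketch: a stop that applies the "%H:%M" tick format when
# dtick lies between one minute and one hour (millisecond units are an
# assumption about the axis data being plotted):
#   stop = Tickformatstop(dtickrange=[60000, 3600000], value="%H:%M",
#                         enabled=True, name="minute-range")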
| {
"content_hash": "35f2d68f9a639bf2f0f37e6cb583f00d",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 82,
"avg_line_length": 33.745583038869256,
"alnum_prop": 0.5631413612565445,
"repo_name": "plotly/python-api",
"id": "c1229fce583d3e09171760092e83644dab96ac5a",
"size": "9550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/indicator/gauge/axis/_tickformatstop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import os
import pytest
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def fixtures_dir():
return os.path.join(TESTS_DIR, 'fixtures')
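# Hedged example of a test consuming the fixture above (hypothetical test module):
#   def test_fixtures_dir_exists(fixtures_dir):
#       assert os.path.isdir(fixtures_dir)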
| {
"content_hash": "bbaccb2b706d0ab817ff2472333b1b3d",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 54,
"avg_line_length": 18.333333333333332,
"alnum_prop": 0.7090909090909091,
"repo_name": "red-hat-storage/rhcephcompose",
"id": "b217a51a0479bfb18f18871cebe6f245fca14274",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rhcephcompose/tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53761"
},
{
"name": "Shell",
"bytes": "691"
}
],
"symlink_target": ""
} |
"""
sentry.conf.server
~~~~~~~~~~~~~~~~~~
These settings act as the default (base) settings for the Sentry-provided web-server
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf.global_settings import * # NOQA
from datetime import timedelta
import hashlib
import os
import os.path
import socket
import sys
import tempfile
import urlparse
import sentry
gettext_noop = lambda s: s
socket.setdefaulttimeout(5)
DEBUG = False
TEMPLATE_DEBUG = True
MAINTENANCE = False
ADMINS = ()
INTERNAL_IPS = ('127.0.0.1',)
MANAGERS = ADMINS
APPEND_SLASH = True
PROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))
NODE_MODULES_ROOT = os.path.join(PROJECT_ROOT, os.pardir, os.pardir, 'node_modules')
sys.path.insert(0, os.path.normpath(os.path.join(PROJECT_ROOT, os.pardir)))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'sentry.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
'AUTOCOMMIT': True,
'ATOMIC_REQUESTS': False,
}
}
if 'DATABASE_URL' in os.environ:
url = urlparse.urlparse(os.environ['DATABASE_URL'])
# Ensure default database exists.
DATABASES['default'] = DATABASES.get('default', {})
# Update with environment configuration.
DATABASES['default'].update({
'NAME': url.path[1:],
'USER': url.username,
'PASSWORD': url.password,
'HOST': url.hostname,
'PORT': url.port,
})
if url.scheme == 'postgres':
DATABASES['default']['ENGINE'] = 'sentry.db.postgres'
if url.scheme == 'mysql':
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
EMAIL_SUBJECT_PREFIX = '[Sentry] '
# This should always be UTC.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sv-se', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
    ('zh-tw', gettext_noop('Traditional Chinese')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
USE_TZ = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = hashlib.md5(socket.gethostname() + ')*)&8a36)6%74e@-ne5(-!8a(vv#tkv)(eyg&@0=zd^pl!7=y@').hexdigest()
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'sentry.middleware.maintenance.ServicesUnavailableMiddleware',
'sentry.middleware.env.SentryEnvMiddleware',
'sentry.middleware.proxy.SetRemoteAddrFromForwardedFor',
'sentry.middleware.debug.NoIfModifiedSinceMiddleware',
'sentry.middleware.stats.RequestTimingMiddleware',
'sentry.middleware.stats.ResponseCodeMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'sentry.middleware.auth.AuthenticationMiddleware',
'sentry.middleware.sudo.SudoMiddleware',
'sentry.middleware.locale.SentryLocaleMiddleware',
'sentry.middleware.social_auth.SentrySocialAuthExceptionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'sentry.debug.middleware.DebugMiddleware',
)
ROOT_URLCONF = 'sentry.conf.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.csrf',
'social_auth.context_processors.social_auth_by_name_backends',
'social_auth.context_processors.social_auth_backends',
'social_auth.context_processors.social_auth_by_type_backends',
'social_auth.context_processors.social_auth_login_redirect'
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'captcha',
'crispy_forms',
'debug_toolbar',
'gunicorn',
'kombu.transport.django',
'raven.contrib.django.raven_compat',
'rest_framework',
'sentry',
'sentry.nodestore',
'sentry.search',
'sentry.lang.javascript',
'sentry.plugins.sentry_interface_types',
'sentry.plugins.sentry_mail',
'sentry.plugins.sentry_urls',
'sentry.plugins.sentry_useragents',
'sentry.plugins.sentry_webhooks',
'social_auth',
'south',
'sudo',
)
STATIC_ROOT = os.path.realpath(os.path.join(PROJECT_ROOT, 'static'))
STATIC_URL = '/_static/'
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
ASSET_VERSION = 0
# setup a default media root to somewhere useless
MEDIA_ROOT = '/tmp/sentry-media'
LOCALE_PATHS = (
os.path.join(PROJECT_ROOT, 'locale'),
)
CSRF_FAILURE_VIEW = 'sentry.web.frontend.csrf_failure.view'
CSRF_COOKIE_NAME = 'csrf'
# Auth configuration
try:
from django.core.urlresolvers import reverse_lazy
except ImportError:
LOGIN_REDIRECT_URL = '/login-redirect/'
LOGIN_URL = '/auth/login/'
else:
LOGIN_REDIRECT_URL = reverse_lazy('sentry-login-redirect')
LOGIN_URL = reverse_lazy('sentry-login')
AUTHENTICATION_BACKENDS = (
'social_auth.backends.twitter.TwitterBackend',
'social_auth.backends.facebook.FacebookBackend',
# TODO: migrate to GoogleOAuth2Backend
'social_auth.backends.google.GoogleBackend',
'social_auth.backends.contrib.github.GithubBackend',
'social_auth.backends.contrib.bitbucket.BitbucketBackend',
'social_auth.backends.contrib.trello.TrelloBackend',
'sentry.utils.auth.EmailAuthBackend',
)
SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL = 'sentry.User'
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
SESSION_COOKIE_NAME = "sentrysid"
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
FACEBOOK_EXTENDED_PERMISSIONS = ['email']
GOOGLE_OAUTH2_CLIENT_ID = ''
GOOGLE_OAUTH2_CLIENT_SECRET = ''
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
TRELLO_API_KEY = ''
TRELLO_API_SECRET = ''
BITBUCKET_CONSUMER_KEY = ''
BITBUCKET_CONSUMER_SECRET = ''
MAILGUN_API_KEY = ''
SOCIAL_AUTH_PIPELINE = (
'social_auth.backends.pipeline.user.get_username',
'social_auth.backends.pipeline.social.social_auth_user',
'social_auth.backends.pipeline.associate.associate_by_email',
'social_auth.backends.pipeline.misc.save_status_to_session',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details',
'social_auth.backends.pipeline.misc.save_status_to_session',
)
INITIAL_CUSTOM_USER_MIGRATION = '0108_fix_user'
# Auth engines and the settings required for them to be listed
AUTH_PROVIDERS = {
'github': ('GITHUB_APP_ID', 'GITHUB_API_SECRET'),
'trello': ('TRELLO_API_KEY', 'TRELLO_API_SECRET'),
'bitbucket': ('BITBUCKET_CONSUMER_KEY', 'BITBUCKET_CONSUMER_SECRET'),
}
import random
SOCIAL_AUTH_DEFAULT_USERNAME = lambda: random.choice(['Darth Vader', 'Obi-Wan Kenobi', 'R2-D2', 'C-3PO', 'Yoda'])
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email']
# Queue configuration
from kombu import Exchange, Queue
BROKER_URL = "django://"
BROKER_TRANSPORT_OPTIONS = {}
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
CELERY_IGNORE_RESULT = True
CELERY_SEND_EVENTS = False
CELERY_RESULT_BACKEND = None
CELERY_TASK_RESULT_EXPIRES = 1
CELERY_DISABLE_RATE_LIMITS = True
CELERY_DEFAULT_QUEUE = "default"
CELERY_DEFAULT_EXCHANGE = "default"
CELERY_DEFAULT_EXCHANGE_TYPE = "direct"
CELERY_DEFAULT_ROUTING_KEY = "default"
CELERY_CREATE_MISSING_QUEUES = True
CELERY_IMPORTS = (
'sentry.tasks.beacon',
'sentry.tasks.check_auth',
'sentry.tasks.deletion',
'sentry.tasks.email',
'sentry.tasks.index',
'sentry.tasks.merge',
'sentry.tasks.store',
'sentry.tasks.options',
'sentry.tasks.ping',
'sentry.tasks.post_process',
'sentry.tasks.process_buffer',
'sentry.tasks.sync_docs',
)
CELERY_QUEUES = [
Queue('default', routing_key='default'),
Queue('alerts', routing_key='alerts'),
Queue('auth', routing_key='auth'),
Queue('cleanup', routing_key='cleanup'),
Queue('search', routing_key='search'),
Queue('events', routing_key='events'),
Queue('update', routing_key='update'),
Queue('email', routing_key='email'),
Queue('options', routing_key='options'),
]
CELERY_ROUTES = ('sentry.queue.routers.SplitQueueRouter',)
def create_partitioned_queues(name):
exchange = Exchange(name, type='direct')
for num in range(1):
CELERY_QUEUES.append(Queue(
'{0}-{1}'.format(name, num),
exchange=exchange,
))
create_partitioned_queues('counters')
create_partitioned_queues('triggers')
CELERYBEAT_SCHEDULE_FILENAME = os.path.join(tempfile.gettempdir(), 'sentry-celerybeat')
CELERYBEAT_SCHEDULE = {
'check-auth': {
'task': 'sentry.tasks.check_auth',
'schedule': timedelta(minutes=1),
'options': {
'expires': 60,
'queue': 'auth',
}
},
'send-beacon': {
'task': 'sentry.tasks.send_beacon',
'schedule': timedelta(hours=1),
'options': {
'expires': 3600,
},
},
'send-ping': {
'task': 'sentry.tasks.send_ping',
'schedule': timedelta(minutes=1),
'options': {
'expires': 60,
},
},
'flush-buffers': {
'task': 'sentry.tasks.process_buffer.process_pending',
'schedule': timedelta(seconds=10),
'options': {
'expires': 10,
'queue': 'counters-0',
}
},
'sync-docs': {
'task': 'sentry.tasks.sync_docs',
'schedule': timedelta(seconds=3600),
'options': {
'expires': 3600,
'queue': 'update',
}
},
'sync-options': {
'task': 'sentry.tasks.options.sync_options',
'schedule': timedelta(seconds=10),
'options': {
'expires': 10,
'queue': 'options',
}
},
}
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'console': {
'level': 'WARNING',
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'sentry': {
'level': 'ERROR',
'filters': ['sentry:internal'],
'class': 'raven.contrib.django.handlers.SentryHandler',
},
'audit': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'console:api': {
'level': 'WARNING',
'class': 'logging.StreamHandler',
'formatter': 'client_info',
},
},
'filters': {
'sentry:internal': {
'()': 'sentry.utils.raven.SentryInternalFilter',
},
},
'formatters': {
'simple': {
'format': '[%(levelname)s] %(message)s',
},
'client_info': {
'format': '[%(levelname)s] [%(project)s] [%(agent)s] %(message)s',
},
},
'root': {
'handlers': ['console', 'sentry'],
},
'loggers': {
'sentry': {
'level': 'ERROR',
},
'sentry.api': {
'handlers': ['console:api', 'sentry'],
'propagate': False,
},
'sentry.deletions': {
'handlers': ['audit'],
},
'sentry.errors': {
'handlers': ['console'],
'propagate': False,
},
'sentry.rules': {
'handlers': ['console'],
'propagate': False,
},
'static_compiler': {
'level': 'INFO',
},
'django.request': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'toronado.cssutils': {
'level': 'ERROR',
'propagate': False,
},
}
}
# django-rest-framework
REST_FRAMEWORK = {
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
'DEFAULT_PERMISSION_CLASSES': (
'sentry.api.permissions.NoPermission',
)
}
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# django-recaptcha
RECAPTCHA_PUBLIC_KEY = None
RECAPTCHA_PRIVATE_KEY = None
NOCAPTCHA = True
CAPTCHA_WIDGET_TEMPLATE = "sentry/partial/form_captcha.html"
# Debugger
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.timer.TimerPanel',
'sentry.debug.panels.route.RoutePanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.sql.SQLPanel',
# TODO(dcramer): https://github.com/getsentry/sentry/issues/1722
# 'sentry.debug.panels.redis.RedisPanel',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Sentry and Raven configuration
SENTRY_CLIENT = 'sentry.utils.raven.SentryInternalClient'
SENTRY_FEATURES = {
'auth:register': True,
'organizations:create': True,
'organizations:sso': True,
'projects:quotas': True,
'projects:user-reports': True,
'projects:plugins': True,
}
# Default time zone for localization in the UI.
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
SENTRY_DEFAULT_TIME_ZONE = 'UTC'
# Enable the Sentry Debugger (Beta)
SENTRY_DEBUGGER = False
SENTRY_IGNORE_EXCEPTIONS = (
'OperationalError',
)
# Absolute URL to the sentry root directory. Should not include a trailing slash.
SENTRY_URL_PREFIX = ''
# Should we send the beacon to the upstream server?
SENTRY_BEACON = True
# The administrative contact for this installation
SENTRY_ADMIN_EMAIL = ''
# Allow access to Sentry without authentication.
SENTRY_PUBLIC = False
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = False
# Login url (defaults to LOGIN_URL)
SENTRY_LOGIN_URL = None
# Default project ID (for internal errors)
SENTRY_PROJECT = 1
# Project ID for recording frontend (javascript) exceptions
SENTRY_FRONTEND_PROJECT = None
# Only store a portion of all messages per unique group.
SENTRY_SAMPLE_DATA = True
# The following values control the sampling rates
SENTRY_SAMPLE_RATES = (
# up until N events, store 1 in M
(50, 1),
(1000, 2),
(10000, 10),
(100000, 50),
(1000000, 300),
(10000000, 2000),
)
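# Reading the table above (per the "up until N events, store 1 in M" rule): all
# of the first 50 events in a group are stored, between 50 and 1000 events
# roughly 1 in 2 is stored, and so on through the rows, ending at 1 in 2000 once
# a group approaches ten million events.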
SENTRY_MAX_SAMPLE_RATE = 10000
SENTRY_SAMPLE_TIMES = (
(3600, 1),
(360, 10),
(60, 60),
)
SENTRY_MAX_SAMPLE_TIME = 10000
# Web Service
SENTRY_WEB_HOST = 'localhost'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {}
# SMTP Service
SENTRY_ENABLE_EMAIL_REPLIES = False
SENTRY_SMTP_HOSTNAME = 'localhost'
SENTRY_SMTP_HOST = 'localhost'
SENTRY_SMTP_PORT = 1025
SENTRY_INTERFACES = {
'exception': 'sentry.interfaces.exception.Exception',
'logentry': 'sentry.interfaces.message.Message',
'request': 'sentry.interfaces.http.Http',
'stacktrace': 'sentry.interfaces.stacktrace.Stacktrace',
'template': 'sentry.interfaces.template.Template',
'query': 'sentry.interfaces.query.Query',
'user': 'sentry.interfaces.user.User',
'csp': 'sentry.interfaces.csp.Csp',
'sentry.interfaces.Exception': 'sentry.interfaces.exception.Exception',
'sentry.interfaces.Message': 'sentry.interfaces.message.Message',
'sentry.interfaces.Stacktrace': 'sentry.interfaces.stacktrace.Stacktrace',
'sentry.interfaces.Template': 'sentry.interfaces.template.Template',
'sentry.interfaces.Query': 'sentry.interfaces.query.Query',
'sentry.interfaces.Http': 'sentry.interfaces.http.Http',
'sentry.interfaces.User': 'sentry.interfaces.user.User',
'sentry.interfaces.Csp': 'sentry.interfaces.csp.Csp',
}
# Should users without superuser permissions be allowed to
# make projects public
SENTRY_ALLOW_PUBLIC_PROJECTS = True
# Can users be invited to organizations?
SENTRY_ENABLE_INVITES = True
# Default to not sending the Access-Control-Allow-Origin header on api/store
SENTRY_ALLOW_ORIGIN = None
# Enable scraping of javascript context for source code
SENTRY_SCRAPE_JAVASCRIPT_CONTEXT = True
# Redis connection information (see Nydus documentation)
SENTRY_REDIS_OPTIONS = {}
# Buffer backend
SENTRY_BUFFER = 'sentry.buffer.Buffer'
SENTRY_BUFFER_OPTIONS = {}
# Cache backend
# XXX: We explicitly require the cache to be configured as its not optional
# and causes serious confusion with the default django cache
SENTRY_CACHE = None
SENTRY_CACHE_OPTIONS = {}
# The internal Django cache is still used in many places
# TODO(dcramer): convert uses over to Sentry's backend
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
# The cache version affects both Django's internal cache (at runtime) as well
# as Sentry's cache. This automatically overrides VERSION on the default
# CACHES backend.
CACHE_VERSION = 1
# Quota backend
SENTRY_QUOTAS = 'sentry.quotas.Quota'
SENTRY_QUOTA_OPTIONS = {}
# Rate limiting backend
SENTRY_RATELIMITER = 'sentry.ratelimits.base.RateLimiter'
SENTRY_RATELIMITER_OPTIONS = {}
# The default value for project-level quotas
SENTRY_DEFAULT_MAX_EVENTS_PER_MINUTE = '90%'
# The maximum number of events per minute the system should accept.
SENTRY_SYSTEM_MAX_EVENTS_PER_MINUTE = 0
# Node storage backend
SENTRY_NODESTORE = 'sentry.nodestore.django.DjangoNodeStorage'
SENTRY_NODESTORE_OPTIONS = {}
# Search backend
SENTRY_SEARCH = 'sentry.search.django.DjangoSearchBackend'
SENTRY_SEARCH_OPTIONS = {}
# SENTRY_SEARCH_OPTIONS = {
# 'urls': ['http://localhost:9200/'],
# 'timeout': 5,
# }
# Time-series storage backend
SENTRY_TSDB = 'sentry.tsdb.dummy.DummyTSDB'
SENTRY_TSDB_OPTIONS = {}
# rollups must be ordered from highest granularity to lowest
SENTRY_TSDB_ROLLUPS = (
# (time in seconds, samples to keep)
(10, 360), # 60 minutes at 10 seconds
(3600, 24 * 7), # 7 days at 1 hour
(3600 * 24, 60), # 60 days at 1 day
)
# File storage
SENTRY_FILESTORE = 'django.core.files.storage.FileSystemStorage'
SENTRY_FILESTORE_OPTIONS = {'location': '/tmp/sentry-files'}
# Internal metrics
SENTRY_METRICS_BACKEND = 'sentry.metrics.dummy.DummyMetricsBackend'
SENTRY_METRICS_OPTIONS = {}
SENTRY_METRICS_SAMPLE_RATE = 1.0
SENTRY_METRICS_PREFIX = 'sentry.'
# URL to embed in js documentation
SENTRY_RAVEN_JS_URL = 'cdn.ravenjs.com/1.1.20/jquery,native/raven.min.js'
# URI Prefixes for generating DSN URLs
# (Defaults to URL_PREFIX)
SENTRY_ENDPOINT = None
SENTRY_PUBLIC_ENDPOINT = None
# Prevent variables (e.g. context locals, http data, etc) from exceeding this
# size in characters
SENTRY_MAX_VARIABLE_SIZE = 512
# Prevent variables within extra context from exceeding this size in
# characters
SENTRY_MAX_EXTRA_VARIABLE_SIZE = 4096
# For changing the amount of data seen in Http Response Body part.
SENTRY_MAX_HTTP_BODY_SIZE = 4096 * 4 # 16kb
# For various attributes we don't limit the entire attribute on size, but the
# individual item. In those cases we also want to limit the maximum number of
# keys
SENTRY_MAX_DICTIONARY_ITEMS = 50
SENTRY_MAX_MESSAGE_LENGTH = 1024 * 8
SENTRY_MAX_STACKTRACE_FRAMES = 25
SENTRY_MAX_EXCEPTIONS = 25
# Gravatar service base url
SENTRY_GRAVATAR_BASE_URL = 'https://secure.gravatar.com'
# Timeout (in seconds) for fetching remote source files (e.g. JS)
SENTRY_SOURCE_FETCH_TIMEOUT = 5
# http://en.wikipedia.org/wiki/Reserved_IP_addresses
SENTRY_DISALLOWED_IPS = (
'0.0.0.0/8',
'10.0.0.0/8',
'100.64.0.0/10',
'127.0.0.0/8',
'169.254.0.0/16',
'172.16.0.0/12',
'192.0.0.0/29',
'192.0.2.0/24',
'192.88.99.0/24',
'192.168.0.0/16',
'198.18.0.0/15',
'198.51.100.0/24',
'224.0.0.0/4',
'240.0.0.0/4',
'255.255.255.255/32',
)
# Fields which managed users cannot change via Sentry UI. Username and password
# cannot be changed by managed users. Optionally include 'email' and
# 'first_name' in SENTRY_MANAGED_USER_FIELDS.
SENTRY_MANAGED_USER_FIELDS = ('email',)
# See sentry/options/__init__.py for more information
SENTRY_OPTIONS = {}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = False
# Delay (in ms) to induce on API responses
SENTRY_API_RESPONSE_DELAY = 0
# Watchers for various application purposes (such as compiling static media)
SENTRY_WATCHERS = (
[os.path.join(NODE_MODULES_ROOT, '.bin', 'webpack'), '-d', '--watch',
"--config={}".format(os.path.join(PROJECT_ROOT, os.pardir, os.pardir, "webpack.config.js"))],
)
def get_raven_config():
return {
'release': sentry.__build__,
'register_signals': True,
'include_paths': [
'sentry',
],
}
RAVEN_CONFIG = get_raven_config()
| {
"content_hash": "7abf6141d1d1618de90b1770df689a48",
"timestamp": "",
"source": "github",
"line_count": 838,
"max_line_length": 113,
"avg_line_length": 29.011933174224342,
"alnum_prop": 0.6535867061533399,
"repo_name": "BayanGroup/sentry",
"id": "38ac7714a33cdc3573f545c04aa08f71552c789c",
"size": "24312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/conf/server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "156607"
},
{
"name": "HTML",
"bytes": "188852"
},
{
"name": "JavaScript",
"bytes": "443758"
},
{
"name": "Makefile",
"bytes": "4647"
},
{
"name": "Python",
"bytes": "7069971"
}
],
"symlink_target": ""
} |
"""Utility to retrieve function args."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import time
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _is_bounded_method(fn):
_, fn = tf_decorator.unwrap(fn)
return tf_inspect.ismethod(fn) and (fn.__self__ is not None)
def _is_callable_object(obj):
return hasattr(obj, '__call__') and tf_inspect.ismethod(obj.__call__)
def fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
if isinstance(fn, functools.partial):
args = fn_args(fn.func)
args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])]
else:
if _is_callable_object(fn):
fn = fn.__call__
args = tf_inspect.getfullargspec(fn).args
if _is_bounded_method(fn):
args.remove('self')
return tuple(args)
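# Illustrative example (not part of the original module): `fn_args` resolves
# argument names through `functools.partial` wrappers, dropping any names that
# are already bound by keyword.
#
#   def model_fn(features, labels, mode, params):
#     pass
#
#   fn_args(model_fn)                                # ('features', 'labels', 'mode', 'params')
#   fn_args(functools.partial(model_fn, params={}))  # ('features', 'labels', 'mode')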
# When we create a timestamped directory, there is a small chance that the
# directory already exists because another process is also creating these
# directories. In this case we just wait one second to get a new timestamp and
# try again. If this fails several times in a row, then something is seriously
# wrong.
MAX_DIRECTORY_CREATION_ATTEMPTS = 10
def get_timestamped_dir(dir_base):
"""Builds a path to a new subdirectory within the base directory.
The subdirectory will be named using the current time.
This guarantees monotonically increasing directory numbers even across
multiple runs of the pipeline.
The timestamp used is the number of seconds since epoch UTC.
Args:
dir_base: A string containing a directory to create the subdirectory under.
Returns:
The full path of the new subdirectory (which is not actually created yet).
Raises:
RuntimeError: if repeated attempts fail to obtain a unique timestamped
directory name.
"""
attempts = 0
while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:
timestamp = int(time.time())
result_dir = os.path.join(
compat.as_bytes(dir_base), compat.as_bytes(str(timestamp)))
if not gfile.Exists(result_dir):
# Collisions are still possible (though extremely unlikely): this
      # directory is not actually created yet, but it will be created almost
      # instantly on return from this function.
return result_dir
time.sleep(1)
attempts += 1
logging.warn('Directory {} already exists; retrying (attempt {}/{})'.format(
result_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))
raise RuntimeError('Failed to obtain a unique export directory name after '
'{} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))
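# Illustrative example (not part of the original module): because both path
# components are passed through `compat.as_bytes`, the returned path is a
# bytes object, e.g. get_timestamped_dir('/tmp/exports') might return
# b'/tmp/exports/1530000000' (the exact suffix depends on the current clock).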
| {
"content_hash": "463fd66b846bc1f17c3debd2479aca24",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 32.88172043010753,
"alnum_prop": 0.7164813603662524,
"repo_name": "allenlavoie/tensorflow",
"id": "bb4bdd3fdfb2e19dbc1c581d7771f2e1ac4442ba",
"size": "3748",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/estimator/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "340645"
},
{
"name": "C++",
"bytes": "40746519"
},
{
"name": "CMake",
"bytes": "198073"
},
{
"name": "Go",
"bytes": "1047216"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "735737"
},
{
"name": "Jupyter Notebook",
"bytes": "2117270"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "34933340"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "426884"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
} |
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import re
import sys
import threading
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import errors
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_stack
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.tf_export import tf_export
# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
_USE_C_SHAPES = os.getenv("TF_C_API_GRAPH_CONSTRUCTION_SHAPES", "1") != "0"
def tensor_id(tensor):
"""Returns a unique identifier for this Tensor."""
return tensor._id # pylint: disable=protected-access
class _UserDeviceSpec(object):
"""Store user-specified device and provide computation of merged device."""
def __init__(self, device_name_or_function):
self._device_name_or_function = device_name_or_function
self.display_name = str(self._device_name_or_function)
if callable(self._device_name_or_function):
dev_func = self._device_name_or_function
func_name = function_utils.get_func_name(dev_func)
func_code = function_utils.get_func_code(dev_func)
if func_code:
fname = func_code.co_filename
lineno = func_code.co_firstlineno
else:
fname = "unknown"
lineno = -1
self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)
self.function = self._device_name_or_function
if not (self._device_name_or_function is None or
callable(self._device_name_or_function)):
self.function = pydev.merge_device(self._device_name_or_function)
class _NullContextmanager(object):
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
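# Illustrative sketch (not part of the original module): operator overloads on
# `Tensor` are installed elsewhere (e.g. by the math ops modules) through
# `Tensor._override_operator`, which funnels into this helper, roughly:
#
#   Tensor._override_operator("__add__", wrapped_add_fn)  # wrapped_add_fn is a placeholder name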
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
"""EXPERIMENTAL: Returns true if `t` implements the tensor interface.
See `register_dense_tensor_like_type()` for the current definition of a
"tensor-like type".
Args:
t: An object.
Returns:
True iff `t` is an instance of one of the registered "tensor-like" types.
"""
return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
"""EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.
A "tensor-like type" can represent a single dense tensor, and implements
the `name` and `dtype` properties.
Args:
tensor_type: A type implementing the tensor interface.
Raises:
TypeError: If `tensor_type` does not implement the tensor interface.
"""
try:
if not isinstance(tensor_type.name, property):
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
try:
if not isinstance(tensor_type.dtype, property):
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
# We expect this list to be small, so choose quadratic complexity
# for registration, so that we have a tuple that can be used for
# more efficient `isinstance` checks later.
global _TENSOR_LIKE_TYPES
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
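# Illustrative sketch (hypothetical class, not part of the original module): a
# minimal "tensor-like" type only needs `name` and `dtype` exposed as
# properties to pass the checks above.
#
#   class MyDenseWrapper(object):
#
#     @property
#     def name(self):
#       return "my_dense_wrapper"
#
#     @property
#     def dtype(self):
#       return dtypes.float32
#
#   register_dense_tensor_like_type(MyDenseWrapper)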
def uid():
"""A unique (within this program execution) integer."""
return c_api.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
"""Human readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
text = repr(tensor.numpy()) if is_repr else str(tensor.numpy())
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
# NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose.
class _TensorLike(object):
"""Internal cls for grouping Tensor, SparseTensor, ..., for is_instance."""
pass
@tf_export("Tensor")
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow `tf.Session`.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
`tf.Session.run`.
`t.eval()` is a shortcut for calling
`tf.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
# This will be set by self.shape().
self._shape_val = None
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
if not _USE_C_SHAPES:
# Attributes used for C++ shape inference. Not inspected, only forwarded.
# If set, will be a HandleData object from cpp_shape_inference.proto.
self._handle_data = None
self._id = uid()
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
return "%s:%d" % (self._op.name, self._value_index)
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of this tensor.
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
`tf.TensorShape`
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
information without having to launch the graph in a session. This
can be used for debugging, and providing early error messages. For
example:
```python
c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(c.shape)
==> TensorShape([Dimension(2), Dimension(3)])
d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
print(d.shape)
==> TensorShape([Dimension(4), Dimension(2)])
# Raises a ValueError, because `c` and `d` do not have compatible
# inner dimensions.
e = tf.matmul(c, d)
f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
print(f.shape)
==> TensorShape([Dimension(3), Dimension(4)])
```
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `Tensor.set_shape()` can be used to augment the
inferred shape.
Returns:
A `TensorShape` representing the shape of this tensor.
"""
if self._shape_val is None:
if _USE_C_SHAPES:
self._shape_val = self._c_api_shape()
else:
# Call set_shape_and_handle_data_for_outputs in topological order on all
# ops that are needed to compute self.op's shape. We do this instead of
# having set_shape_and_handle_data_for_outputs recursively call
        # Operation.shape on self.op.inputs to avoid overflowing the call stack.
need_shapes = self._get_input_ops_without_shapes(self.op)
need_shapes.sort(key=lambda op: op._id)
for op in need_shapes:
set_shape_and_handle_data_for_outputs(op)
return self._shape_val
def _get_input_ops_without_shapes(self, target_op):
"""Returns ops needing shape inference to compute target_op's shape."""
result = []
stack = [self._op]
visited = set()
while stack:
op = stack.pop()
if op in visited: continue
result.append(op)
stack.extend(t.op for t in op.inputs if t._shape_val is None)
visited.add(op)
return result
def _c_api_shape(self):
"""Returns the TensorShape of this tensor according to the C API."""
c_graph = self._op._graph._c_graph # pylint: disable=protected-access
shape_vector, unknown_shape = c_api.TF_GraphGetTensorShapeHelper(
c_graph, self._as_tf_output())
if unknown_shape:
return tensor_shape.unknown_shape()
else:
shape_vector = [None if d == -1 else d for d in shape_vector]
return tensor_shape.TensorShape(shape_vector)
@property
def _shape(self):
logging.warning("Tensor._shape is private, use Tensor.shape "
"instead. Tensor._shape will eventually be removed.")
return self.shape
@_shape.setter
def _shape(self, value):
raise ValueError(
"Tensor._shape cannot be assigned, use Tensor.set_shape instead.")
def __iter__(self):
if not context.executing_eagerly():
raise TypeError(
"Tensor objects are only iterable when eager execution is "
"enabled. To iterate over this tensor use tf.map_fn.")
shape = self._shape_tuple()
if shape is None:
raise TypeError("Cannot iterate over a tensor with unknown shape.")
if not shape:
raise TypeError("Cannot iterate over a scalar tensor.")
if shape[0] is None:
raise TypeError(
"Cannot iterate over a tensor with unknown first dimension.")
for i in xrange(shape[0]):
yield self[i]
def _shape_as_list(self):
if self.shape.ndims is not None:
return [dim.value for dim in self.shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _rank(self):
"""Integer rank of this Tensor, if known, else None.
Returns:
Integer rank or None
"""
return self.shape.ndims
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
This method can be called multiple times, and will merge the given
`shape` with the current shape of this tensor. It can be used to
provide additional information about the shape of this tensor that
cannot be inferred from the graph alone. For example, this can be used
to provide additional information about the shapes of images:
```python
_, image_data = tf.TFRecordReader(...).read(...)
image = tf.image.decode_png(image_data, channels=3)
# The height and width dimensions of `image` are data dependent, and
# cannot be computed without executing the op.
print(image.shape)
==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])
# We know that each image in this dataset is 28 x 28 pixels.
image.set_shape([28, 28, 3])
print(image.shape)
==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
```
Args:
shape: A `TensorShape` representing the shape of this tensor, a
`TensorShapeProto`, a list, a tuple, or None.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
if _USE_C_SHAPES: # pylint: disable=protected-access
# Reset cached shape.
self._shape_val = None
else:
self._shape_val = self.shape.merge_with(shape)
# Update C shape even if _USE_C_SHAPES = False, since we still want
# set_shape to be reflected in the C API graph for when we run it.
if not isinstance(shape, tensor_shape.TensorShape):
shape = tensor_shape.TensorShape(shape)
dim_list = []
if shape.dims is None:
unknown_shape = True
else:
unknown_shape = False
for dim in shape.dims:
if dim.value is None:
dim_list.append(-1)
else:
dim_list.append(dim.value)
try:
c_api.TF_GraphSetTensorShape_wrapper(
self._op._graph._c_graph, # pylint: disable=protected-access
self._as_tf_output(),
dim_list,
unknown_shape)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
consumer_names = c_api.TF_OperationOutputConsumers_wrapper(
self._as_tf_output())
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(name)
for name in consumer_names
]
# pylint: enable=protected-access
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def _as_tf_output(self):
# pylint: disable=protected-access
return c_api_util.tf_output(self.op._c_op, self.value_index)
# pylint: enable=protected-access
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name, (", shape=%s" % self.get_shape())
if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name)
if self._dtype else "", (", device=%s" % self.device)
if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
# Necessary to support Python's collection membership operators
return id(self)
def __eq__(self, other):
# Necessary to support Python's collection membership operators
return id(self) == id(other)
def __copy__(self):
# Make sure _shape_val is computed before we copy.
# TODO(b/77597810): get rid of Tensor copies.
if self._shape_val is None:
set_shape_and_handle_data_for_outputs(self.op)
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (e.g. in an `if` statement). For
example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
This disallows ambiguities between testing the Python value vs testing the
dynamic condition of the `Tensor`.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.")
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See `tf.Session.run` for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
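  # Illustrative example (not part of the original class): with the graph
  # launched, `t.eval()` is shorthand for running `t` in the default session,
  # e.g. roughly:
  #
  #   with tf.Session():
  #     tf.constant([1.0, 2.0]).eval()  # -> array([1., 2.], dtype=float32)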
# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
"""Base class for EagerTensor."""
@property
def dtype(self):
# Note: using the intern table directly here as this is
# performance-sensitive in some models.
return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access
def numpy(self):
"""Returns a numpy array or a scalar with the same contents as the Tensor.
TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
buffer but instead always explicitly copy? Note that currently it may or may
not copy based on whether the numpy data is properly aligned or not.
Returns:
A numpy array or a scalar. Numpy array may share memory with the
Tensor object. Any changes to one may be reflected in the other. A scalar
value is returned when self has rank 0.
Raises:
ValueError: if the type of this Tensor is not representable in numpy.
"""
if self.dtype == dtypes.resource:
raise ValueError("Resource handles are not convertible to numpy.")
return self._cpu_nograd()._numpy() # pylint: disable=protected-access
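  # Illustrative example (not part of the original class, eager mode only),
  # written against the public API:
  #
  #   t = tf.constant([[1.0, 2.0]])
  #   t.numpy()  # -> array([[1., 2.]], dtype=float32)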
# __int__, __float__ and __index__ may copy the tensor to CPU and
# only work for scalars; values are cast as per numpy.
def __int__(self):
return int(self.numpy())
def __float__(self):
return float(self.numpy())
def __index__(self):
return int(self.numpy())
def __array__(self, dtype=None):
return np.array(self.numpy(), dtype=dtype)
def __format__(self, format_spec):
return self.numpy().__format__(format_spec)
def _numpy(self):
raise NotImplementedError()
def __copy__(self):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
return self
def __deepcopy__(self, memo):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
del memo
return self
def _datatype_enum(self):
raise NotImplementedError()
def _shape_tuple(self):
"""The shape of this Tensor, as a tuple.
This is more performant than tuple(shape().as_list()) as it avoids
two list and one object creation. Marked private for now as from an API
perspective, it would be better to have a single performant way of
getting a shape rather than exposing shape() and shape_tuple()
(and heaven forbid, shape_list() etc. as well!). Punting on that for now,
but ideally one would work things out and remove the need for this method.
Returns:
tuple with the shape.
"""
raise NotImplementedError()
def _rank(self):
"""Integer rank of this Tensor.
Unlike regular Tensors, the rank is always known for EagerTensors.
This is more performant than len(self._shape_tuple())
Returns:
Integer rank
"""
raise NotImplementedError()
def _copy_to_device(self, context, device): # pylint: disable=redefined-outer-name
raise NotImplementedError()
def __str__(self):
return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self),
self.shape,
self.dtype.name)
def __repr__(self):
return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % (
self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True))
@staticmethod
def _override_operator(name, func):
setattr(_EagerTensorBase, name, func)
def _copy_nograd(self, ctx=None, device_name=None):
"""Copies tensor to dest device, but doesn't record the operation."""
# pylint: disable=protected-access
# Creates a new tensor on the dest device.
if ctx is None:
ctx = context.context()
if device_name is None:
device_name = ctx.device_name
# pylint: disable=protected-access
try:
new_tensor = self._copy_to_device(context=ctx._handle, device=device_name)
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
return new_tensor
def _copy(self, ctx=None, device_name=None):
"""Copies tensor to dest device."""
new_tensor = self._copy_nograd(ctx, device_name)
# Record the copy on tape and define backprop copy as well.
if context.executing_eagerly():
self_device = self.device
def grad_fun(dresult):
return [dresult._copy(device_name=self_device)]
tape.record_operation("_copy", [new_tensor], [self], grad_fun)
return new_tensor
# pylint: enable=protected-access
@property
def shape(self):
if self._tensor_shape is None: # pylint: disable=access-member-before-definition
# `_tensor_shape` is declared and defined in the definition of
# `EagerTensor`, in C.
self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
return self._tensor_shape
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def _shape_as_list(self):
"""The shape of the tensor as a list."""
return list(self._shape_tuple())
@property
def ndim(self):
"""Returns the number of Tensor dimensions."""
return self.shape.ndims
def _cpu_nograd(self):
"""A copy of this Tensor with contents backed by host memory.
The copy cannot be differentiated through.
Returns:
A CPU-memory backed Tensor object with the same contents as this Tensor.
"""
return self._copy_nograd(context.context(), "CPU:0")
def cpu(self):
"""A copy of this Tensor with contents backed by host memory."""
return self._copy(context.context(), "CPU:0")
def gpu(self, gpu_index=0):
"""A copy of this Tensor with contents backed by memory on the GPU.
Arguments:
      gpu_index: Identifies which GPU to place the contents of the returned
        Tensor on.
Returns:
A GPU-memory backed Tensor object initialized with the same contents
as this Tensor.
"""
return self._copy(context.context(), "GPU:" + str(gpu_index))
def __bool__(self):
if self._shape_tuple() != (): # pylint: disable=g-explicit-bool-comparison
raise ValueError(
"Non-scalar tensor %s cannot be converted to boolean." % repr(self))
if self.dtype != dtypes.bool:
raise ValueError(
"Non-boolean tensor %s cannot be converted to boolean." % repr(self))
return bool(self.cpu().numpy())
def __nonzero__(self):
return self.__bool__()
def set_shape(self, shape):
if not self.shape.is_compatible_with(shape):
raise ValueError(
"Tensor's shape %s is not compatible with supplied shape %s" %
(self.shape, shape))
# Methods not supported / implemented for Eager Tensors.
@property
def op(self):
raise AttributeError(
"Tensor.op is meaningless when eager execution is enabled.")
@property
def graph(self):
raise AttributeError(
"Tensor.graph is meaningless when eager execution is enabled.")
@property
def name(self):
raise AttributeError(
"Tensor.name is meaningless when eager execution is enabled.")
@property
def value_index(self):
raise AttributeError(
"Tensor.value_index is meaningless when eager execution is enabled.")
def consumers(self):
raise NotImplementedError(
"Tensor.consumers is meaningless when eager execution is enabled.")
def _add_consumer(self, consumer):
raise NotImplementedError(
"_add_consumer not supported when eager execution is enabled.")
def _as_node_def_input(self):
raise NotImplementedError(
"_as_node_def_input not supported when eager execution is enabled.")
def _as_tf_output(self):
raise NotImplementedError(
"_as_tf_output not supported when eager execution is enabled.")
def eval(self, feed_dict=None, session=None):
raise NotImplementedError(
"eval is not supported when eager execution is enabled, "
"is .numpy() what you're looking for?"
)
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase)
def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False):
_ = name, as_ref
if dtype and not dtype.is_compatible_with(t.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtype.name, t.dtype.name, str(t)))
return t
_tensor_conversion_func_registry = {
0: [(Tensor, _TensorTensorConversionFunction)]
}
_tensor_conversion_func_cache = {}
_tensor_conversion_func_lock = threading.Lock()
register_dense_tensor_like_type(Tensor)
@tf_export("convert_to_tensor")
def convert_to_tensor(value, dtype=None, name=None, preferred_dtype=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
    A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value`.
RuntimeError: If a registered conversion function returns an invalid value.
"""
return internal_convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
def _error_prefix(name):
return "" if name is None else "%s: " % name
def internal_convert_to_tensor(value,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts the given `value` to an `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
  and Python scalars.
  This function can be useful when composing a new operation in Python.
  All standard Python op constructors apply this function to each of their
Tensor-valued inputs, which allows those ops to accept numpy arrays, Python
lists, and scalars in addition to `Tensor` objects.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
as_ref: True if we want the mutable view of Variables, if applicable.
preferred_dtype: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
ctx: Optional: The value of context.context().
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value`.
RuntimeError: If a registered conversion function returns an invalid value.
"""
if ctx is None: ctx = context.context()
if isinstance(value, EagerTensor):
if ctx.executing_eagerly():
# Fast path for EagerTensors that don't need any conversion.
# Note that we don't check that value's dtype matches the dtype
# argument. We expect that the C runtime will do that checking
# when we execute the kernel.
return value
else:
graph = get_default_graph()
if not graph.building_function:
raise RuntimeError("Attempting to capture an EagerTensor without "
"building a function.")
return graph.capture(value, name=name)
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
unwrapped_type = type(value)
conversion_func_list = _tensor_conversion_func_cache.get(unwrapped_type, None)
if conversion_func_list is None:
with _tensor_conversion_func_lock:
conversion_func_list = []
for _, funcs_at_priority in sorted(
_tensor_conversion_func_registry.items()):
for base_type, conversion_func in funcs_at_priority:
if isinstance(value, base_type):
conversion_func_list.append((base_type, conversion_func))
_tensor_conversion_func_cache[unwrapped_type] = conversion_func_list
for base_type, conversion_func in conversion_func_list:
# If dtype is None but preferred_dtype is not None, we try to
# cast to preferred_dtype first.
ret = None
if dtype is None and preferred_dtype is not None:
try:
ret = conversion_func(
value, dtype=preferred_dtype, name=name, as_ref=as_ref)
except (TypeError, ValueError, errors.UnimplementedError,
errors.InvalidArgumentError):
# Could not coerce the conversion to use the preferred dtype.
ret = None
if ret is not None and ret is not NotImplemented:
if (ret.dtype.base_dtype !=
dtypes.as_dtype(preferred_dtype).base_dtype):
raise TypeError("convert_to_tensor did not convert to "
"the preferred dtype: %s vs %s " %
(ret.dtype.base_dtype,
dtypes.as_dtype(preferred_dtype).base_dtype))
if ret is None:
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
if ret is NotImplemented:
continue
if not isinstance(ret, Tensor):
raise RuntimeError(
"%sConversion function %r for type %s returned non-Tensor: %r" %
(_error_prefix(name), conversion_func, base_type, ret))
if dtype and not dtype.is_compatible_with(ret.dtype):
raise RuntimeError(
"%sConversion function %r for type %s returned incompatible "
"dtype: requested = %s, actual = %s" %
(_error_prefix(name), conversion_func, base_type, dtype.name,
ret.dtype.name))
return ret
raise TypeError("%sCannot convert %r with type %s to Tensor: "
"no conversion function registered." %
(_error_prefix(name), value, unwrapped_type))
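# Illustrative note (not part of the original module): the `preferred_dtype`
# path above is best-effort, e.g. roughly:
#
#   internal_convert_to_tensor(1, preferred_dtype=dtypes.float32).dtype  # float32
#   internal_convert_to_tensor(1, preferred_dtype=dtypes.string).dtype   # int32 (preference discarded)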
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to be used when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
preferred_dtype: Optional element type for the returned tensors,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
ctx: The value of context.context().
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a list.")
ret = []
if ctx is None: ctx = context.context()
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor(
value,
dtype=dtype,
name=n,
as_ref=as_ref,
preferred_dtype=preferred_dtype,
ctx=ctx))
return ret
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to be used when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
preferred_dtype: Optional element type for the returned tensors,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
can be used as a soft preference. If the conversion to
`preferred_dtype` is not possible, this argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor(
values=values,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
@tf_export("convert_to_tensor_or_indexed_slices")
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_indexed_slices(
value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_indexed_slices(value,
dtype=None,
name=None,
as_ref=False):
"""Converts the given object to an `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, EagerTensor) and not context.executing_eagerly():
return internal_convert_to_tensor(
value, dtype=dtype, name=name, as_ref=as_ref)
elif isinstance(value, _TensorLike):
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return internal_convert_to_tensor(
value, dtype=dtype, name=name, as_ref=as_ref)
def internal_convert_n_to_tensor_or_indexed_slices(values,
dtype=None,
name=None,
as_ref=False):
"""Converts `values` to a list of `Tensor` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name prefix to be used when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections.Sequence):
raise TypeError("values must be a list.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_indexed_slices(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name prefix to be used when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
Returns:
A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_indexed_slices(
values=values, dtype=dtype, name=name, as_ref=False)
# TODO(josh11b): Add ctx argument to conversion_func() signature.
@tf_export("register_tensor_conversion_function")
def register_tensor_conversion_function(base_type,
conversion_func,
priority=100):
"""Registers a function for converting objects of `base_type` to `Tensor`.
The conversion function must have the following signature:
```python
def conversion_func(value, dtype=None, name=None, as_ref=False):
# ...
```
It must return a `Tensor` with the given `dtype` if specified. If the
conversion function creates a new `Tensor`, it should use the given
`name` if specified. All exceptions will be propagated to the caller.
The conversion function may return `NotImplemented` for some
inputs. In this case, the conversion process will continue to try
subsequent conversion functions.
If `as_ref` is true, the function must return a `Tensor` reference,
such as a `Variable`.
NOTE: The conversion functions will execute in order of priority,
followed by order of registration. To ensure that a conversion function
`F` runs before another conversion function `G`, ensure that `F` is
registered with a smaller priority than `G`.
Args:
base_type: The base type or tuple of base types for all objects that
`conversion_func` accepts.
conversion_func: A function that converts instances of `base_type` to
`Tensor`.
priority: Optional integer that indicates the priority for applying this
conversion function. Conversion functions with smaller priority values
run earlier than conversion functions with larger priority values.
Defaults to 100.
Raises:
TypeError: If the arguments do not have the appropriate type.
"""
global _tensor_conversion_func_cache
with _tensor_conversion_func_lock:
if not (isinstance(base_type, type) or
(isinstance(base_type, tuple) and
all(isinstance(x, type) for x in base_type))):
raise TypeError("base_type must be a type or a tuple of types.")
if not callable(conversion_func):
raise TypeError("conversion_func must be callable.")
# context._context is checked so that we don't inadvertently create it.
# This is because enable_eager_execution will fail when called from the main
# function if the context._context is already created, and the
# register_tensor_conversion_function calls happen when the module is
# imported.
if context._context is not None and context.executing_eagerly(
) and isinstance(base_type, six.integer_types + (
float,
np.ndarray,
)):
# TODO(nareshmodi): consider setting a context variable which disables the
# fastpath instead.
raise TypeError(
"Cannot register conversions for numpy arrays, python number types "
"when executing eagerly.")
try:
funcs_at_priority = _tensor_conversion_func_registry[priority]
except KeyError:
funcs_at_priority = []
_tensor_conversion_func_registry[priority] = funcs_at_priority
funcs_at_priority.append((base_type, conversion_func))
_tensor_conversion_func_cache = {}
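# Illustrative sketch (hypothetical class and function names, not part of the
# original module): registering a conversion for a small wrapper type.
#
#   class Density(object):
#
#     def __init__(self, value):
#       self.value = value
#
#   def _density_to_tensor(value, dtype=None, name=None, as_ref=False):
#     del as_ref  # unused for this simple wrapper
#     return convert_to_tensor(value.value, dtype=dtype, name=name)
#
#   register_tensor_conversion_function(Density, _density_to_tensor)
#   convert_to_tensor(Density([1.0, 2.0]))  # -> a float32 Tensor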
@tf_export("IndexedSlices")
class IndexedSlices(_TensorLike):
"""A sparse representation of a set of tensor slices at given indices.
This class is a simple wrapper for a pair of `Tensor` objects:
* `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
* `indices`: A 1-D integer `Tensor` with shape `[D0]`.
An `IndexedSlices` is typically used to represent a subset of a larger
tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
The values in `indices` are the indices in the first dimension of
the slices that have been extracted from the larger tensor.
The dense tensor `dense` represented by an `IndexedSlices` `slices` has
```python
dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
```
The `IndexedSlices` class is used principally in the definition of
gradients for operations that have sparse gradients
(e.g. `tf.gather`).
Contrast this representation with
`tf.SparseTensor`,
which uses multi-dimensional indices and scalar values.
"""
def __init__(self, values, indices, dense_shape=None):
"""Creates an `IndexedSlices`."""
_get_graph_from_inputs([values, indices, dense_shape])
self._values = values
self._indices = indices
self._dense_shape = dense_shape
@property
def values(self):
"""A `Tensor` containing the values of the slices."""
return self._values
@property
def indices(self):
"""A 1-D `Tensor` containing the indices of the slices."""
return self._indices
@property
def dense_shape(self):
"""A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
return self._dense_shape
@property
def name(self):
"""The name of this `IndexedSlices`."""
return self.values.name
@property
def device(self):
"""The name of the device on which `values` will be produced, or `None`."""
return self.values.device
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self.values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self.values.dtype
@property
def graph(self):
"""The `Graph` that contains the values, indices, and shape tensors."""
return self._values.graph
def __str__(self):
return "IndexedSlices(indices=%s, values=%s%s)" % (
self._indices, self._values, (", dense_shape=%s" % self._dense_shape)
if self._dense_shape is not None else "")
def __neg__(self):
return IndexedSlices(-self.values, self.indices, self.dense_shape)
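# Illustrative note (not part of the original module): `IndexedSlices` commonly
# shows up as the gradient of sparse lookups. For example, the gradient of
# `tf.gather(params, [0, 2])` with respect to `params` is typically an
# `IndexedSlices` whose `indices` are `[0, 2]` and whose `values` hold the
# corresponding incoming gradient rows.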
IndexedSlicesValue = collections.namedtuple(
"IndexedSlicesValue", ["values", "indices", "dense_shape"])
def _device_string(dev_spec):
if isinstance(dev_spec, pydev.DeviceSpec):
return dev_spec.to_string()
else:
return dev_spec
def _NodeDef(op_type, name, device=None, attrs=None): # pylint: disable=redefined-outer-name
"""Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
device: string, device, or function from NodeDef to string.
Value for the "device" attribute of the NodeDef proto.
attrs: Optional dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
"""
node_def = node_def_pb2.NodeDef()
node_def.op = compat.as_bytes(op_type)
node_def.name = compat.as_bytes(name)
if attrs is not None:
for k, v in six.iteritems(attrs):
node_def.attr[k].CopyFrom(v)
if device is not None:
if callable(device):
node_def.device = device(node_def)
else:
node_def.device = _device_string(device)
return node_def
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
def _create_c_op(graph, node_def, inputs, control_inputs):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
"list(int64)"). The length of the list should be equal to the number of
inputs specified by this operation's op def.
control_inputs: A list of `Operation`s to set as control dependencies.
Returns:
A wrapped TF_Operation*.
"""
# pylint: disable=protected-access
op_desc = c_api.TF_NewOperation(graph._c_graph,
compat.as_str(node_def.op),
compat.as_str(node_def.name))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
else:
c_api.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
c_api.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)
try:
c_op = c_api.TF_FinishOperation(op_desc)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
return c_op
@tf_export("Operation")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a TensorFlow `Graph` that takes zero or
more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
`tf.matmul`)
or `tf.Graph.create_op`.
For example `c = tf.matmul(a, b)` creates an `Operation` of type
"MatMul" that takes tensors `a` and `b` as input, and produces `c`
as output.
After the graph has been launched in a session, an `Operation` can
be executed by passing it to
`tf.Session.run`.
`op.run()` is a shortcut for calling `tf.get_default_session().run(op)`.
"""
def __init__(self,
node_def,
g,
inputs=None,
output_types=None,
control_inputs=None,
input_types=None,
original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`.
Used for attributes of `node_def_pb2.NodeDef`, typically `name`,
`op`, and `device`. The `input` attribute is irrelevant here
as it will be computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the
`Tensors` computed by this operation. The length of this list indicates
the number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a
control dependency.
input_types: List of `DType` objects representing the
types of the tensors accepted by the `Operation`. By default
uses `[x.dtype.base_dtype for x in inputs]`. Operations that expect
reference-typed inputs must specify these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the
op type that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
# For internal use only: `node_def` can be set to a TF_Operation to create
# an Operation for that op. This is useful for creating Operations for ops
# indirectly created by C API methods, e.g. the ops created by
# TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
# should be None.
if isinstance(node_def, node_def_pb2.NodeDef):
if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
c_op = None
elif type(node_def).__name__ == "SwigPyObject":
assert inputs is None
assert output_types is None
assert control_inputs is None
assert input_types is None
assert original_op is None
assert op_def is None
c_op = node_def
else:
raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % g)
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
for a in inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % a)
if input_types is None:
input_types = [i.dtype.base_dtype for i in inputs]
else:
if not all(
x.is_compatible_with(i.dtype)
for i, x in zip(inputs, input_types)):
raise TypeError("In op '%s', input types (%s) are not compatible "
"with expected types (%s)" %
(node_def.name, [i.dtype for i in inputs],
input_types))
# Build the list of control inputs.
control_input_ops = []
if control_inputs:
for c in control_inputs:
control_op = None
if isinstance(c, Operation):
control_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
control_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
control_input_ops.append(control_op)
# This will be set by self.inputs.
self._inputs_val = None
# pylint: disable=protected-access
self._id_value = self._graph._next_id()
self._original_op = original_op
self._traceback = tf_stack.extract_stack()
# List of _UserDevSpecs holding code location of device context manager
# invocations and the users original argument to them.
self._device_code_locations = None
# Dict mapping op name to file and line information for op colocation
# context managers.
self._colocation_code_locations = None
self._control_flow_context = self.graph._get_control_flow_context()
# pylint: enable=protected-access
# Initialize self._c_op.
if c_op:
self._c_op = c_op
else:
if op_def is None:
op_def = self._graph._get_op_def(node_def.op)
# TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
# Refactor so we don't have to do this here.
grouped_inputs = self._reconstruct_sequence_inputs(
op_def, inputs, node_def.attr)
self._c_op = _create_c_op(self._graph, node_def, grouped_inputs,
control_input_ops)
# Initialize self._outputs.
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
output_types = [
c_api.TF_OperationOutputType(c_api_util.tf_output(self._c_op, i))
for i in range(num_outputs)]
self._outputs = [
Tensor(self, i, output_type)
for i, output_type in enumerate(output_types)
]
self._graph._add_op(self) # pylint: disable=protected-access
if not c_op:
self._control_flow_post_processing()
def _control_flow_post_processing(self):
"""Add this op to its control flow context.
This may add new ops and change this op's inputs. self.inputs must be
available before calling this method.
"""
for input_tensor in self.inputs:
control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
def _reconstruct_sequence_inputs(self, op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [
compat.as_bytes("loc:@%s" % self.name)
]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
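# Example (illustrative sketch, assumes TF 1.x graph mode with
# `import tensorflow as tf`; not executed here):
#
#   a = tf.constant(1.0, name="a")
#   with tf.colocate_with(a.op):
#     b = tf.constant(2.0, name="b")
#   b.op.colocation_groups()   # [b'loc:@a']  (from the explicit _class attr)
#   a.op.colocation_groups()   # [b'loc:@a']  (its own default group)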
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context of this op.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
@property
def name(self):
"""The full name of this operation."""
return c_api.TF_OperationName(self._c_op)
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
"""
return c_api.TF_OperationDevice(self._c_op)
@property
def _device_assignments(self):
"""Code locations for device context managers active at op creation.
This property will return a list of traceable_stack.TraceableObject
instances where .obj is a string representing the assigned device
(or information about the function that would be applied to this op
to compute the desired device) and the filename and lineno members
record the location of the relevant device context manager.
For example, suppose file_a contained these lines:
file_a.py:
15: with tf.device('/gpu:0'):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the device context manager
would have these member values:
t_obj.obj -> '/gpu:0'
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._device_assignments would return the list [t_obj].
Returns:
[str: traceable_stack.TraceableObject, ...] as per this method's
description, above.
"""
return self._device_code_locations or []
@property
def _colocation_dict(self):
"""Code locations for colocation context managers active at op creation.
This property will return a dictionary for which the keys are nodes with
which this Operation is colocated, and for which the values are
traceable_stack.TraceableObject instances. The TraceableObject instances
record the location of the relevant colocation context manager but have the
"obj" field set to None to prevent leaking private data.
For example, suppose file_a contained these lines:
file_a.py:
14: node_a = tf.constant(3, name='NODE_A')
15: with tf.colocate_with(node_a):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the colocation context manager
would have these member values:
t_obj.obj -> None
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._colocation_dict would return the dictionary
{ 'NODE_A': t_obj }
Returns:
{str: traceable_stack.TraceableObject} as per this method's description,
above.
"""
locations_dict = self._colocation_code_locations or {}
return locations_dict.copy()
@property
def _output_types(self):
"""List this operation's output types.
Returns:
List of the types of the Tensors computed by this operation.
Each element in the list is an integer whose value is one of
the TF_DataType enums defined in c_api.h
The length of this list indicates the number of output endpoints
of the operation.
"""
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
output_types = [
c_api.TF_OperationOutputType(self._tf_output(i))
for i in xrange(num_outputs)
]
# In all of our tests, the output_types passed into Operation.__init__ are a
# list of ints (which is illegal according to the docstring), while
# input_types are instances of DType. This extra assert catches any future
# use of DType for output_types.
if output_types:
assert isinstance(output_types[0], int)
return output_types
def _tf_output(self, output_idx):
"""Create and return a new TF_Output for output_idx'th output of this op."""
tf_output = c_api.TF_Output()
tf_output.oper = self._c_op
tf_output.index = output_idx
return tf_output
def _tf_input(self, input_idx):
"""Create and return a new TF_Input for input_idx'th input of this op."""
tf_input = c_api.TF_Input()
tf_input.oper = self._c_op
tf_input.index = input_idx
return tf_input
def _set_device(self, device): # pylint: disable=redefined-outer-name
"""Set the device of this operation.
Args:
device: string or device. The device to set.
"""
c_api.SetRequestedDevice(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
compat.as_str(_device_string(device)))
def _update_input(self, index, tensor):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Make sure output shapes are already computed for this op in case we create
# a cycle (we cannot compute shapes for cycles). Usually shapes are computed
# lazily upon request.
if not _USE_C_SHAPES:
set_shape_and_handle_data_for_outputs(self)
# Reset cached inputs.
self._inputs_val = None
c_api.UpdateEdge(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._tf_input(index))
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
def _remove_all_control_inputs(self):
"""Removes any control inputs to this operation."""
c_api.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access
def __str__(self):
return str(self.node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
# pylint: disable=protected-access
class _InputList(object):
"""Immutable input list wrapper."""
def __init__(self, inputs):
self._inputs = inputs
def __iter__(self):
return iter(self._inputs)
def __len__(self):
return len(self._inputs)
def __bool__(self):
return bool(self._inputs)
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __getitem__(self, i):
return self._inputs[i]
# pylint: enable=protected-access
@property
def inputs(self):
"""The list of `Tensor` objects representing the data inputs of this op."""
if self._inputs_val is None:
tf_outputs = c_api.GetOperationInputs(self._c_op)
# pylint: disable=protected-access
retval = [
self.graph._get_tensor_by_tf_output(tf_output)
for tf_output in tf_outputs
]
# pylint: enable=protected-access
self._inputs_val = Operation._InputList(retval)
return self._inputs_val
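# Example (illustrative sketch, assumes TF 1.x graph mode; not executed here):
#
#   a = tf.constant(1.0)
#   b = tf.constant(2.0)
#   c = tf.add(a, b)
#   list(c.op.inputs)   # [a, b]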
@property
def _inputs(self):
logging.warning("Operation._inputs is private, use Operation.inputs "
"instead. Operation._inputs will eventually be removed.")
return self.inputs
@_inputs.setter
def _inputs(self, value):
raise ValueError("Cannot assign _inputs")
@property
def _input_types(self):
num_inputs = c_api.TF_OperationNumInputs(self._c_op)
input_types = [
dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i)))
for i in xrange(num_inputs)
]
return input_types
@_input_types.setter
def _input_types(self, value):
raise ValueError("Cannot assign _input_types")
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
Returns:
A list of `Operation` objects.
"""
control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
c_api.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
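# Example (illustrative sketch, assumes TF 1.x graph mode; not executed here):
#
#   a = tf.constant(1.0, name="a")
#   with tf.control_dependencies([a]):
#     b = tf.constant(2.0, name="b")
#   b.op.control_inputs   # [<tf.Operation 'a' type=Const>]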
@property
def _control_outputs(self):
"""The `Operation` objects which have a control dependency on this op.
Before any of the ops in self._control_outputs can execute, TensorFlow will
ensure that this op has finished executing.
Returns:
A list of `Operation` objects.
"""
control_c_ops = c_api.TF_OperationGetControlOutputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
c_api.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def _control_inputs(self):
logging.warning("Operation._control_inputs is private, use "
"Operation.control_inputs instead. "
"Operation._control_inputs will eventually be removed.")
return self.control_inputs
@_control_inputs.setter
def _control_inputs(self, value):
logging.warning("Operation._control_inputs is private, use "
"Operation.control_inputs instead. "
"Operation._control_inputs will eventually be removed.")
# Copy value because it may be self._control_inputs_val (in particular if
# this is called from self._control_inputs += ...), and we don't want to
# clear value below.
value = copy.copy(value)
self._remove_all_control_inputs()
self._add_control_inputs(value)
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
return c_api.TF_OperationOpType(self._c_op)
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
# pylint: disable=line-too-long
"""Returns the `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationToNodeDef(self._c_op, buf)
data = c_api.TF_GetBuffer(buf)
node_def = node_def_pb2.NodeDef()
node_def.ParseFromString(compat.as_bytes(data))
return node_def
@property
def _node_def(self):
logging.warning("Operation._node_def is private, use Operation.node_def "
"instead. Operation._node_def will eventually be removed.")
return self.node_def
@property
def op_def(self):
# pylint: disable=line-too-long
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
return self._graph._get_op_def(self.type)
@property
def _op_def(self):
logging.warning("Operation._op_def is private, use Operation.op_def "
"instead. Operation._op_def will eventually be removed.")
return self.op_def
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return tf_stack.convert_stack(self._traceback)
@property
def traceback_with_start_lines(self):
"""Same as traceback but includes start line of function definition.
Returns:
A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
"""
return tf_stack.convert_stack(self._traceback,
include_func_start_lineno=True)
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
buf = c_api.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
# pylint: disable=protected-access
c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf)
# pylint: enable=protected-access
finally:
c_api.TF_DeleteBuffer(buf)
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"]
try:
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf)
data = c_api.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
# Treat an empty oneof value as an empty list.
if not x.WhichOneof("value"):
return []
if x.HasField("list"):
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(x) for x in list(getattr(x.list, f))]
else:
return list(getattr(x.list, f))
return []
else:
for f in fields:
if x.HasField(f):
if f == "type":
return dtypes.as_dtype(getattr(x, f))
else:
return getattr(x, f)
assert False, "Unsupported field type in " + str(x)
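# Example (illustrative sketch, assumes TF 1.x graph mode; not executed here):
#
#   c = tf.constant([1, 2], name="c")
#   c.op.get_attr("dtype")   # tf.int32 (a DType)
#   c.op.get_attr("value")   # a TensorProto holding [1, 2]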
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
See `tf.Session.run`
for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run to this operation. If
none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
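# Example use of Operation.run() (illustrative sketch, assumes TF 1.x graph
# mode; not executed here):
#
#   v = tf.Variable(0)
#   init = tf.global_variables_initializer()   # an Operation
#   with tf.Session():
#     init.run()   # equivalent to tf.get_default_session().run(init)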
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
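# Example combining RegisterGradient with Graph.gradient_override_map
# (illustrative sketch, assumes TF 1.x graph mode; "CustomSquare" is a made-up
# registry key rather than a real op type; not executed here):
#
#   @tf.RegisterGradient("CustomSquare")
#   def _custom_square_grad(op, grad):
#     x = op.inputs[0]
#     return grad * 2.0 * x   # same formula as the stock Square gradient
#
#   x = tf.constant(3.0)
#   g = tf.get_default_graph()
#   with g.gradient_override_map({"Square": "CustomSquare"}):
#     y = tf.square(x)   # gradients of y now use _custom_square_grad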
@tf_export("NoGradient", "NotDifferentiable")
def NotDifferentiable(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.NotDifferentiable("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Alias for the old name, will be eventually removed.
NoGradient = NotDifferentiable
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs:
return None
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")
# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None
def _set_call_cpp_shape_fn(call_cpp_shape_fn):
"""Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
if _call_cpp_shape_fn:
return # already registered
def call_without_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=False)
_call_cpp_shape_fn = call_without_requiring
def call_with_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=True)
_call_cpp_shape_fn_and_require_op = call_with_requiring
class RegisterShape(object):
"""No longer used. Was: A decorator for registering a shape function.
Shape functions must now be registered via the SetShapeFn on the
original Op specification in C++.
"""
def __init__(self, op_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers "f" as the shape function for "op_type"."""
if f is None:
assert _call_cpp_shape_fn
# None is a special "weak" value that provides a default shape function,
# and can be overridden by a non-None registration.
try:
_default_shape_function_registry.register(_call_cpp_shape_fn,
self._op_type)
except KeyError:
# Ignore duplicate registrations of the weak value. This can
# occur if the op library input to wrapper generation
# inadvertently links in one or more of the standard op
# libraries.
pass
else:
_shape_registry.register(f, self._op_type)
return f
# TODO(b/74620627): remove when _USE_C_SHAPES is removed
def _set_shape_and_handle_data_for_outputs_c_api(op):
"""Set shapes and resource handle data using info from the C API."""
assert not _USE_C_SHAPES
for output in op.outputs:
output._shape_val = output._c_api_shape()
# Set the resource handle data for compatibility with the Python shape
# inference code.
serialized = c_api.GetResourceHandleShapeAndType(op._graph._c_graph,
output._as_tf_output())
if serialized:
output._handle_data = (
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData
.FromString(compat.as_bytes(serialized)))
else:
output._handle_data = None
# TODO(b/74620627): remove when _USE_C_SHAPES is removed
def set_shape_and_handle_data_for_outputs(op):
"""Set the shapes and resource handle data for op's outputs.
When _USE_C_SHAPES = False, this is lazily called when a tensor's shape is
first requested. Usually this should work automatically, but some edge cases
may require manually calling this first to make sure Tensor._shape_val and
Tensor._handle_data are set (e.g. manually overriding _handle_data, copying a
Tensor).
"""
if _USE_C_SHAPES: return
if op.graph._is_function(op.type):
for output in op.outputs:
output._shape_val = tensor_shape.unknown_shape()
return
try:
shape_func = _shape_registry.lookup(op.type)
except LookupError:
try:
shape_func = _default_shape_function_registry.lookup(op.type)
except LookupError:
shape_func = _call_cpp_shape_fn_and_require_op
shapes = shape_func(op)
if shapes is None:
raise RuntimeError(
"Shape function for op %s did not return any shapes" % op)
elif isinstance(shapes, dict):
# Returned by call_cpp_shape_fn
shapes_dict = shapes
shapes = shapes_dict["shapes"]
handle_datas = shapes_dict["handle_data"]
for output, handle_data in zip(op.outputs, handle_datas):
# Don't override any existing handle data that may have been manually set.
# pylint: disable=protected-access
if output._handle_data is None:
output._handle_data = handle_data
# pylint: enable=protected-access
if len(op.outputs) != len(shapes):
raise RuntimeError(
"Shape function for op %s returned %d shapes but expected %d %s %s" %
(op, len(shapes), len(op.outputs), shape_func.__name__, str(shapes)))
for output, s in zip(op.outputs, shapes):
output._shape_val = tensor_shape.unknown_shape()
output._shape_val = output._shape_val.merge_with(s)
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes two arguments: a Graph object (set up so that you
can call methods like get_tensor_by_name to help calculate the results) and
the NodeDef for the op in question.
"""
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s." %
(self.statistic_type, other.statistic_type))
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
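# Example (illustrative sketch; not executed here):
#
#   total = OpStats("flops", 0)
#   total += OpStats("flops", 4096)
#   total.value                                  # 4096
#   total += OpStats("weight_parameters", 10)    # raises ValueError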
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
Statistics are counted on a per-op basis, so this mechanism is not suitable
for model parameters (capacity), which should be counted only once even when
they are shared by multiple ops (e.g. in an RNN).
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
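# Example (illustrative sketch, assumes TF 1.x graph mode and that a "flops"
# statistics function is registered for MatMul, as stock TensorFlow does when
# the math ops are imported; not executed here):
#
#   g = tf.Graph()
#   with g.as_default():
#     a = tf.random_normal([25, 16])
#     b = tf.random_normal([16, 9])
#     c = tf.matmul(a, b)
#   stats = get_stats_for_node_def(g, c.op.node_def, "flops")
#   stats.value   # FLOP estimate if a function is registered, otherwise None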
def _name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if (name and name[-1] == "/") else name
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
`tf.Operation` objects,
which represent units of computation; and
`tf.Tensor` objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
`tf.get_default_graph`.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
```python
c = tf.constant(4.0)
assert c.graph is tf.get_default_graph()
```
Another typical usage involves the
`tf.Graph.as_default`
context manager, which overrides the current default graph for the
lifetime of the context:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
`tf.GraphKeys.GLOBAL_VARIABLES`) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects core state that can be returned via public accessors.
# Thread-safety is provided on a best-effort basis to support buggy
# programs, and is not guaranteed by the public `tf.Graph` API.
#
# NOTE(mrry): This does not protect the various stacks. A warning will
# be reported if these are used from multiple threads.
self._lock = threading.RLock()
# The group lock synchronizes Session.run calls with methods that create
# and mutate ops (e.g. Graph.create_op()). This synchronization is
# necessary because it's illegal to modify an operation after it's been run.
# The group lock allows any number of threads to mutate ops at the same time
# but if any modification is going on, all Session.run calls have to wait.
# Similarly, if one or more Session.run calls are going on, all mutate ops
# have to wait until all Session.run calls have finished.
self._group_lock = lock_util.GroupLock(num_groups=2)
self._nodes_by_id = dict() # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = dict() # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
self._stack_state_is_thread_local = False
self._thread_local = threading.local()
# Functions that will be applied to choose a device if none is specified.
# After switch_to_thread_local(), self._thread_local._device_function_stack
# is used instead.
self._graph_device_function_stack = traceable_stack.TraceableStack()
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
# A new node will depend on the union of all of the nodes in the stack.
# After switch_to_thread_local(),
# self._thread_local._control_dependencies_stack is used instead.
self._graph_control_dependencies_stack = []
# Arbitrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops. After switch_to_thread_local(),
# self._thread_local._colocation_stack is used instead.
self._graph_colocation_stack = traceable_stack.TraceableStack()
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = set()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Allow optimizers and other objects to pseudo-uniquely key graphs (this key
# will be shared when defining function graphs, for example, so optimizers
# being called inside function definitions behave as if they were seeing the
# actual outside graph).
self._graph_key = "grap-key-%d/" % (uid(),)
# A string with the last reduction method passed to
# losses.compute_weighted_loss(), or None.
self._last_loss_reduction = None
self._container = ""
self._registered_ops = op_def_registry.get_registered_ops()
# TODO(skyewm): fold as much of the above as possible into the C
# implementation
if self._use_c_api_hack():
self._scoped_c_graph = c_api_util.ScopedTFGraph()
# The C API requires all ops to have shape functions. Disable this
# requirement (many custom ops do not have shape functions, and we don't
# want to break these existing cases).
c_api.SetRequireShapeInferenceFns(self._c_graph, False)
else:
self._scoped_c_graph = None
# TODO(apassos) remove once the C API is used by default.
def _use_c_api_hack(self):
"""Temporary hack; can be overridden to force C API usage."""
return _USE_C_API
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
def _variable_creator_scope(self, creator):
# This step makes a copy of the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = list(self._variable_creator_stack)
self._thread_local._variable_creator_stack.append(creator) # pylint: disable=protected-access
try:
yield
finally:
self._thread_local._variable_creator_stack = old # pylint: disable=protected-access
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access
return list(self._thread_local._variable_creator_stack) # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
RuntimeError: If the graph is finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op):
"""Adds 'op' to the graph.
Args:
op: the Operation or Tensor to add.
Raises:
TypeError: if op is not an Operation or Tensor.
ValueError: if the op.name or op._id are already used.
"""
self._check_not_finalized()
if not isinstance(op, (Tensor, Operation)):
raise TypeError("op must be a Tensor or Operation: %s" % op)
with self._lock:
# pylint: disable=protected-access
if op._id in self._nodes_by_id:
raise ValueError("cannot add an op with id %d as it already "
"exists in the graph" % op._id)
if op.name in self._nodes_by_name:
raise ValueError("cannot add op with name %s as that name "
"is already used" % op.name)
self._nodes_by_id[op._id] = op
self._nodes_by_name[op.name] = op
self._version = max(self._version, op._id)
# pylint: enable=protected-access
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
`tf.Graph.graph_def_versions`.
Returns:
An integer version that increases as ops are added to the graph.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphVersions(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a `tf.train.QueueRunner`.
"""
self._finalized = True
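# Example (illustrative sketch, assumes TF 1.x; not executed here):
#
#   g = tf.Graph()
#   with g.as_default():
#     c = tf.constant(1.0)
#   g.finalize()
#   with g.as_default():
#     tf.constant(2.0)   # raises RuntimeError (graph is finalized)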
def _unsafe_unfinalize(self):
"""Opposite of `finalize`. Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
"""If this graph contains functions, copy them to `graph_def`."""
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
def _as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef`
containing only the nodes that were added to this graph since
its `version` property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each
node with the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
with self._lock:
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphToGraphDef(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
graph = graph_pb2.GraphDef()
graph.ParseFromString(compat.as_bytes(data))
# Strip the experimental library field iff it's empty.
if not graph.library.function:
graph.ClearField("library")
if add_shapes:
for node in graph.node:
op = self._nodes_by_name[node.name]
if op.outputs:
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef`
containing only the nodes that were added to this graph since
its `version` property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each
node with the inferred shapes of each of its outputs.
Returns:
A
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
result, _ = self._as_graph_def(from_version, add_shapes)
return result
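# Example (illustrative sketch, assumes TF 1.x; not executed here):
#
#   g = tf.Graph()
#   with g.as_default():
#     tf.constant([1.0, 2.0], name="values")
#   gd = g.as_graph_def(add_shapes=True)
#   gd.node[0].name                     # 'values'
#   gd.node[0].attr["_output_shapes"]   # records the [2] output shape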
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return name in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(name, None)
def _add_function(self, function):
"""Adds a function to the graph.
After the function has been added, you can call the function by
passing its name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
name = function.name
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (function.python_grad_func is
not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Add function to graph
# pylint: disable=protected-access
# Handle functions created without using the C API. TODO(apassos,skyewm)
# remove this when all functions are generated using the C API by default
# as this will be unnecessary.
if not function._c_func:
serialized = function.definition.SerializeToString()
c_func = c_api.TF_FunctionImportFunctionDef(serialized)
function._c_func = c_api_util.ScopedTFFunction(c_func)
gradient = (function._grad_func._c_func.func if function._grad_func
else None)
c_api.TF_GraphCopyFunction(self._c_graph, function._c_func.func, gradient)
# pylint: enable=protected-access
self._functions[name] = function
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
@deprecated_args(None,
"Shapes are always computed; don't use the compute_shapes "
"as it has no effect.", "compute_shapes")
def create_op(
self,
op_type,
inputs,
dtypes, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: A list of `DType` objects that will be the types of the tensors
that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
del compute_shapes
self._check_not_finalized()
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
if name is None:
name = op_type
# If a name ends with a '/' it is a "name scope", and we use it as-is
# after removing the trailing '/'.
if name and name[-1] == "/":
name = _name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, device=None, attrs=attrs)
input_ops = set([t.op for t in inputs])
control_inputs = self._control_dependencies_for_inputs(input_ops)
# _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
# Session.run call cannot occur between creating and mutating the op.
with self._mutation_lock():
ret = Operation(
node_def,
self,
inputs=inputs,
output_types=dtypes,
control_inputs=control_inputs,
input_types=input_types,
original_op=self._default_original_op,
op_def=op_def)
self._create_op_helper(ret, compute_device=compute_device)
return ret
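# Example (illustrative sketch, assumes TF 1.x; most programs reach create_op
# indirectly through the Python op constructors; not executed here):
#
#   g = tf.Graph()
#   with g.as_default():
#     c = tf.constant(4.0, name="c")   # ends up calling g.create_op("Const", ...)
#   g.get_operation_by_name("c").type  # 'Const'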
def _create_op_from_tf_operation(self, c_op, compute_device=True):
"""Creates an `Operation` in this graph from the supplied TF_Operation.
This method is like create_op() except the new Operation is constructed
using `c_op`. The returned Operation will have `c_op` as its _c_op
field. This is used to create Operation objects around TF_Operations created
indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).
This function does not call Operation._control_flow_post_processing or
Graph._control_dependencies_for_inputs (since the inputs may not be
available yet). The caller is responsible for calling these methods.
Args:
c_op: a wrapped TF_Operation
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
ret = Operation(c_op, self)
# If a name_scope was created with ret.name but no nodes were created in it,
# the name will still appear in _names_in_use even though the name hasn't
# been used. This is ok, just leave _names_in_use as-is in this case.
# TODO(skyewm): make the C API guarantee no name conflicts.
name_key = ret.name.lower()
if name_key not in self._names_in_use:
self._names_in_use[name_key] = 1
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _make_colocation_conflict_message(self, op, colocation_op):
"""Return detailed error message about device conflict due to colocation."""
# Example error message:
# Tried to colocate op 'a' (defined at file1.py:149) having device
# '/device:GPU:0' with op 'b' (defined at file2:96) which had an
# incompatible device '/device:CPU:0'.
#
# No node-device colocations were active during op 'a' creation.
# Device assignments active during op 'a' creation:
# with tf.device(/device:GPU:0): file1.py:148>
#
# Node-device colocations active during op 'b' creation:
# with tf.colocate_with(a): file2.py:93>
# Device assignments active during op 'b' creation:
# with tf.device(/cpu:0): file2.py:94
op_info = error_interpolation.compute_field_dict(op)
coloc_op_info = error_interpolation.compute_field_dict(colocation_op)
msg = ("Tried to colocate op '{op_name}'{op_loc} having device '{op_dev}' "
"with op '{coloc_op_name}'{coloc_op_loc} which had an incompatible "
"device '{coloc_op_dev}'.\n\n{op_summary}\n\n{coloc_op_summary}"
.format(op_name=op.name,
op_loc=op_info["defined_at"],
op_dev=op.device,
op_summary=op_info["devs_and_colocs"],
coloc_op_name=colocation_op.name,
coloc_op_loc=coloc_op_info["defined_at"],
coloc_op_dev=colocation_op.device,
coloc_op_summary=coloc_op_info["devs_and_colocs"]))
return msg
def _create_op_helper(self, op, compute_device=True):
"""Common logic for creating an op in this graph."""
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
try:
op.get_attr(key)
except ValueError:
if callable(value):
value = value(op.node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" % (key,
value))
if value:
op._set_attr(key, value) # pylint: disable=protected-access
# Apply a kernel label if one has been specified for this op type.
try:
kernel_label = self._op_to_kernel_label_map[op.type]
op._set_attr("_kernel", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
# Apply the overriding op type for gradients if one has been specified for
# this op type.
try:
mapped_op_type = self._gradient_override_map[op.type]
op._set_attr("_gradient_op_type", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
self._record_op_seen_by_control_dependencies(op)
if compute_device:
self._apply_device_functions(op)
# Snapshot the colocation stack metadata before we might generate error
# messages using it. Note that this snapshot depends on the actual stack
# and is independent of the op's _class attribute.
# pylint: disable=protected-access
op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
# pylint: enable=protected-access
if self._colocation_stack:
all_colocation_groups = []
for colocation_op in self._colocation_stack.peek_objs():
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
if (op.device and pydev.canonical_name(op.device) !=
pydev.canonical_name(colocation_op.device)):
msg = self._make_colocation_conflict_message(op, colocation_op)
logging.warning(msg)
else:
op._set_device(colocation_op.device) # pylint: disable=protected-access
all_colocation_groups = sorted(set(all_colocation_groups))
# pylint: disable=protected-access
op._set_attr("_class", attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# pylint: enable=protected-access
# Sets "container" attribute if
# (1) self._container is not None
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is None
if self._container and op.op_def.is_stateful:
try:
container_attr = op.get_attr("container")
except ValueError:
# "container" attribute is not in OpDef
pass
else:
if not container_attr:
op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access
s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
"""Creates `Operations` in this graph for any new TF_Operations.
This is useful for when TF_Operations are indirectly created by the C API
outside of the Operation constructor (e.g. by TF_ImportGraphDef,
TF_FinishWhile). This ensures there are corresponding Operations for all
TF_Operations in the underlying TF_Graph.
Args:
compute_devices: (Optional.) If True, device functions will be executed
to compute the device properties of each new Operation.
Returns:
A list of the new `Operation` objects.
"""
# Create all Operation objects before accessing their inputs since an op may
# be created before its inputs.
new_ops = [
self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
for c_op in c_api_util.new_tf_operations(self)
]
# pylint: disable=protected-access
for op in new_ops:
# Operations created by the C API always retrieve shapes from the C API so
# we preserve the shapes of ops created in import_graph_def (from the
# "_output_shapes" attr of the imported NodeDef).
if not _USE_C_SHAPES:
_set_shape_and_handle_data_for_outputs_c_api(op)
new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
op._add_control_inputs(new_control_inputs)
op._control_flow_post_processing()
# pylint: enable=protected-access
return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation.
Can also be any object with an `_as_graph_element()` method that returns
a value of one of these types.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is not one of the types that can be converted
to a `Tensor` or `Operation`.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." % (repr(name), repr(op_name),
len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." % (type(obj).__name__,
types_str))
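# Commented sketch (assumes TF 1.x graph mode; the name "c" is illustrative)
# of the name-resolution rules implemented above:
#
#   import tensorflow as tf
#
#   g = tf.Graph()
#   with g.as_default():
#     c = tf.constant(5.0, name="c")
#   assert g.as_graph_element("c:0") is c     # "<op_name>:<index>" -> Tensor
#   assert g.as_graph_element("c") is c.op    # bare name -> Operation
#   assert g.as_graph_element(c.op) is c.op   # graph elements pass through
#   # g.as_graph_element("c:1") would raise KeyError (the op has one output).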
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the list such as inserts/delete have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
This is an internal, unsafe version of `get_operation_by_name`. It skips many
checks and does not have user-friendly error messages, but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = c_api.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
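# Commented usage sketch (assumes TF 1.x; the graph and names are
# illustrative) of the two public lookups above:
#
#   import tensorflow as tf
#
#   g = tf.Graph()
#   with g.as_default():
#     tf.constant(1.0, name="x")
#   op = g.get_operation_by_name("x")   # the Operation named "x"
#   t = g.get_tensor_by_name("x:0")     # its first (and only) output
#   assert t is op.outputs[0]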
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
def _next_id(self):
"""Id for next Operation instance. Also increments the internal id."""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
return self._next_id_counter
@property
def _last_id(self):
return self._next_id_counter
def _get_op_def(self, type): # pylint: disable=redefined-builtin
"""Returns the `OpDef` proto for `type`. `type` is a string."""
with c_api_util.tf_buffer() as buf:
# pylint: disable=protected-access
c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf)
# pylint: enable=protected-access
data = c_api.TF_GetBuffer(buf)
op_def = op_def_pb2.OpDef()
op_def.ParseFromString(compat.as_bytes(data))
return op_def
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly.
Use this method with the `with` keyword to specify that ops created within
the scope of a block should be added to this graph. In this case, once
the scope of the `with` is exited, the previous default graph is set again
as default. There is a stack, so it's ok to have multiple nested levels
of `as_default` calls.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
If eager execution is enabled, ops created under this context manager will be
added to the graph instead of executed eagerly.
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
Args:
name: The key for the collection. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
""" # pylint: disable=g-doc-exception
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
""" # pylint: disable=g-doc-exception
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
This is different from `get_collection_ref()`, which always returns the
actual collection list if it exists: this method returns a new copy of the
list each time it is called.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
""" # pylint: disable=g-doc-exception
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
if hasattr(item, "name") and regex.match(item.name):
c.append(item)
return c
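# Commented sketch (collection name and values are illustrative) contrasting
# get_collection(), which returns a copy, with get_collection_ref(), which
# returns the live list:
#
#   import tensorflow as tf
#
#   g = tf.Graph()
#   g.add_to_collection("my_things", "a")
#   copy = g.get_collection("my_things")
#   copy.append("ignored")              # does not affect the graph
#   assert g.get_collection("my_things") == ["a"]
#   ref = g.get_collection_ref("my_things")
#   ref.append("b")                     # mutates the collection itself
#   assert g.get_collection("my_things") == ["a", "b"]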
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
self._default_original_op = op
try:
yield
finally:
self._default_original_op = old_original_op
@property
def _name_stack(self):
# This may be called from a thread where name_stack doesn't yet exist.
if not hasattr(self._thread_local, "_name_stack"):
self._thread_local._name_stack = ""
return self._thread_local._name_stack
@_name_stack.setter
def _name_stack(self, name_stack):
self._thread_local._name_stack = name_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def name_scope(self, name):
r"""Returns a context manager that creates hierarchical names for operations.
A graph maintains a stack of name scopes. A `with name_scope(...):`
statement pushes a new name onto the stack for the lifetime of the context.
The `name` argument will be interpreted as follows:
* A string (not ending with '/') will create a new name scope, in which
`name` is appended to the prefix of all operations created in the
context. If `name` has been used before, it will be made unique by
calling `self.unique_name(name)`.
* A scope previously captured from a `with g.name_scope(...) as
scope:` statement will be treated as an "absolute" name scope, which
makes it possible to re-enter existing scopes.
* A value of `None` or the empty string will reset the current name scope
to the top-level (empty) name scope.
For example:
```python
with tf.Graph().as_default() as g:
c = tf.constant(5.0, name="c")
assert c.op.name == "c"
c_1 = tf.constant(6.0, name="c")
assert c_1.op.name == "c_1"
# Creates a scope called "nested"
with g.name_scope("nested") as scope:
nested_c = tf.constant(10.0, name="c")
assert nested_c.op.name == "nested/c"
# Creates a nested scope called "inner".
with g.name_scope("inner"):
nested_inner_c = tf.constant(20.0, name="c")
assert nested_inner_c.op.name == "nested/inner/c"
# Create a nested scope called "inner_1".
with g.name_scope("inner"):
nested_inner_1_c = tf.constant(30.0, name="c")
assert nested_inner_1_c.op.name == "nested/inner_1/c"
# Treats `scope` as an absolute name scope, and
# switches to the "nested/" scope.
with g.name_scope(scope):
nested_d = tf.constant(40.0, name="d")
assert nested_d.op.name == "nested/d"
with g.name_scope(""):
e = tf.constant(50.0, name="e")
assert e.op.name == "e"
```
The name of the scope itself can be captured by `with
g.name_scope(...) as scope:`, which stores the name of the scope
in the variable `scope`. This value can be used to name an
operation that represents the overall result of executing the ops
in a scope. For example:
```python
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
weights = tf.Variable(..., name="weights")
biases = tf.Variable(..., name="biases")
affine = tf.matmul(inputs, weights) + biases
output = tf.nn.relu(affine, name=scope)
```
NOTE: This constructor validates the given `name`. Valid scope
names match one of the following regular expressions:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
[A-Za-z0-9_.\\-/]* (for other scopes)
Args:
name: A name for the scope.
Returns:
A context manager that installs `name` as a new name scope.
Raises:
ValueError: If `name` is not a valid scope name, according to the rules
above.
"""
if name:
if isinstance(name, compat.bytes_or_text_types):
name = compat.as_str(name)
if self._name_stack:
# Scopes created in a nested scope may have initial characters
# that are illegal as the initial character of an op name
# (viz. '-', '\', '/', and '_').
if not _VALID_SCOPE_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
else:
# Scopes created in the root must match the more restrictive
# op name regex, which constrains the initial character.
if not _VALID_OP_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
old_stack = self._name_stack
if not name: # Both for name=None and name="" we re-set to empty scope.
new_stack = None
elif name[-1] == "/":
new_stack = _name_from_scope_name(name)
else:
new_stack = self.unique_name(name)
self._name_stack = new_stack
try:
yield "" if new_stack is None else new_stack + "/"
finally:
self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what the name
to be created will be.
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
"""
if self._name_stack:
name = self._name_stack + "/" + name
# For the sake of checking for names in use, we treat names as case
# insensitive (e.g. foo = Foo).
name_key = name.lower()
i = self._names_in_use.get(name_key, 0)
# Increment the number for "name_key".
if mark_as_used:
self._names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
# Make sure the composed name key is not already used.
while name_key in self._names_in_use:
name_key = "%s_%d" % (base_name_key, i)
i += 1
# Mark the composed name_key as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name_key] = 1
# Return the new name with the original capitalization of the given name.
name = "%s_%d" % (name, i-1)
return name
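# Commented sketch of the uniquing behavior above (suffixing plus
# case-insensitive bookkeeping); `g` is an illustrative fresh graph:
#
#   import tensorflow as tf
#
#   g = tf.Graph()
#   assert g.unique_name("foo") == "foo"
#   assert g.unique_name("foo") == "foo_1"
#   assert g.unique_name("FOO") == "FOO_2"   # "FOO" collides with "foo"
#   assert g.unique_name("bar", mark_as_used=False) == "bar"
#   assert g.unique_name("bar") == "bar"     # the peek above did not reserve it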
def get_name_scope(self):
"""Returns the current name scope.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_default_graph().get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return self._name_stack
@tf_contextlib.contextmanager
def _colocate_with_for_gradient(self, op, gradient_uid,
ignore_existing=False):
with self.colocate_with(op, ignore_existing):
if gradient_uid is not None and self._control_flow_context is not None:
self._control_flow_context.EnterGradientColocation(op, gradient_uid)
try:
yield
finally:
self._control_flow_context.ExitGradientColocation(op, gradient_uid)
else:
yield
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
"""Returns a context manager that specifies an op to colocate with.
Note: this function is not for public use, only for internal libraries.
For example:
```python
a = tf.Variable([1.0])
with g.colocate_with(a):
b = tf.constant(1.0)
c = tf.add(a, b)
```
`b` and `c` will always be colocated with `a`, no matter where `a`
is eventually placed.
**NOTE** Using a colocation scope resets any existing device constraints.
If `op` is `None` then `ignore_existing` must be `True` and the new
scope resets all colocation and device constraints.
Args:
op: The op to colocate all created ops with, or `None`.
ignore_existing: If true, only applies colocation of this op within
the context, rather than applying all colocation properties
on the stack. If `op` is `None`, this value must be `True`.
Raises:
ValueError: if op is None but ignore_existing is False.
Yields:
A context manager that specifies the op with which to colocate
newly created ops.
"""
if op is None and not ignore_existing:
raise ValueError("Trying to reset colocation (op is None) but "
"ignore_existing is not True")
if op is not None and not isinstance(op, Operation):
# We always want to colocate with the reference op.
op = internal_convert_to_tensor_or_indexed_slices(op, as_ref=True).op
# By default, colocate_with resets the device function stack,
# since colocate_with is typically used in specific internal
# library functions where colocation is intended to be "stronger"
# than device functions.
#
# In the future, a caller may specify that device_functions win
# over colocation, in which case we can add support.
device_fn_tmp = self._device_function_stack
self._device_function_stack = traceable_stack.TraceableStack()
if ignore_existing:
current_stack = self._colocation_stack
self._colocation_stack = traceable_stack.TraceableStack()
if op is not None:
# offset refers to the stack frame used for storing code location.
# We use 4, the sum of 1 to use our caller's stack frame and 3
# to jump over layers of context managers above us.
self._colocation_stack.push_obj(op, offset=4)
try:
yield
finally:
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
self._colocation_stack.pop_obj()
# Reset the colocation stack if requested.
if ignore_existing:
self._colocation_stack = current_stack
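# Commented sketch (illustrative names) of `ignore_existing=True`: colocating
# with `None` clears the inherited colocation constraints inside the block:
#
#   import tensorflow as tf
#
#   g = tf.Graph()
#   with g.as_default():
#     a = tf.Variable([1.0], name="a")
#     with g.colocate_with(a):
#       b = tf.constant(1.0, name="b")     # colocated with "a"
#       with g.colocate_with(None, ignore_existing=True):
#         c = tf.constant(2.0, name="c")   # no inherited colocation
#     assert b.op.colocation_groups() == [b"loc:@a"]
#     assert c.op.colocation_groups() == [b"loc:@c"]   # just its own group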
def _add_device_to_stack(self, device_name_or_function, offset=0):
"""Add device to stack manually, separate from a context manager."""
total_offset = 1 + offset
spec = _UserDeviceSpec(device_name_or_function)
self._device_function_stack.push_obj(spec, offset=total_offset)
return spec
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
# pylint: disable=line-too-long
"""Returns a context manager that specifies the default device to use.
The `device_name_or_function` argument may either be a device name
string, a device function, or None:
* If it is a device name string, all operations constructed in
this context will be assigned to the device with that name, unless
overridden by a nested `device()` context.
* If it is a function, it will be treated as a function from
Operation objects to device name strings, and invoked each time
a new Operation is created. The Operation will be assigned to
the device with the returned name.
* If it is None, all `device()` invocations from the enclosing context
will be ignored.
For information about the valid syntax of device name strings, see
the documentation in
[`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).
For example:
```python
with g.device('/device:GPU:0'):
# All operations constructed in this context will be placed
# on GPU 0.
with g.device(None):
# All operations constructed in this context will have no
# assigned device.
# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
if n.type == "MatMul":
return "/device:GPU:0"
else:
return "/cpu:0"
with g.device(matmul_on_gpu):
# All operations of type "MatMul" constructed in this context
# will be placed on GPU 0; all other operations will be placed
# on CPU 0.
```
**N.B.** The device scope may be overridden by op wrappers or
other library code. For example, a variable assignment op
`v.assign()` must be colocated with the `tf.Variable` `v`, and
incompatible device scopes will be ignored.
Args:
device_name_or_function: The device name or function to use in
the context.
Yields:
A context manager that specifies the default device to use for newly
created ops.
"""
self._add_device_to_stack(device_name_or_function, offset=2)
try:
yield
finally:
self._device_function_stack.pop_obj()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in LIFO order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
# pylint: disable=protected-access
for device_spec in self._device_function_stack.peek_objs():
if device_spec.function is None:
break
op._set_device(device_spec.function(op))
op._device_code_locations = self._snapshot_device_function_stack_metadata()
# pylint: enable=protected-access
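# Commented sketch (device strings are illustrative) of the LIFO walk above:
# the innermost device scope is applied first, and a `None` device function
# stops the walk so outer scopes are ignored:
#
#   import tensorflow as tf
#
#   g = tf.Graph()
#   with g.as_default():
#     with g.device("/cpu:0"):
#       a = tf.constant(1.0)      # gets "/device:CPU:0"
#       with g.device(None):
#         b = tf.constant(2.0)    # no device constraint applied
#   assert a.op.device == "/device:CPU:0"
#   assert b.op.device == ""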
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.FIFOQueue(10, tf.float32)
# All stateful Operations constructed in this context will be
# placed in resource container "experiment0".
v4 = tf.Variable([4.0])
q2 = tf.FIFOQueue(20, tf.float32)
with g.container(""):
# All stateful Operations constructed in this context will be
# placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q2
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
self._container = container_name
try:
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
The `control_inputs` argument list control dependencies that must be
added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition
to the current control dependencies. None to indicate that
the dependencies should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs_val = []
self._new_stack = True
else:
self._control_inputs_val = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
# Clear the control_dependencies graph.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs_val
def add_op(self, op):
self._seen_nodes.add(op)
def op_in_group(self, op):
return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on it.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend([c for c in controller.control_inputs if c not in input_ops])
return ret
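# Commented sketch (illustrative names) of the pruning described above: an op
# whose data inputs already include a control input does not get a redundant
# control edge, while an unrelated op does:
#
#   import tensorflow as tf
#
#   g = tf.Graph()
#   with g.as_default():
#     a = tf.constant(1.0, name="a")
#     with g.control_dependencies([a]):
#       b = a + 1.0                        # data-dependent on `a`
#       c = tf.constant(3.0, name="c")     # unrelated to `a`
#   assert a.op not in b.op.control_inputs   # pruned: `a` is a data input
#   assert a.op in c.op.control_inputs       # explicit control edge added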
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Also note that though execution of ops created under this scope will trigger
execution of the dependencies, the ops created under this scope might still
be pruned from a normal TensorFlow graph. For example, in the following
snippet of code the dependencies are never executed:
```python
loss = model.loss()
with tf.control_dependencies(dependencies):
loss = loss + tf.constant(1) # note: dependencies ignored in the
# backward pass
return tf.gradients(loss, model.variables)
```
This is because evaluating the gradient graph does not require evaluating
the constant(1) op created in the forward pass.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
if isinstance(c, IndexedSlices):
c = c.op
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # No extra attributes
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
f_2 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
f_3 = Foo() # Additional attribute _a=True
with g._attr_scope({"_a": None}):
f_4 = Foo() # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to
AttrValue protocol buffers or None.
Returns:
A context manager that sets the additional attributes for one or more
ops created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # Uses the default registered kernel for the Foo op.
with g.kernel_label_map({"Foo": "v_2"}):
f_2 = Foo() # Uses the registered kernel with label "v_2"
# for the Foo op.
with g.kernel_label_map({"Foo": "v_3"}):
f_3 = Foo() # Uses the registered kernel with label "v_3"
# for the Foo op.
with g.kernel_label_map({"Foo": ""}):
f_4 = Foo() # Uses the default registered kernel
# for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to
kernel label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
s_2 = tf.square(c) # Uses _custom_square_grad to compute the
# gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op
type strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
# Install the given label
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
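# Commented sketch (illustrative names) of the feed/fetch guards above; a
# Session will refuse to feed an unfeedable tensor or fetch an unfetchable op:
#
#   import tensorflow as tf
#
#   g = tf.Graph()
#   with g.as_default():
#     c = tf.constant(1.0)
#   g.prevent_feeding(c)
#   g.prevent_fetching(c.op)
#   assert not g.is_feedable(c)
#   assert not g.is_fetchable(c)    # a Tensor is checked via its producing op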
def switch_to_thread_local(self):
"""Make device, colocation and dependencies stacks thread-local.
Device, colocation and dependencies stacks are not thread-local by default.
If multiple threads access them, then the state is shared. This means that
one thread may affect the behavior of another thread.
After this method is called, the stacks become thread-local. If multiple
threads access them, then the state is not shared. Each thread uses its own
value; a thread doesn't affect other threads by mutating such a stack.
The initial value for every thread's stack is set to the current value
of the stack when `switch_to_thread_local()` was first called.
"""
if not self._stack_state_is_thread_local:
self._stack_state_is_thread_local = True
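# Commented sketch (illustrative names; assumes TF 1.x graph mode) of the
# switch above: once the stacks are thread-local, a device scope entered on
# one thread does not leak into ops built on another thread:
#
#   import threading
#   import tensorflow as tf
#
#   g = tf.Graph()
#   g.switch_to_thread_local()
#   devices = {}
#
#   def build_elsewhere():
#     with g.as_default():
#       devices["other"] = tf.constant(1.0, name="other").op.device
#
#   with g.as_default(), g.device("/cpu:0"):
#     t = threading.Thread(target=build_elsewhere)
#     t.start()
#     t.join()
#     devices["main"] = tf.constant(2.0, name="main").op.device
#   # devices["main"] carries the "/device:CPU:0" constraint; devices["other"]
#   # is "" because the other thread started from its own copy of the stack.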
@property
def _device_function_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where device_function_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_device_function_stack"):
stack_copy_for_this_thread = self._graph_device_function_stack.copy()
self._thread_local._device_function_stack = stack_copy_for_this_thread
return self._thread_local._device_function_stack
# pylint: enable=protected-access
else:
return self._graph_device_function_stack
@property
def _device_functions_outer_to_inner(self):
user_device_specs = self._device_function_stack.peek_objs()
device_functions = [spec.function for spec in user_device_specs]
device_functions_outer_to_inner = list(reversed(device_functions))
return device_functions_outer_to_inner
def _snapshot_device_function_stack_metadata(self):
"""Return device function stack as a list of TraceableObjects.
Returns:
[traceable_stack.TraceableObject, ...] where each TraceableObject's .obj
member is a displayable name for the user's argument to Graph.device, and
the filename and lineno members point to the code location where
Graph.device was called directly or indirectly by the user.
"""
traceable_objects = self._device_function_stack.peek_traceable_objs()
snapshot = []
for obj in traceable_objects:
obj_copy = obj.copy_metadata()
obj_copy.obj = obj.obj.display_name
snapshot.append(obj_copy)
return snapshot
@_device_function_stack.setter
def _device_function_stack(self, device_function_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._device_function_stack = device_function_stack
# pylint: enable=protected-access
else:
self._graph_device_function_stack = device_function_stack
@property
def _colocation_stack(self):
"""Return thread-local copy of colocation stack."""
if self._stack_state_is_thread_local:
# This may be called from a thread where colocation_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_colocation_stack"):
stack_copy_for_this_thread = self._graph_colocation_stack.copy()
self._thread_local._colocation_stack = stack_copy_for_this_thread
return self._thread_local._colocation_stack
# pylint: enable=protected-access
else:
return self._graph_colocation_stack
def _snapshot_colocation_stack_metadata(self):
"""Return colocation stack metadata as a dictionary."""
traceable_objects = self._colocation_stack.peek_traceable_objs()
return {obj.obj.name: obj.copy_metadata() for obj in traceable_objects}
@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._colocation_stack = colocation_stack
# pylint: enable=protected-access
else:
self._graph_colocation_stack = colocation_stack
@property
def _control_dependencies_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where control_dependencies_stack
# doesn't yet exist.
if not hasattr(self._thread_local, "_control_dependencies_stack"):
self._thread_local._control_dependencies_stack = (
self._graph_control_dependencies_stack[:])
return self._thread_local._control_dependencies_stack
else:
return self._graph_control_dependencies_stack
@_control_dependencies_stack.setter
def _control_dependencies_stack(self, control_dependencies):
if self._stack_state_is_thread_local:
self._thread_local._control_dependencies_stack = control_dependencies
else:
self._graph_control_dependencies_stack = control_dependencies
@property
def _distribution_strategy_stack(self):
"""A stack to maintain distribution strategy context for each thread."""
if not hasattr(self._thread_local, "_distribution_strategy_stack"):
self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access
return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access
@_distribution_strategy_stack.setter
def _distribution_strategy_stack(self, _distribution_strategy_stack):
self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access
_distribution_strategy_stack)
def _mutation_lock(self):
"""Returns a lock to guard code that creates & mutates ops.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_MUTATION_LOCK_GROUP)
def _session_run_lock(self):
"""Returns a lock to guard code for Session.run.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export("device")
def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See
`tf.Graph.device`
for more details.
Args:
device_name_or_function: The device name or function to use in
the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If eager execution is enabled and a function is passed in.
"""
if context.executing_eagerly():
# TODO(agarwal): support device functions in EAGER mode.
if callable(device_name_or_function):
raise RuntimeError(
"tf.device does not support functions when eager execution "
"is enabled.")
return context.device(device_name_or_function)
else:
return get_default_graph().device(device_name_or_function)
@tf_export("container")
def container(container_name):
"""Wrapper for `Graph.container()` using the default graph.
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops.
"""
return get_default_graph().container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
if context.executing_eagerly():
if op is not None:
return device(op.device)
else:
return _NullContextmanager()
else:
default_graph = get_default_graph()
if isinstance(op, EagerTensor):
if default_graph.building_function:
return default_graph.device(op.device)
else:
raise ValueError("Encountered an Eager-defined Tensor during graph "
"construction, but a function was not being built.")
return default_graph._colocate_with_for_gradient(
op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
@tf_export("colocate_with")
def colocate_with(op, ignore_existing=False):
return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See `tf.Graph.control_dependencies`
for more details.
When eager execution is enabled, any callable object in the `control_inputs`
list will be called.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies. If eager execution is enabled, any callable object in the
`control_inputs` list will be called.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
"""
if context.executing_eagerly():
if control_inputs:
# Execute any pending callables.
for control in control_inputs:
if callable(control):
control()
return _NullContextmanager()
else:
return get_default_graph().control_dependencies(control_inputs)
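# Commented sketch (illustrative names) of the eager branch above: with eager
# execution enabled, callables in `control_inputs` are invoked immediately and
# a no-op context is returned:
#
#   import tensorflow as tf
#   tf.enable_eager_execution()
#
#   calls = []
#   with tf.control_dependencies([lambda: calls.append("ran")]):
#     x = tf.constant(1.0)
#   assert calls == ["ran"]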
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@tf_contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
_default_session_stack = _DefaultStack() # pylint: disable=protected-access
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
The following code examples are equivalent:
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
@tf_export("get_default_session")
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
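# Commented sketch (assumes TF 1.x; names are illustrative) of the
# default-session machinery above; the Session context installs itself as the
# default, which `Tensor.eval()` and `get_default_session()` then pick up:
#
#   import tensorflow as tf
#
#   with tf.Graph().as_default():
#     c = tf.constant(5.0)
#     with tf.Session() as sess:
#       assert tf.get_default_session() is sess
#       assert c.eval() == 5.0
#     assert tf.get_default_session() is None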
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
ret = super(_DefaultGraphStack, self).get_default()
if ret is None:
ret = self._GetGlobalDefaultGraph()
return ret
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
      # TODO(mrry): Perhaps log that the default graph is being used, or
      #   provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
@tf_contextlib.contextmanager
def get_controller(self, default):
context.context().context_switches.push(
default.building_function, default.as_default)
try:
with super(_DefaultGraphStack, self).get_controller(
default) as g, context.graph_mode():
yield g
finally:
# If an exception is raised here it may be hiding a related exception in
# the try-block (just above).
context.context().context_switches.pop()
_default_graph_stack = _DefaultGraphStack()
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def init_scope():
"""A context manager that lifts ops out of control-flow scopes and function-building graphs.
There is often a need to lift variable initialization ops out of control-flow
scopes, function-building graphs, and gradient tapes. Entering an
`init_scope` is a mechanism for satisfying these desiderata. In particular,
entering an `init_scope` has three effects:
(1) All control dependencies are cleared the moment the scope is entered;
this is equivalent to entering the context manager returned from
`control_dependencies(None)`, which has the side-effect of exiting
control-flow scopes like `tf.cond` and `tf.while_loop`.
(2) All operations that are created while the scope is active are lifted
into the lowest context on the `context_stack` that is not building a
graph function. Here, a context is defined as either a graph or an eager
context. Every context switch, i.e., every installation of a graph as
the default graph and every switch into eager mode, is logged in a
thread-local stack called `context_switches`; the log entry for a
context switch is popped from the stack when the context is exited.
Entering an `init_scope` is equivalent to crawling up
`context_switches`, finding the first context that is not building a
graph function, and entering it. A caveat is that if graph mode is
enabled but the default graph stack is empty, then entering an
`init_scope` will simply install a fresh graph as the default one.
(3) The gradient tape is paused while the scope is active.
Raises:
RuntimeError: if graph state is incompatible with this initialization.
"""
# pylint: enable=g-doc-return-or-yield,line-too-long
if context.executing_eagerly():
# Fastpath.
with tape.stop_recording():
yield
else:
# Retrieve the active name scope: entering an `init_scope` preserves
# the name scope of the current context.
default_graph = get_default_graph()
scope = default_graph.get_name_scope()
if scope and scope[-1] != "/":
# Names that end with trailing slashes are treated by `name_scope` as
# absolute.
scope = scope + "/"
inner_device_stack = default_graph._device_function_stack # pylint: disable=protected-access
outer_context = None
if not _default_graph_stack.stack:
# If the default graph stack is empty, then we cannot be building a
# function. Install the global graph (which, in this case, is also the
# default graph) as the outer context.
if default_graph.building_function:
raise RuntimeError("The global graph is building a function.")
outer_context = default_graph.as_default
else:
# Find a context that is not building a function.
for stack_entry in reversed(context.context().context_switches.stack):
if not stack_entry.is_building_function:
outer_context = stack_entry.enter_context_fn
break
if outer_context is None:
# As a last resort, obtain the global default graph; this graph doesn't
# necessarily live on the graph stack (and hence it doesn't necessarily
# live on the context stack), but it is stored in the graph stack's
# encapsulating object.
outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access
if outer_context is None:
# Sanity check; this shouldn't be triggered.
raise RuntimeError("All graphs are building functions, and no "
"eager context was previously active.")
outer_graph = None
outer_device_stack = None
try:
with outer_context(), name_scope(scope), control_dependencies(
None), tape.stop_recording():
if not context.executing_eagerly():
# The device stack is preserved when lifting into a graph. Eager
# execution doesn't implement device stacks and in particular it
# doesn't support device functions, so in general it's not possible
# to do the same when lifting into the eager context.
outer_graph = get_default_graph()
outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access
outer_graph._device_function_stack = inner_device_stack # pylint: disable=protected-access
yield
finally:
# If an exception is raised here it may be hiding a related exception in
      # the try-block (just above).
if outer_graph is not None:
outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access
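# Editor's note: an illustrative, hypothetical sketch of `init_scope` in graph
# mode; it is never called and is not part of the original module. `tf` refers
# to the public TensorFlow 1.x API.
def _example_init_scope_usage():
  import tensorflow as tf
  with tf.Graph().as_default():
    with tf.control_dependencies([tf.no_op()]):
      with init_scope():
        # Control dependencies are cleared inside `init_scope`, so this
        # constant is created as if it were built outside the block above.
        lifted = tf.constant(1.0)
      pinned = tf.constant(2.0)
    assert not lifted.op.control_inputs
    assert pinned.op.control_inputs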
@tf_export("enable_eager_execution")
def enable_eager_execution(config=None,
device_policy=None,
execution_mode=None):
"""Enables eager execution for the lifetime of this program.
Eager execution provides an imperative interface to TensorFlow. With eager
execution enabled, TensorFlow functions execute operations immediately (as
opposed to adding to a graph to be executed later in a `tf.Session`) and
return concrete values (as opposed to symbolic references to a node in a
computational graph).
For example:
```python
tf.enable_eager_execution()
# After eager execution is enabled, operations are executed as they are
# defined and Tensor objects hold concrete values, which can be accessed as
# numpy.ndarray`s through the numpy() method.
assert tf.multiply(6, 7).numpy() == 42
```
Eager execution cannot be enabled after TensorFlow APIs have been used to
create or execute graphs. It is typically recommended to invoke this function
at program startup and not in a library (as most libraries should be usable
both with and without eager execution).
Args:
config: (Optional.) A `tf.ConfigProto` to use to configure the environment
in which operations are executed. Note that `tf.ConfigProto` is also
used to configure graph execution (via `tf.Session`) and many options
within `tf.ConfigProto` are not implemented (or are irrelevant) when
eager execution is enabled.
device_policy: (Optional.) Policy controlling how operations requiring
      inputs on a specific device (e.g., GPU 0) handle inputs on a different
device (e.g. GPU 1 or CPU). When set to None, an appropriate value will be
picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
placement is not correct.
- tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not
on the right device but logs a warning.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.
Note that this may hide performance problems as there is no notification
provided when operations are blocked on the tensor being copied between
devices.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
int32 tensors, raising errors on the other ones.
    execution_mode: (Optional.) Policy controlling how dispatched operations are
actually executed. When set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Valid values:
- tf.contrib.eager.SYNC: executes each operation synchronously.
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
Raises:
ValueError: If eager execution is enabled after creating/executing a
TensorFlow graph, or if options provided conflict with a previous call
to this function.
"""
return enable_eager_execution_internal(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=None)
def enable_eager_execution_internal(config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Enables eager execution for the lifetime of this program.
Most of the doc string for enable_eager_execution is relevant here as well.
Args:
config: See enable_eager_execution doc string
device_policy: See enable_eager_execution doc string
execution_mode: See enable_eager_execution doc string
server_def: (Optional.) A tensorflow::ServerDef proto.
Enables execution on remote devices. GrpcServers need to be started by
creating an identical server_def to this, and setting the appropriate
task_indexes, so that the servers can communicate. It will then be
possible to execute operations on remote devices.
Raises:
    ValueError: If the supplied options are invalid, if graph-building APIs
      have already been used, or if the options conflict with an already
      active eager context.
"""
if config is not None and not isinstance(config, config_pb2.ConfigProto):
raise TypeError(
"config must be a tf.ConfigProto, but got %s" % type(config))
if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
context.DEVICE_PLACEMENT_WARN,
context.DEVICE_PLACEMENT_SILENT,
context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
raise ValueError(
"device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
)
if execution_mode not in (None, context.SYNC, context.ASYNC):
raise ValueError(
"execution_mode must be one of None, tf.contrib.eager.SYNC, "
"tf.contrib.eager.ASYNC")
# pylint: disable=protected-access
if context._default_mode == context.GRAPH_MODE:
graph_mode_has_been_used = (
_default_session_stack.stack
or len(get_default_graph().get_operations()) > 0) # pylint: disable=g-explicit-length-test
if graph_mode_has_been_used:
raise ValueError(
"tf.enable_eager_execution must be called at program startup.")
context._default_mode = context.EAGER_MODE
if context._context is None:
context._context = context.Context(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=server_def)
elif ((config is not None and config is not context._context._config) or
(device_policy is not None and
device_policy is not context._context._device_policy) or
(execution_mode is not None and
execution_mode is not context._context._execution_mode)):
raise ValueError("Trying to change the options of an active eager"
" execution. Context config: %s, specified config:"
" %s. Context device policy: %s, specified device"
" policy: %s. Context execution mode: %s, "
" specified execution mode %s." %
(context._context._config, config,
context._context._device_policy, device_policy,
context._context._execution_mode, execution_mode))
else:
raise ValueError(
"tf.enable_eager_execution must be called at program startup.")
# Monkey patch to get rid of an unnecessary conditional since the context is
# now initialized.
context.context = context.context_safe
def eager_run(main=None, argv=None):
"""Runs the program with an optional main function and argv list.
The program will run with eager execution enabled.
Example:
```python
import tensorflow as tf
# Import subject to future changes:
from tensorflow.contrib.eager.python import tfe
def main(_):
u = tf.constant(6.0)
v = tf.constant(7.0)
print(u * v)
if __name__ == "__main__":
tfe.run()
```
Args:
main: the main function to run.
argv: the arguments to pass to it.
"""
enable_eager_execution()
app.run(main, argv)
@tf_export("reset_default_graph")
def reset_default_graph():
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.Session` or `tf.InteractiveSession` is active will result in undefined
behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
after calling this function will result in undefined behavior.
Raises:
AssertionError: If this function is called within a nested graph.
"""
if not _default_graph_stack.is_cleared():
raise AssertionError("Do not use tf.reset_default_graph() to clear "
"nested graphs. If you need a cleared graph, "
"exit the nesting and create a new graph.")
_default_graph_stack.reset()
@tf_export("get_default_graph")
def get_default_graph():
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
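# Editor's note: an illustrative sketch (never called) of how the thread-local
# graph stack above behaves; `tf` refers to the public TensorFlow 1.x API.
def _example_default_graph_usage():
  import tensorflow as tf
  outer = tf.get_default_graph()
  g = tf.Graph()
  with g.as_default():
    # The innermost graph on the stack is returned inside the block.
    assert tf.get_default_graph() is g
  # On exit the previous default (here the global default graph) is restored.
  assert tf.get_default_graph() is outer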
def has_default_graph():
"""Returns True if there is a default graph."""
return len(_default_graph_stack.stack) >= 1
def get_name_scope():
"""Returns the current name scope in the default_graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
if context.executing_eagerly():
return context.context().scope_name.rstrip("/")
return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." % (item,
original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
if get_default_graph().building_function:
return get_default_graph()
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# 1. We validate that all of the inputs are from the same graph. This is
# either the supplied graph parameter, or the first one selected from one
# the graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
# Determine if this is a valid graph_element.
# TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
# up.
graph_element = None
if (isinstance(op_input, (Operation, _TensorLike)) and
((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = graph_element.graph
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError("%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or get_default_graph()
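# Editor's note: an illustrative sketch (never called) of the graph-selection
# rules documented above; `tf` refers to the public TensorFlow 1.x API.
def _example_get_graph_from_inputs():
  import tensorflow as tf
  g = tf.Graph()
  with g.as_default():
    a = tf.constant(1.0)
    b = tf.constant(2.0)
  # With no explicit `graph` argument, the graph is inferred from the first
  # graph-element input, and the remaining inputs are validated against it.
  assert _get_graph_from_inputs([a, b]) is g
  # With no graph-valued inputs at all, the default graph is used.
  assert _get_graph_from_inputs([1.0, "x"]) is tf.get_default_graph()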
@tf_export("GraphKeys")
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
`tf.global_variables`
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
    machine. Usually used for temporary variables, like counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
`tf.trainable_variables`
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
`tf.summary.merge_all`
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
`tf.train.start_queue_runners`
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
`tf.moving_average_variables`
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
The following standard keys are _defined_, but their collections are **not**
automatically populated as many of the others are:
* `WEIGHTS`
* `BIASES`
* `ACTIVATIONS`
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
  # Key to collect local variables which are used to accumulate internal state
# to be used in tf.metrics.*.
METRIC_VARIABLES = "metric_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Used to store v2 summary names.
_SUMMARY_COLLECTION = "_SUMMARY_V2"
# List of all collections that keep track of variables.
_VARIABLE_COLLECTIONS = [
GLOBAL_VARIABLES,
LOCAL_VARIABLES,
METRIC_VARIABLES,
MODEL_VARIABLES,
TRAINABLE_VARIABLES,
MOVING_AVERAGE_VARIABLES,
CONCATENATED_VARIABLES,
TRAINABLE_RESOURCE_VARIABLES,
]
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
def VARIABLES(cls): # pylint: disable=no-self-argument
logging.log_first_n(logging.WARN,
"VARIABLES collection name is deprecated, please use "
"GLOBAL_VARIABLES instead; VARIABLES will be removed "
"after 2017-03-02.", 1)
return cls.GLOBAL_VARIABLES
@tf_export("add_to_collection")
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See `tf.Graph.add_to_collection`
for more details.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
@compatibility(eager)
Collections are only supported in eager when variables are created inside an
EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collection(name, value)
@tf_export("add_to_collections")
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See `tf.Graph.add_to_collections`
for more details.
Args:
names: The key for the collections. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
@compatibility(eager)
Collections are only supported in eager when variables are created inside an
EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collections(names, value)
@tf_export("get_collection_ref")
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See `tf.Graph.get_collection_ref`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection. Note that this returns
the collection list itself, which can be modified in place to change the
collection.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection_ref(key)
@tf_export("get_collection")
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See `tf.Graph.get_collection`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items
without a `name` attribute are never returned if a scope is supplied and
the choice or `re.match` means that a `scope` without special tokens
filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection(key, scope)
def get_all_collection_keys():
"""Returns a list of collections used in the default graph."""
return get_default_graph().get_all_collection_keys()
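# Editor's note: an illustrative sketch (never called) of the collection
# wrappers above together with `GraphKeys`; `tf` refers to the public
# TensorFlow 1.x API.
def _example_collections_usage():
  import tensorflow as tf
  with tf.Graph().as_default():
    total = tf.Variable(0.0, name="total")
    tf.add_to_collection("my_totals", total)
    # Variables are also tracked automatically under GLOBAL_VARIABLES.
    assert total in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    assert tf.get_collection("my_totals") == [total]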
name_scope_cache = {}
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export("name_scope", "keras.backend.name_scope")
class name_scope(object): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
`tf.Graph.name_scope`
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
"""
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
"""
self._name = default_name if name is None else name
self._default_name = default_name
self._values = values
self._ctx = context.context()
self._in_eager_mode = self._ctx.executing_eagerly()
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
if self._in_eager_mode:
self._old_name = self._ctx.scope_name
if not self._name:
scope_name = ""
else:
cache_key = self._name, self._old_name, self._default_name
if cache_key in name_scope_cache:
self._ctx.scope_name = name_scope_cache[cache_key]
return self._ctx.scope_name
elif self._name[-1] == "/":
# A trailing slash breaks out of nested name scopes, indicating a
# fully specified scope name, for compatibility with Graph.name_scope.
scope_name = self._name
else:
name_with_trailing_slash = self._name + "/"
scope_name = (
self._old_name + name_with_trailing_slash
if self._old_name else name_with_trailing_slash)
name_scope_cache[cache_key] = scope_name
self._ctx.scope_name = scope_name
return scope_name
else:
if self._name is None and self._values is not None:
# We only raise an error if values is not None (provided) because
# currently tf.name_scope(None) (values=None then) is sometimes used as
# an idiom to reset to top scope.
raise ValueError(
"At least one of name (%s) and default_name (%s) must be provided."
% (self._name, self._default_name))
if self._values is None:
self._values = []
g = _get_graph_from_inputs(self._values)
self._g_manager = g.as_default()
self._g_manager.__enter__()
try:
self._name_scope = g.name_scope(self._name)
return self._name_scope.__enter__()
except:
self._g_manager.__exit__(*sys.exc_info())
raise
def __exit__(self, type_arg, value_arg, traceback_arg):
if self._in_eager_mode:
self._ctx.scope_name = self._old_name
else:
self._name_scope.__exit__(type_arg, value_arg, traceback_arg)
self._g_manager.__exit__(type_arg, value_arg, traceback_arg)
return False # False values do not suppress exceptions
def strip_name_scope(name, export_scope):
"""Removes name scope from a name.
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None.
"""
if export_scope:
if export_scope[-1] == "/":
export_scope = export_scope[:-1]
try:
# Strips export_scope/, export_scope///,
# ^export_scope/, loc:@export_scope/.
str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
def prepend_name_scope(name, import_scope):
"""Prepends name scope to a name.
Args:
name: A `string` name.
import_scope: Optional `string`. Name scope to add.
Returns:
Name with name scope added, or the original name if import_scope
is None.
"""
if import_scope:
if import_scope[-1] == "/":
import_scope = import_scope[:-1]
try:
str_to_replace = r"([\^]|loc:@|^)(.*)"
return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
compat.as_str(name))
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
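# Editor's note: an illustrative sketch (never called) of the scope-rewriting
# helpers above; the names used are made up for the example.
def _example_scope_name_rewriting():
  assert prepend_name_scope("weights:0",
                            import_scope="tower_0") == "tower_0/weights:0"
  assert strip_name_scope("tower_0/weights:0",
                          export_scope="tower_0") == "weights:0"
  # An empty or `None` scope leaves the name untouched.
  assert strip_name_scope("weights:0", export_scope=None) == "weights:0"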
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export("op_scope")
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
"""DEPRECATED. Same as name_scope above, just different argument order."""
logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
" use tf.name_scope(name, default_name, values)")
with name_scope(name, default_name=default_name, values=values) as scope:
yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
proto_type=None,
to_proto=None,
from_proto=None):
"""Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
  `from_proto` function converts a protocol buffer into a Python object, and
  returns the object.
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
"""
if to_proto and not callable(to_proto):
raise TypeError("to_proto must be callable.")
if from_proto and not callable(from_proto):
raise TypeError("from_proto must be callable.")
_proto_function_registry.register((proto_type, to_proto, from_proto),
collection_name)
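# Editor's note: an illustrative sketch (never called) of registering proto
# conversion functions for a made-up collection; the collection name and the
# use of `VariableDef` here are assumptions for the example only.
def _example_register_proto_function():
  from tensorflow.core.framework import variable_pb2
  def _to_proto(value, export_scope=None):
    del export_scope  # Unused in this sketch.
    proto = variable_pb2.VariableDef()
    proto.variable_name = value
    return proto
  def _from_proto(proto, import_scope=None):
    del import_scope  # Unused in this sketch.
    return proto.variable_name
  register_proto_function("my_custom_collection",
                          proto_type=variable_pb2.VariableDef,
                          to_proto=_to_proto,
                          from_proto=_from_proto)
  assert (get_collection_proto_type("my_custom_collection")
          is variable_pb2.VariableDef)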
def get_collection_proto_type(collection_name):
"""Returns the proto_type for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[0]
except LookupError:
return None
def get_to_proto_function(collection_name):
"""Returns the to_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[1]
except LookupError:
return None
def get_from_proto_function(collection_name):
"""Returns the from_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[2]
except LookupError:
return None
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") % (op.name, dtype,
name, as_ref))
register_tensor_conversion_function(Operation, _operation_conversion_error)
| {
"content_hash": "09401ad4856b6f27d65cd3eb401011f7",
"timestamp": "",
"source": "github",
"line_count": 6106,
"max_line_length": 116,
"avg_line_length": 36.42581067802162,
"alnum_prop": 0.6636707790806416,
"repo_name": "ZhangXinNan/tensorflow",
"id": "5527f5286074baefaa29c6aec8a7c027a759f377",
"size": "223105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "327005"
},
{
"name": "C#",
"bytes": "8215"
},
{
"name": "C++",
"bytes": "46648068"
},
{
"name": "CMake",
"bytes": "206720"
},
{
"name": "Dockerfile",
"bytes": "6978"
},
{
"name": "Go",
"bytes": "1210133"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "830576"
},
{
"name": "Jupyter Notebook",
"bytes": "2632421"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "51309"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40046802"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "455624"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import pytest
from spacy.serialize.packer import Packer
from spacy.attrs import ORTH, SPACY
from spacy.tokens import Doc
import math
@pytest.mark.models
def test_read_write(EN):
doc1 = EN(u'This is a simple test. With a couple of sentences.')
doc2 = EN(u'This is another test document.')
with open('/tmp/spacy_docs.bin', 'wb') as file_:
file_.write(doc1.to_bytes())
file_.write(doc2.to_bytes())
with open('/tmp/spacy_docs.bin', 'rb') as file_:
bytes1, bytes2 = Doc.read_bytes(file_)
r1 = Doc(EN.vocab).from_bytes(bytes1)
r2 = Doc(EN.vocab).from_bytes(bytes2)
assert r1.string == doc1.string
assert r2.string == doc2.string
@pytest.mark.models
def test_left_right(EN):
orig = EN(u'This is a simple test. With a couple of sentences.')
result = Doc(orig.vocab).from_bytes(orig.to_bytes())
for word in result:
assert word.head.i == orig[word.i].head.i
if word.head is not word:
assert word.i in [w.i for w in word.head.children]
for child in word.lefts:
assert child.head.i == word.i
for child in word.rights:
assert child.head.i == word.i
@pytest.mark.models
def test_lemmas(EN):
orig = EN(u'The geese are flying')
result = Doc(orig.vocab).from_bytes(orig.to_bytes())
the, geese, are, flying = result
assert geese.lemma_ == 'goose'
assert are.lemma_ == 'be'
assert flying.lemma_ == 'fly'
| {
"content_hash": "32866957be4e772c525bd6afbb161343",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 68,
"avg_line_length": 28.764705882352942,
"alnum_prop": 0.6319018404907976,
"repo_name": "pombredanne/spaCy",
"id": "f90bb20c2473d6873041cc310118fc3f11ff25d4",
"size": "1467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spacy/tests/serialize/test_io.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "315320"
},
{
"name": "Groff",
"bytes": "188349"
},
{
"name": "HTML",
"bytes": "544516"
},
{
"name": "Makefile",
"bytes": "89484"
},
{
"name": "PostScript",
"bytes": "460967"
},
{
"name": "Python",
"bytes": "516087"
},
{
"name": "Shell",
"bytes": "96067"
}
],
"symlink_target": ""
} |
import pandas as pd
import pytz
from itertools import cycle
import numpy as np
from six import integer_types
from unittest import TestCase
import zipline.utils.factory as factory
from zipline.sources import (DataFrameSource,
DataPanelSource,
RandomWalkSource)
from zipline.utils import tradingcalendar as calendar_nyse
from zipline.finance.trading import with_environment
class TestDataFrameSource(TestCase):
def test_df_source(self):
source, df = factory.create_test_df_source()
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for expected_dt, expected_price in df.iterrows():
sid0 = next(source)
assert expected_dt == sid0.dt
assert expected_price[0] == sid0.price
def test_df_sid_filtering(self):
_, df = factory.create_test_df_source()
source = DataFrameSource(df, sids=[0])
assert 1 not in [event.sid for event in source], \
"DataFrameSource should only stream selected sid 0, not sid 1."
def test_panel_source(self):
source, panel = factory.create_test_panel_source(source_type=5)
assert isinstance(source.start, pd.lib.Timestamp)
assert isinstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertTrue('sid' in event)
self.assertTrue('arbitrary' in event)
self.assertTrue('type' in event)
self.assertTrue(hasattr(event, 'volume'))
self.assertTrue(hasattr(event, 'price'))
self.assertEquals(event['type'], 5)
self.assertEquals(event['arbitrary'], 1.)
self.assertEquals(event['sid'], 0)
self.assertTrue(isinstance(event['volume'], int))
self.assertTrue(isinstance(event['arbitrary'], float))
@with_environment()
def test_yahoo_bars_to_panel_source(self, env=None):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = factory.load_bars_from_yahoo(stocks=stocks,
indexes={},
start=start,
end=end)
check_fields = ['sid', 'open', 'high', 'low', 'close',
'volume', 'price']
source = DataPanelSource(data)
sids = [
asset.sid for asset in
[env.asset_finder.lookup_symbol(symbol, as_of_date=end)
for symbol in stocks]
]
stocks_iter = cycle(sids)
for event in source:
for check_field in check_fields:
self.assertIn(check_field, event)
self.assertTrue(isinstance(event['volume'], (integer_types)))
self.assertEqual(next(stocks_iter), event['sid'])
@with_environment()
def test_nan_filter_dataframe(self, env=None):
env.update_asset_finder(identifiers=[4, 5])
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.DataFrame(np.random.randn(2, 2),
index=dates,
columns=[4, 5])
# should be filtered
df.loc[dates[0], 4] = np.nan
# should not be filtered, should have been ffilled
df.loc[dates[1], 5] = np.nan
source = DataFrameSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
@with_environment()
def test_nan_filter_panel(self, env=None):
env.update_asset_finder(identifiers=[4, 5])
dates = pd.date_range('1/1/2000', periods=2, freq='B', tz='UTC')
df = pd.Panel(np.random.randn(2, 2, 2),
major_axis=dates,
items=[4, 5],
minor_axis=['price', 'volume'])
# should be filtered
df.loc[4, dates[0], 'price'] = np.nan
# should not be filtered, should have been ffilled
df.loc[5, dates[1], 'price'] = np.nan
source = DataPanelSource(df)
event = next(source)
self.assertEqual(5, event.sid)
event = next(source)
self.assertEqual(4, event.sid)
event = next(source)
self.assertEqual(5, event.sid)
self.assertFalse(np.isnan(event.price))
class TestRandomWalkSource(TestCase):
def test_minute(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1991-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end)
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertTrue(13 <= event.dt.hour <= 21,
"event.dt.hour == %i, not during market \
hours." % event.dt.hour)
def test_day(self):
np.random.seed(123)
start_prices = {0: 100,
1: 500}
start = pd.Timestamp('1990-01-01', tz='UTC')
end = pd.Timestamp('1992-01-01', tz='UTC')
source = RandomWalkSource(start_prices=start_prices,
calendar=calendar_nyse, start=start,
end=end, freq='daily')
self.assertIsInstance(source.start, pd.lib.Timestamp)
self.assertIsInstance(source.end, pd.lib.Timestamp)
for event in source:
self.assertIn(event.sid, start_prices.keys())
self.assertIn(event.dt.replace(minute=0, hour=0),
calendar_nyse.trading_days)
self.assertGreater(event.dt, start)
self.assertLess(event.dt, end)
self.assertGreater(event.price, 0,
"price should never go negative.")
self.assertEqual(event.dt.hour, 0)
| {
"content_hash": "0445364ea2c108bafbf92cec340446bb",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 75,
"avg_line_length": 40.97560975609756,
"alnum_prop": 0.5583333333333333,
"repo_name": "mmilutinovic1313/zipline-with-algorithms",
"id": "3664acdd656bd1672aab548357645409ccf4bf4b",
"size": "7302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sources.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Python",
"bytes": "1047562"
},
{
"name": "Shell",
"bytes": "3962"
}
],
"symlink_target": ""
} |
"""bigtable instances update command."""
from googlecloudsdk.api_lib.bigtable import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.bigtable import arguments
from googlecloudsdk.core import log
from googlecloudsdk.core import resources
class UpdateInstance(base.UpdateCommand):
"""Modify an existing Bigtable instance."""
@staticmethod
def Args(parser):
"""Register flags for this command."""
arguments.ArgAdder(parser).AddInstance().AddInstanceDescription()
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
      The updated instance resource.
"""
cli = util.GetAdminClient()
ref = resources.REGISTRY.Parse(
args.instance, collection='bigtableadmin.projects.instances')
msg = util.GetAdminMessages().BigtableadminProjectsInstancesGetRequest(
name=ref.RelativeName())
instance = cli.projects_instances.Get(msg)
instance.state = None # must be unset when calling Update
if args.description:
instance.displayName = args.description
instance = cli.projects_instances.Update(instance)
log.UpdatedResource(instance.name, kind='instance')
return instance
| {
"content_hash": "ce9653c98abb970e8813b7c062267222",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 34.61538461538461,
"alnum_prop": 0.7377777777777778,
"repo_name": "Sorsly/subtle",
"id": "2b28fb6b90c0c4e4433b4d912ea87009364cdd5e",
"size": "1945",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/surface/bigtable/instances/update.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
} |
import sys
import imp
import contextlib
@contextlib.contextmanager
def preserve_value(namespace, name):
""" A context manager to preserve, then restore, the specified binding.
:param namespace: The namespace object (e.g. a class or dict)
containing the name binding.
:param name: The name of the binding to be preserved.
:yield: None.
When the context manager is entered, the current value bound to `name`
in `namespace` is saved. When the context manager is exited, the
binding is re-established to the saved value.
"""
    saved_value = getattr(namespace, name)
    try:
        yield
    finally:
        # Restore the binding even if the body raises an exception.
        setattr(namespace, name, saved_value)
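# Editor's note: a minimal usage sketch (never called) of `preserve_value`,
# mirroring how `make_module_from_file` below uses it.
def _example_preserve_value():
    original = sys.dont_write_bytecode
    with preserve_value(sys, 'dont_write_bytecode'):
        sys.dont_write_bytecode = True
    assert sys.dont_write_bytecode == original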
def make_module_from_file(module_name, module_filepath):
""" Make a new module object from the source code in specified file.
:param module_name: The name of the resulting module object.
:param module_filepath: The filesystem path to open for
reading the module's Python source.
:return: The module object.
The Python import mechanism is not used. No cached bytecode
file is created, and no entry is placed in `sys.modules`.
"""
py_source_open_mode = 'U'
py_source_description = (b".py", py_source_open_mode, imp.PY_SOURCE)
with open(module_filepath, py_source_open_mode) as module_file:
with preserve_value(sys, 'dont_write_bytecode'):
sys.dont_write_bytecode = True
module = imp.load_module(
module_name,
module_file,
module_filepath,
py_source_description)
return module
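# Editor's note: a hypothetical usage sketch (never called); the module name
# and file path are made up for illustration.
def _example_make_module_from_file():
    module = make_module_from_file('my_plugin', '/path/to/my_plugin.py')
    # Attributes defined in the file are available on the returned module,
    # which was loaded without writing a cached bytecode file.
    return module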
| {
"content_hash": "18f58c4593698f7eea56f492546820c6",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 33.673469387755105,
"alnum_prop": 0.6442424242424243,
"repo_name": "kyleterry/tenyksclient",
"id": "f631707919076da94d7482af01e73b4cff1b41fe",
"size": "1786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tenyksclient/module_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10496"
}
],
"symlink_target": ""
} |
"""Provides device automations for Alarm control panel."""
from typing import List
import voluptuous as vol
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.components.automation import AutomationActionType, state
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN
TRIGGER_TYPES = {
"triggered",
"disarmed",
"armed_home",
"armed_away",
"armed_night",
}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
}
)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device triggers for Alarm control panel devices."""
registry = await entity_registry.async_get_registry(hass)
triggers = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
entity_state = hass.states.get(entry.entity_id)
        # We need a state or else we can't determine the supported features.
if entity_state is None:
continue
supported_features = entity_state.attributes["supported_features"]
# Add triggers for each entity that belongs to this integration
triggers += [
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "disarmed",
},
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "triggered",
},
]
if supported_features & SUPPORT_ALARM_ARM_HOME:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "armed_home",
}
)
if supported_features & SUPPORT_ALARM_ARM_AWAY:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "armed_away",
}
)
if supported_features & SUPPORT_ALARM_ARM_NIGHT:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "armed_night",
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
config = TRIGGER_SCHEMA(config)
if config[CONF_TYPE] == "triggered":
from_state = STATE_ALARM_PENDING
to_state = STATE_ALARM_TRIGGERED
elif config[CONF_TYPE] == "disarmed":
from_state = STATE_ALARM_TRIGGERED
to_state = STATE_ALARM_DISARMED
elif config[CONF_TYPE] == "armed_home":
from_state = STATE_ALARM_PENDING
to_state = STATE_ALARM_ARMED_HOME
elif config[CONF_TYPE] == "armed_away":
from_state = STATE_ALARM_PENDING
to_state = STATE_ALARM_ARMED_AWAY
elif config[CONF_TYPE] == "armed_night":
from_state = STATE_ALARM_PENDING
to_state = STATE_ALARM_ARMED_NIGHT
state_config = {
state.CONF_PLATFORM: "state",
CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state.CONF_FROM: from_state,
state.CONF_TO: to_state,
}
state_config = state.TRIGGER_SCHEMA(state_config)
return await state.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
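# Editor's note: an illustrative (hypothetical) trigger configuration, roughly
# the shape that TRIGGER_SCHEMA above validates; the device and entity ids are
# made up for the example.
EXAMPLE_TRIGGER_CONFIG = {
    CONF_PLATFORM: "device",
    CONF_DOMAIN: DOMAIN,
    CONF_DEVICE_ID: "abcdef0123456789",
    CONF_ENTITY_ID: "alarm_control_panel.home_alarm",
    CONF_TYPE: "armed_away",
}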
| {
"content_hash": "95a5450d7c66404ac61f8bc5f888135f",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 80,
"avg_line_length": 31.814569536423843,
"alnum_prop": 0.5868026644462948,
"repo_name": "postlund/home-assistant",
"id": "95ae17aaaf5621041afa501292be150ce0a13bfd",
"size": "4804",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/alarm_control_panel/device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
"""Microsoft SQLServer hook module"""
from __future__ import annotations
from typing import Any
import pymssql
from airflow.providers.common.sql.hooks.sql import DbApiHook
class MsSqlHook(DbApiHook):
"""Interact with Microsoft SQL Server."""
conn_name_attr = 'mssql_conn_id'
default_conn_name = 'mssql_default'
conn_type = 'mssql'
hook_name = 'Microsoft SQL Server'
supports_autocommit = True
DEFAULT_SQLALCHEMY_SCHEME = 'mssql+pymssql'
def __init__(
self,
*args,
sqlalchemy_scheme: str | None = None,
**kwargs,
) -> None:
"""
:param args: passed to DBApiHook
        :param sqlalchemy_scheme: Scheme for the SQLAlchemy connection. Default is
            ``mssql+pymssql``. Only used for ``get_sqlalchemy_engine`` and
            ``get_sqlalchemy_connection`` methods.
:param kwargs: passed to DbApiHook
"""
super().__init__(*args, **kwargs)
self.schema = kwargs.pop("schema", None)
self._sqlalchemy_scheme = sqlalchemy_scheme
@property
def connection_extra_lower(self) -> dict:
"""
``connection.extra_dejson`` but where keys are converted to lower case.
This is used internally for case-insensitive access of mssql params.
"""
conn = self.get_connection(self.mssql_conn_id) # type: ignore[attr-defined]
return {k.lower(): v for k, v in conn.extra_dejson.items()}
@property
def sqlalchemy_scheme(self) -> str:
"""Sqlalchemy scheme either from constructor, connection extras or default."""
return (
self._sqlalchemy_scheme
or self.connection_extra_lower.get('sqlalchemy_scheme')
or self.DEFAULT_SQLALCHEMY_SCHEME
)
def get_uri(self) -> str:
from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit
r = list(urlsplit(super().get_uri()))
# change pymssql driver:
r[0] = self.sqlalchemy_scheme
# remove query string 'sqlalchemy_scheme' like parameters:
qs = parse_qs(r[3], keep_blank_values=True)
for k in list(qs.keys()):
if k.lower() == 'sqlalchemy_scheme':
qs.pop(k, None)
r[3] = urlencode(qs, doseq=True)
return urlunsplit(r)
def get_sqlalchemy_connection(
self, connect_kwargs: dict | None = None, engine_kwargs: dict | None = None
) -> Any:
"""Sqlalchemy connection object"""
engine = self.get_sqlalchemy_engine(engine_kwargs=engine_kwargs)
return engine.connect(**(connect_kwargs or {}))
def get_conn(
self,
) -> pymssql.connect:
"""Returns a mssql connection object"""
conn = self.get_connection(self.mssql_conn_id) # type: ignore[attr-defined]
conn = pymssql.connect(
server=conn.host,
user=conn.login,
password=conn.password,
database=self.schema or conn.schema,
port=conn.port,
)
return conn
def set_autocommit(
self,
conn: pymssql.connect,
autocommit: bool,
) -> None:
conn.autocommit(autocommit)
def get_autocommit(self, conn: pymssql.connect):
return conn.autocommit_state
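# Editor's note: a minimal, hypothetical usage sketch of the hook above; the
# connection id and query are assumptions for illustration and the function is
# never called.
def _example_mssql_hook_usage():
    hook = MsSqlHook(mssql_conn_id='mssql_default')
    # `get_records` is inherited from DbApiHook and runs the query on a
    # pymssql connection obtained through `get_conn` above.
    return hook.get_records('SELECT TOP 10 name FROM my_table')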
| {
"content_hash": "44d450d343a516b3b61b09224e3be1e6",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 107,
"avg_line_length": 32.81818181818182,
"alnum_prop": 0.6078793474915358,
"repo_name": "cfei18/incubator-airflow",
"id": "20c1d4a4530138ff597e4ee914cc45f59d838215",
"size": "4036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/providers/microsoft/mssql/hooks/mssql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
'''
This example trains the POS tagger without the Language class, demonstrating
the APIs of the atomic components.
This example was adapted from the gist here:
https://gist.github.com/kamac/a7bc139f62488839a8118214a4d932f2
Issue discussing the gist:
https://github.com/explosion/spaCy/issues/1179
The example was written for spaCy 1.8.2.
'''
from __future__ import unicode_literals
from __future__ import print_function
import plac
import codecs
import spacy.symbols as symbols
import spacy
from pathlib import Path
from spacy.vocab import Vocab
from spacy.tagger import Tagger
from spacy.tokens import Doc
from spacy.gold import GoldParse
from spacy.language import Language
from spacy import orth
from spacy import attrs
import random
TAG_MAP = {
'ADJ': {symbols.POS: symbols.ADJ},
'ADP': {symbols.POS: symbols.ADP},
'PUNCT': {symbols.POS: symbols.PUNCT},
'ADV': {symbols.POS: symbols.ADV},
'AUX': {symbols.POS: symbols.AUX},
'SYM': {symbols.POS: symbols.SYM},
'INTJ': {symbols.POS: symbols.INTJ},
'CCONJ': {symbols.POS: symbols.CCONJ},
'X': {symbols.POS: symbols.X},
'NOUN': {symbols.POS: symbols.NOUN},
'DET': {symbols.POS: symbols.DET},
'PROPN': {symbols.POS: symbols.PROPN},
'NUM': {symbols.POS: symbols.NUM},
'VERB': {symbols.POS: symbols.VERB},
'PART': {symbols.POS: symbols.PART},
'PRON': {symbols.POS: symbols.PRON},
'SCONJ': {symbols.POS: symbols.SCONJ},
}
LEX_ATTR_GETTERS = {
attrs.LOWER: lambda string: string.lower(),
attrs.NORM: lambda string: string,
attrs.SHAPE: orth.word_shape,
attrs.PREFIX: lambda string: string[0],
attrs.SUFFIX: lambda string: string[-3:],
attrs.CLUSTER: lambda string: 0,
attrs.IS_ALPHA: orth.is_alpha,
attrs.IS_ASCII: orth.is_ascii,
attrs.IS_DIGIT: lambda string: string.isdigit(),
attrs.IS_LOWER: orth.is_lower,
attrs.IS_PUNCT: orth.is_punct,
attrs.IS_SPACE: lambda string: string.isspace(),
attrs.IS_TITLE: orth.is_title,
attrs.IS_UPPER: orth.is_upper,
attrs.IS_BRACKET: orth.is_bracket,
attrs.IS_QUOTE: orth.is_quote,
attrs.IS_LEFT_PUNCT: orth.is_left_punct,
attrs.IS_RIGHT_PUNCT: orth.is_right_punct,
attrs.LIKE_URL: orth.like_url,
attrs.LIKE_NUM: orth.like_number,
attrs.LIKE_EMAIL: orth.like_email,
attrs.IS_STOP: lambda string: False,
attrs.IS_OOV: lambda string: True
}
def read_ud_data(path):
data = []
last_number = -1
sentence_words = []
sentence_tags = []
with codecs.open(path, encoding="utf-8") as f:
while True:
line = f.readline()
if not line:
break
if line[0].isdigit():
d = line.split()
if not "-" in d[0]:
number = int(line[0])
if number < last_number:
data.append((sentence_words, sentence_tags),)
sentence_words = []
sentence_tags = []
sentence_words.append(d[2])
sentence_tags.append(d[3])
last_number = number
if len(sentence_words) > 0:
data.append((sentence_words, sentence_tags,))
return data
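# Editor's note (illustrative): `read_ud_data` expects CoNLL-U style input,
# e.g. token lines such as
#   1   The     the     DET    DT   ...
#   2   geese   goose   NOUN   NNS  ...
# It keeps column index 2 (here the lemma) as the token text and column
# index 3 (the universal POS tag) as the gold tag, and starts a new sentence
# whenever the token id counter resets.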
def ensure_dir(path):
if not path.exists():
path.mkdir()
def main(train_loc, dev_loc, output_dir=None):
if output_dir is not None:
output_dir = Path(output_dir)
ensure_dir(output_dir)
ensure_dir(output_dir / "pos")
ensure_dir(output_dir / "vocab")
train_data = read_ud_data(train_loc)
vocab = Vocab(tag_map=TAG_MAP, lex_attr_getters=LEX_ATTR_GETTERS)
# Populate vocab
for words, _ in train_data:
for word in words:
_ = vocab[word]
model = spacy.tagger.TaggerModel(spacy.tagger.Tagger.feature_templates)
tagger = Tagger(vocab, model)
print(tagger.tag_names)
for i in range(30):
print("training model (iteration " + str(i) + ")...")
score = 0.
num_samples = 0.
for words, tags in train_data:
doc = Doc(vocab, words=words)
gold = GoldParse(doc, tags=tags)
cost = tagger.update(doc, gold)
for i, word in enumerate(doc):
num_samples += 1
if word.tag_ == tags[i]:
score += 1
print('Train acc', score/num_samples)
random.shuffle(train_data)
tagger.model.end_training()
score = 0.0
test_data = read_ud_data(dev_loc)
num_samples = 0
for words, tags in test_data:
doc = Doc(vocab, words)
tagger(doc)
for i, word in enumerate(doc):
num_samples += 1
if word.tag_ == tags[i]:
score += 1
print("score: " + str(score / num_samples * 100.0))
if output_dir is not None:
tagger.model.dump(str(output_dir / 'pos' / 'model'))
with (output_dir / 'vocab' / 'strings.json').open('w') as file_:
tagger.vocab.strings.dump(file_)
if __name__ == '__main__':
plac.call(main)
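# Illustrative invocation (the .conllu paths are placeholders for UD training data):
#   python train_tagger_standalone_ud.py xx-ud-train.conllu xx-ud-dev.conllu /tmp/ud_tagger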
| {
"content_hash": "4df6f9557c1640ef7c91f254e409943f",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 75,
"avg_line_length": 30.951219512195124,
"alnum_prop": 0.5986997635933806,
"repo_name": "raphael0202/spaCy",
"id": "ce1ab50d6406a46b19273ddfbd81dd9ff248d9cf",
"size": "5076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/training/train_tagger_standalone_ud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "237446"
},
{
"name": "C++",
"bytes": "26995"
},
{
"name": "CSS",
"bytes": "29678"
},
{
"name": "HTML",
"bytes": "286438"
},
{
"name": "JavaScript",
"bytes": "880"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "2537585"
},
{
"name": "Shell",
"bytes": "753"
}
],
"symlink_target": ""
} |
import asyncio
import json
import logging
from aio_jsonrpc_20.exception import (
InvalidRequestException,
CustomJsonRpcException
)
from aio_jsonrpc_20.response import ResponseMaker
from aio_jsonrpc_20.utils import (
is_valid_params,
check_request,
lazy_check_request
)
logger = logging.getLogger(__name__)
class RequestResolver(object):
"""This class allow to resolve a request, and build a response."""
__slots__ = [
'router',
'serializer',
'response_maker',
'check_request'
]
def __init__(
self,
router,
lazy_check=False,
error_verbose=True,
serializer=json
):
self.router = router
self.response_maker = ResponseMaker(error_verbose)
self.serializer = serializer
if lazy_check:
self.check_request = lazy_check_request
else:
self.check_request = check_request
async def handle(self, str_request):
# handle encoding
if isinstance(str_request, bytes):
str_request = str_request.decode("utf-8")
# get response from unserialized request
try:
request = self.serializer.loads(str_request)
except (TypeError, ValueError) as decode_error:
logger.warning('JSON decode error: {}'.format(decode_error))
response = self.response_maker.get_parse_error(
data='Could not decode JSON request object.'
)
else:
if request:
try:
# batch request case
if isinstance(request, list):
coros = []
for sub_req in request:
coros.append(self._get_response(sub_req))
response = await asyncio.gather(*coros)
response = [elt for elt in response if elt]
# simple request case
else:
response = await self._get_response(request)
# handle uncaught exception
except Exception as e:
logger.warning('Unexpected error: ' + str(e))
response = self.response_maker.get_internal_error(
data=e.args[0]
)
else:
response = self.response_maker.get_invalid_request(
data='Empty request is not allowed'
)
# return serialized result
return self.serializer.dumps(response) if response else ''
async def _get_response(self, request):
# get id and check in the same time if request is a dict
try:
request_id = request['id']
except KeyError:
request_id = None
except TypeError:
logger.warning('Request should be a dict object')
return self.response_maker.get_invalid_request(
data="Request should be a dict object",
)
# check jsonrpc 2.0 specification
try:
self.check_request(request)
except InvalidRequestException as e:
logger.warning('Invalid request')
return self.response_maker.get_invalid_request(
data=e.args[0],
request_id=request_id
)
# get method to call
try:
method = self.router[request['method']]
except KeyError:
logger.warning('Method not found')
return self.response_maker.get_method_not_found(
request_id=request_id
)
except TypeError:
logger.error('Router should be like a dict object')
return self.response_maker.get_internal_error(
data="Router should be like a dict object",
)
# try to execute method
try:
if 'params' in request:
params = request['params']
if isinstance(params, dict):
result = await method(**params)
else:
result = await method(*params)
else:
result = await method()
except CustomJsonRpcException as e:
# catch custom error
logger.warning('CustomError: ' + str(e))
return self.response_maker.get_server_error(
code=e.code,
data=e.data,
request_id=request_id
)
except Exception as e:
# determine the error's type
if (
isinstance(e, TypeError) and
not is_valid_params(method, params)
):
logger.warning('Invalid params')
return self.response_maker.get_invalid_params(
data=e.args[0], request_id=request_id
)
else:
logger.warning('Application error: ' + str(e))
return self.response_maker.get_internal_error(
data=e.args[0], request_id=request_id
)
# is notification ?
if request_id:
return self.response_maker.get_response(
result=result, request_id=request_id
)
else:
return None
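# A minimal, hedged usage sketch of the resolver above: a plain dict serves as the
# router and handle() is awaited with a raw JSON-RPC 2.0 request string. The
# coroutine name `add` and the request payload are illustrative only.
if __name__ == '__main__':
    async def add(a, b):
        return a + b
    async def demo():
        resolver = RequestResolver(router={'add': add})
        raw = '{"jsonrpc": "2.0", "method": "add", "params": [1, 2], "id": 1}'
        # handle() returns the serialized response string ('' for notifications)
        print(await resolver.handle(raw))
    asyncio.get_event_loop().run_until_complete(demo())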
| {
"content_hash": "1cfabaa7c3c8bcbc564afdd5e8d24e6a",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 72,
"avg_line_length": 33.391304347826086,
"alnum_prop": 0.5210193452380952,
"repo_name": "steffgrez/aio-jsonrpc-2.0",
"id": "11a26337d475b97826c0d1401283469ea67c7c76",
"size": "5376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aio_jsonrpc_20/resolver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29695"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/opee_sea_killer/shared_lair_opee_sea_killer_underwater.iff"
result.attribute_template_id = -1
result.stfName("lair_n","opee_sea_killer_underwater")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "023b96b8ac548ef2d3e7954d16787e14",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 100,
"avg_line_length": 26.46153846153846,
"alnum_prop": 0.7093023255813954,
"repo_name": "anhstudios/swganh",
"id": "0c9a96b926ff435092e71e00db4c5c2613c1849f",
"size": "489",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/lair/opee_sea_killer/shared_lair_opee_sea_killer_underwater.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""AbstractCardRequest class.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:[email protected]
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from smartcard.CardType import AnyCardType
from smartcard.PassThruCardService import PassThruCardService
import smartcard.System
class AbstractCardRequest(object):
"""The base class for xxxCardRequest classes.
A CardRequest is used for waitForCard() invocations and specifies what
kind of smart card an application is waiting for.
Known subclasses: smartcard.pcsc.PCSCCardRequest"""
def __init__(self, newcardonly=False, readers=None,
cardType=None, cardServiceClass=None, timeout=1):
"""Construct new CardRequest.
newcardonly: if True, request a new card; default is
False, i.e. accepts cards already inserted
readers: the list of readers to consider for
requesting a card; default is to consider
all readers
cardType: the smartcard.CardType.CardType to wait for;
default is smartcard.CardType.AnyCardType,
i.e. the request will succeed with any card
cardServiceClass: the specific card service class to create
and bind to the card; default is to create
and bind a smartcard.PassThruCardService
timeout: the time in seconds we are ready to wait for
connecting to the requested card. default
is to wait one second; to wait forever, set
timeout to None
"""
self.newcardonly = newcardonly
self.readersAsked = readers
self.cardType = cardType
self.cardServiceClass = cardServiceClass
self.timeout = timeout
# if no CardType requested, use AnyCardType
if None == self.cardType:
self.cardType = AnyCardType()
# if no card service requested, use pass-thru card service
if None == self.cardServiceClass:
self.cardServiceClass = PassThruCardService
def getReaders(self):
"""Returns the list or readers on which to wait for cards."""
# if readers not given, use all readers
if None == self.readersAsked:
return smartcard.System.readers()
else:
return self.readersAsked
def waitforcard(self):
"""Wait for card insertion and returns a card service."""
pass
def waitforcardevent(self):
"""Wait for card insertion or removal."""
pass
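# A hedged usage sketch of the parameters documented above, via the concrete
# CardRequest wrapper that ships with pyscard (smartcard.CardRequest.CardRequest);
# the timeout value is arbitrary and chosen only for illustration.
if __name__ == '__main__':
    from smartcard.CardRequest import CardRequest
    # wait up to 10 seconds for any card in any reader, then read its ATR
    cardrequest = CardRequest(timeout=10, newcardonly=False, cardType=AnyCardType())
    cardservice = cardrequest.waitforcard()
    cardservice.connection.connect()
    print(cardservice.connection.getATR())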
| {
"content_hash": "254d9a9cf831fd77299466ef417c9fdb",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 75,
"avg_line_length": 38.111111111111114,
"alnum_prop": 0.6495626822157434,
"repo_name": "mixja/eap-sim-lab",
"id": "ef3206f045180e1ab4920e97356979ca745c6418",
"size": "3430",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/pyscard-1.6.16/build/lib.macosx-10.10-x86_64-2.7/smartcard/AbstractCardRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "296205"
},
{
"name": "CSS",
"bytes": "16483"
},
{
"name": "JavaScript",
"bytes": "102146"
},
{
"name": "Makefile",
"bytes": "9775"
},
{
"name": "Python",
"bytes": "1465805"
},
{
"name": "Shell",
"bytes": "7763"
}
],
"symlink_target": ""
} |
__author__ = 'adrian'
from PyQt4 import QtGui
from PyQt4 import QtCore
from parking_app.UI.WarningConfirmationUI import WarningConfirmationUI
import parking_app.Common as Common
import random
class SlotUI(QtGui.QWidget):
def __init__(self):
super(SlotUI, self).__init__()
self.initUI()
def initUI(self):
vertical = QtGui.QVBoxLayout()
# Vehicle ID
self.btn_patente = QtGui.QPushButton('AAA-000',self)
self.btn_patente.clicked.connect(self.withdraw_vehicle)
self.lbl_vehicle = QtGui.QLabel(self)
vertical.addWidget(self.lbl_vehicle)
self.setLayout(vertical)
color = QtGui.QColor(150, 150, 150)
self.setBackgroundColor(color)
self.btn_patente.setHidden(True)
self.lbl_vehicle.setHidden(True)
def withdraw_vehicle(self):
print("Mostrar mensaje de confirmacion")
def setBackgroundColor(self, color):
self.setAutoFillBackground(True)
p = self.palette()
p.setColor(self.backgroundRole(), color)
self.setPalette(p)
def updateUI(self, vehicle_patent, vehicle_weight):
self.lbl_vehicle.setHidden(False)
self.btn_patente.setHidden(False)
# Vehicle ID
self.btn_patente.setText(vehicle_patent)
# Current vehicle
color = QtGui.QColor(100, 100, 255)
vehicleName = ''
if vehicle_weight == Common.Weights.veryLight.value:
vehicleName = 'MotoSide.png'
elif vehicle_weight == Common.Weights.light.value:
vehicleName = 'CarSide.png'
elif vehicle_weight == Common.Weights.heavy.value:
vehicleName = 'AutoTruckSide.png'
elif vehicle_weight == Common.Weights.veryHeavy.value:
vehicleName = 'TrukSide.png'
elif vehicle_weight == Common.Weights.empty.value:
color = QtGui.QColor(150, 150, 150)
self.lbl_vehicle.setHidden(False)
self.btn_patente.setHidden(False)
pixmap2 = QtGui.QPixmap(vehicleName)
pixmap2 = pixmap2.scaled(40, 40, QtCore.Qt.KeepAspectRatio)
self.lbl_vehicle.setPixmap(pixmap2)
self.setBackgroundColor(color)
| {
"content_hash": "0ad7acf9de53f783d110df4055b828a4",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 70,
"avg_line_length": 27.234567901234566,
"alnum_prop": 0.642792384406165,
"repo_name": "Nebla/cylindricalParkingPrototype",
"id": "644f5de4bedcf8eb3d60142e804d81e1ec721687",
"size": "2206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parking_app/UI/SlotUI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "26725"
},
{
"name": "C++",
"bytes": "629"
},
{
"name": "Objective-C",
"bytes": "541653"
},
{
"name": "Python",
"bytes": "68099"
},
{
"name": "Ruby",
"bytes": "80"
},
{
"name": "Shell",
"bytes": "3626"
}
],
"symlink_target": ""
} |
"""Data tests for cclib."""
| {
"content_hash": "228daa9de2075af935de84d8bb3fadd2",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 27,
"avg_line_length": 28,
"alnum_prop": 0.6071428571428571,
"repo_name": "cclib/cclib",
"id": "888af5721a312dca5576ca313aed46d8b1adc27a",
"size": "226",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/data/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arc",
"bytes": "18395"
},
{
"name": "C++",
"bytes": "21085"
},
{
"name": "DIGITAL Command Language",
"bytes": "31999"
},
{
"name": "Python",
"bytes": "1617128"
},
{
"name": "Roff",
"bytes": "375502"
},
{
"name": "Shell",
"bytes": "1484"
},
{
"name": "TeX",
"bytes": "29388"
}
],
"symlink_target": ""
} |
from amonagent.modules.processes import processes_data_collector
from amonagent.modules.core import (
get_uptime,
get_memory_info,
get_cpu_utilization,
get_load_average,
disk_check,
get_network_traffic,
get_ip_address,
get_cpu_info
)
from amonagent.modules.distro import get_distro
from amonagent.modules.plugins import discover_plugins
import logging
log = logging.getLogger(__name__)
class Runner(object):
def __init__(self):
self.plugins_list = discover_plugins()
def info(self):
system_info_dict = {
'processor': get_cpu_info(),
'ip_address': get_ip_address(),
'distro': get_distro(),
}
return system_info_dict
def system(self):
system_data_dict = {
'memory': get_memory_info(),
'cpu': get_cpu_utilization(),
'disk': disk_check.check(),
'network': get_network_traffic(),
'loadavg': get_load_average(),
'uptime': get_uptime(),
}
return system_data_dict
def processes(self):
return processes_data_collector.collect()
def plugins(self):
plugin_result_dict = {}
for plugin in self.plugins_list:
# Don't stop the agent if the plugin data cannot be collected
try:
plugin.collect()
plugin_result_dict[plugin.name] = plugin.result
except:
log.exception("Can't collect data for plugin: {0}".format(plugin.name))
return False
return plugin_result_dict
runner = Runner()
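# A minimal sketch of how the module-level `runner` instance might be exercised;
# the pretty-printing below is purely for demonstration.
if __name__ == '__main__':
    import pprint
    pprint.pprint(runner.info())
    pprint.pprint(runner.system())
    pprint.pprint(runner.processes())
    pprint.pprint(runner.plugins())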
| {
"content_hash": "444c76103935ebeb7f2afcc785a3ccc2",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 75,
"avg_line_length": 19.91304347826087,
"alnum_prop": 0.6899563318777293,
"repo_name": "amonapp/amonagent-legacy",
"id": "fa5554322f4c9dd81f36eb7ddb8bec3624d5d196",
"size": "1374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amonagent/runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "4892"
},
{
"name": "Puppet",
"bytes": "330"
},
{
"name": "Python",
"bytes": "62122"
},
{
"name": "SaltStack",
"bytes": "1193"
},
{
"name": "Shell",
"bytes": "10114"
}
],
"symlink_target": ""
} |
from bda.plone.shop.tests import set_browserlayer
from bda.plone.shop.tests import Shop_INTEGRATION_TESTING
from bda.plone.shop.utils import get_shop_settings
import plone.api
import unittest2 as unittest
class TestUser(unittest.TestCase):
layer = Shop_INTEGRATION_TESTING
def setUp(self):
self.portal = self.layer['portal']
self.request = self.layer['request']
set_browserlayer(self.request)
def test_is_customer(self):
"""Test if a newly created user is granted the "Customer" role.
"""
self.assertTrue(get_shop_settings().add_customer_role_to_new_users)
plone.api.user.create(
email="[email protected]",
username="testuser",
password="testuser"
)
self.assertTrue(
'Customer' in plone.api.user.get_roles(username="testuser")
)
| {
"content_hash": "2c4ee309b1c148216c7b8c2e2568fce3",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 31.071428571428573,
"alnum_prop": 0.6505747126436782,
"repo_name": "TheVirtualLtd/bda.plone.shop",
"id": "b9998ec13b9c229fa90fc1bdf5422efbf9dead39",
"size": "894",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/bda/plone/shop/tests/test_user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6552"
},
{
"name": "JavaScript",
"bytes": "2481"
},
{
"name": "Python",
"bytes": "151173"
},
{
"name": "RobotFramework",
"bytes": "10582"
},
{
"name": "Shell",
"bytes": "2999"
}
],
"symlink_target": ""
} |
import click
@click.command()
def main(args=None):
"""Console script for mcmc"""
click.echo("Replace this message by putting your code into "
"mcmc.cli.main")
click.echo("See click documentation at http://click.pocoo.org/")
if __name__ == "__main__":
main()
| {
"content_hash": "2224ad9c9977916db08dad04cd5d056d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 22.53846153846154,
"alnum_prop": 0.6143344709897611,
"repo_name": "mchakra2/parallelmcmc",
"id": "4b5d817d54d3c434e5a2fcf9ead9658c39c2b498",
"size": "318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mcmc/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2283"
},
{
"name": "Python",
"bytes": "25693"
}
],
"symlink_target": ""
} |
import sharedLogger
import books
import parseUsfm
class AbstractRenderer(object):
def __init__(self, inputDir, outputDir, outputName, config):
self.identity = 'abstract renderer' if not self.identity else self.identity
self.outputDescription = outputName if not self.outputDescription else self.outputDescription
self.logger = sharedLogger.currentLogger
self.logger.info("\n Building: " + inputDir +
"\n as: " + self.outputDescription +
"\n using: " + self.identity)
self.oebFlag = False
self.config = config
def setOEBFlag(self):
self.oebFlag = True
booksUsfm = None
def loadUSFM(self, usfmDir):
self.booksUsfm = books.loadBooks(usfmDir)
def run(self, order='normal'):
if order == 'normal':
for bookName in books.silNames:
self.renderBook(bookName)
elif order == 'ntpsalms':
for bookName in books.silNamesNTPsalms:
self.renderBook(bookName)
def renderBook(self, bookName):
if bookName in self.booksUsfm:
self.logger.debug('Rendering ' + bookName)
tokens = parseUsfm.parseString(self.booksUsfm[bookName])
for t in tokens: t.renderOn(self)
self.logger.debug('Rendered ' + bookName)
def render_periph(self, token): return self.render_unhandled(token)
def render_id(self, token): pass
def render_ide(self, token): pass
def render_h(self, token): return self.render_unhandled(token)
def render_mt(self, token): return self.render_mt1(token)
def render_mt1(self, token): return self.render_unhandled(token)
def render_mt2(self, token): return self.render_unhandled(token)
def render_mt3(self, token): return self.render_unhandled(token)
def render_ms(self, token): return self.render_ms1(token)
def render_ms1(self, token): return self.render_unhandled(token)
def render_ms2(self, token): return self.render_unhandled(token)
def render_mr(self, token): return self.render_unhandled(token)
def render_mi(self, token): return self.render_unhandled(token)
def render_p(self, token): return self.render_unhandled(token)
def render_sp(self, token): return self.render_unhandled(token)
def render_m(self, token): return self.render_unhandled(token)
def render_s(self, token): return self.render_s1(token)
def render_s1(self, token): return self.render_unhandled(token)
def render_s2(self, token): return self.render_unhandled(token)
def render_s3(self, token): return self.render_unhandled(token)
def render_c(self, token): return self.render_unhandled(token)
def render_v(self, token): return self.render_unhandled(token)
def render_wj(self, token): return self.render_wj_s(token)
def render_wj_s(self, token): return self.render_unhandled(token)
def render_wj_e(self, token): return self.render_unhandled(token)
def render_text(self, token): return self.render_unhandled(token)
def render_q(self, token): return self.render_q1(token)
def render_q1(self, token): return self.render_unhandled(token)
def render_q2(self, token): return self.render_unhandled(token)
def render_q3(self, token): return self.render_unhandled(token)
def render_nb(self, token): return self.render_unhandled(token)
def render_b(self, token): return self.render_unhandled(token)
def render_qt(self, token): return self.render_qt_s(token)
def render_qt_s(self, token): return self.render_unhandled(token)
def render_qt_e(self, token): return self.render_unhandled(token)
def render_r(self, token): return self.render_unhandled(token)
def render_f(self, token): return self.render_f_s(token)
def render_f_s(self, token): return self.render_unhandled(token)
def render_f_e(self, token): return self.render_unhandled(token)
def render_fr(self, token): return self.render_unhandled(token)
def render_fr_e(self, token): return self.render_unhandled(token)
def render_fk(self, token): return self.render_unhandled(token)
def render_ft(self, token): return self.render_unhandled(token)
def render_fq(self, token): return self.render_unhandled(token)
def render_it(self, token): return self.render_it_s(token)
def render_it_s(self, token): return self.render_unhandled(token)
def render_it_e(self, token): return self.render_unhandled(token)
def render_em(self, token): return self.render_em_s(token)
def render_em_s(self, token): return self.render_unhandled(token)
def render_em_e(self, token): return self.render_unhandled(token)
def render_qs(self, token): return self.render_qs_s(token)
def render_qs_s(self, token): return self.render_unhandled(token)
def render_qs_e(self, token): return self.render_unhandled(token)
def render_nd(self, token): return self.render_nd_s(token)
def render_nd_s(self, token): return self.render_unhandled(token)
def render_nd_e(self, token): return self.render_unhandled(token)
def render_pbr(self, token): return self.render_unhandled(token)
def render_d(self, token): return self.render_unhandled(token)
def render_rem(self, token): pass
def render_pi(self, token): return self.render_unhandled(token)
def render_li(self, token): return self.render_unhandled(token)
def render_x(self, token): return self.render_x_s(token)
def render_x_s(self, token): return self.render_unhandled(token)
def render_x_e(self, token): return self.render_unhandled(token)
def render_xo(self, token): return self.render_unhandled(token)
def render_xt(self, token): return self.render_unhandled(token)
def render_xdc(self, token): return self.render_xdc_s(token)
def render_xdc_s(self, token): return self.render_unhandled(token)
def render_xdc_e(self, token): return self.render_unhandled(token)
def render_tl(self, token): return self.render_tl_s(token)
def render_tl_s(self, token): return self.render_unhandled(token)
def render_tl_e(self, token): return self.render_unhandled(token)
def render_add(self, token): return self.render_add_s(token)
def render_add_s(self, token): return self.render_unhandled(token)
def render_add_e(self, token): return self.render_unhandled(token)
def render_toc1(self, token): return self.render_unhandled(token)
def render_toc2(self, token): return self.render_unhandled(token)
def render_toc3(self, token): return self.render_unhandled(token)
def render_is1(self, token): return self.render_unhandled(token)
def render_ip(self, token): return self.render_unhandled(token)
def render_iot(self, token): return self.render_unhandled(token)
def render_io1(self, token): return self.render_unhandled(token)
def render_io2(self, token): return self.render_unhandled(token)
def render_ior(self, token): return self.render_ior_s(token)
def render_ior_s(self, token): return self.render_unhandled(token)
def render_ior_e(self, token): return self.render_unhandled(token)
def render_bk(self, token): return self.render_bk_s(token)
def render_bk_s(self, token): return self.render_unhandled(token)
def render_bk_e(self, token): return self.render_unhandled(token)
def render_sc(self, token): return self.render_sc_s(token)
def render_sc_s(self, token): return self.render_unhandled(token)
def render_sc_e(self, token): return self.render_unhandled(token)
def render_q_s(self, token): return self.render_qs_s(token)
def render_qs_s(self, token): return self.render_unhandled(token)
def render_qs_e(self, token): return self.render_unhandled(token)
def render_pb(self, token): return self.render_unhandled(token)
# Nested
def render_nested_nd(self, token): return self.render_nested_nd_s(token)
def render_nested_nd_s(self, token): return self.render_nd_s(token)
def render_nested_nd_e(self, token): return self.render_nd_e(token)
# This is unknown!
def render_unknown(self, token):
self.logger.warning("Unknown token ignored: " + token.getType() + " of value '" + token.getValue() + "'" )
# We do not specifically handle this!
def render_unhandled(self, token):
self.logger.debug("Unhandled token ignored: " + token.getType() + " of value '" + token.getValue() + "'" )
| {
"content_hash": "e372f788c4883ee00d6d0e34203036b2",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 114,
"avg_line_length": 49.687150837988824,
"alnum_prop": 0.662019338880144,
"repo_name": "openenglishbible/USFM-Tools",
"id": "7f20d963db9e3267327ee13d1f2d156ab27e2a4e",
"size": "8921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transform/support/abstractRenderer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11949"
},
{
"name": "HTML",
"bytes": "768"
},
{
"name": "JavaScript",
"bytes": "6102"
},
{
"name": "Python",
"bytes": "142595"
},
{
"name": "Shell",
"bytes": "1133"
}
],
"symlink_target": ""
} |
"""Generic device for the HomematicIP Cloud component."""
import logging
from typing import Optional
from homematicip.aio.device import AsyncDevice
from homematicip.aio.home import AsyncHome
from homeassistant.components import homematicip_cloud
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_LOW_BATTERY = 'low_battery'
ATTR_MODEL_TYPE = 'model_type'
# RSSI HAP -> Device
ATTR_RSSI_DEVICE = 'rssi_device'
# RSSI Device -> HAP
ATTR_RSSI_PEER = 'rssi_peer'
ATTR_SABOTAGE = 'sabotage'
ATTR_GROUP_MEMBER_UNREACHABLE = 'group_member_unreachable'
class HomematicipGenericDevice(Entity):
"""Representation of an HomematicIP generic device."""
def __init__(self, home: AsyncHome, device,
post: Optional[str] = None) -> None:
"""Initialize the generic device."""
self._home = home
self._device = device
self.post = post
_LOGGER.info("Setting up %s (%s)", self.name, self._device.modelType)
@property
def device_info(self):
"""Return device specific attributes."""
# Only physical devices should be HA devices.
if isinstance(self._device, AsyncDevice):
return {
'identifiers': {
# Serial numbers of Homematic IP device
(homematicip_cloud.DOMAIN, self._device.id)
},
'name': self._device.label,
'manufacturer': self._device.oem,
'model': self._device.modelType,
'sw_version': self._device.firmwareVersion,
'via_hub': (homematicip_cloud.DOMAIN, self._device.homeId),
}
return None
async def async_added_to_hass(self):
"""Register callbacks."""
self._device.on_update(self._device_changed)
def _device_changed(self, *args, **kwargs):
"""Handle device state changes."""
_LOGGER.debug("Event %s (%s)", self.name, self._device.modelType)
self.async_schedule_update_ha_state()
@property
def name(self) -> str:
"""Return the name of the generic device."""
name = self._device.label
if self._home.name is not None and self._home.name != '':
name = "{} {}".format(self._home.name, name)
if self.post is not None and self.post != '':
name = "{} {}".format(name, self.post)
return name
@property
def should_poll(self) -> bool:
"""No polling needed."""
return False
@property
def available(self) -> bool:
"""Device available."""
return not self._device.unreach
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return "{}_{}".format(self.__class__.__name__, self._device.id)
@property
def icon(self) -> Optional[str]:
"""Return the icon."""
if hasattr(self._device, 'lowBat') and self._device.lowBat:
return 'mdi:battery-outline'
if hasattr(self._device, 'sabotage') and self._device.sabotage:
return 'mdi:alert'
return None
@property
def device_state_attributes(self):
"""Return the state attributes of the generic device."""
attr = {ATTR_MODEL_TYPE: self._device.modelType}
if hasattr(self._device, 'lowBat') and self._device.lowBat:
attr[ATTR_LOW_BATTERY] = self._device.lowBat
if hasattr(self._device, 'sabotage') and self._device.sabotage:
attr[ATTR_SABOTAGE] = self._device.sabotage
if hasattr(self._device, 'rssiDeviceValue') and \
self._device.rssiDeviceValue:
attr[ATTR_RSSI_DEVICE] = self._device.rssiDeviceValue
if hasattr(self._device, 'rssiPeerValue') and \
self._device.rssiPeerValue:
attr[ATTR_RSSI_PEER] = self._device.rssiPeerValue
return attr
| {
"content_hash": "a85fa6706dd0496437ea27b84ca0a271",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 77,
"avg_line_length": 35.72477064220183,
"alnum_prop": 0.6019517205957884,
"repo_name": "jnewland/home-assistant",
"id": "6bbbb8b4fab4a887ce98c9aa1111975715893d97",
"size": "3894",
"binary": false,
"copies": "1",
"ref": "refs/heads/ci",
"path": "homeassistant/components/homematicip_cloud/device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15240512"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17862"
}
],
"symlink_target": ""
} |
from unreal_engine.classes import PyFactory, StaticMesh, Object, Class
import unreal_engine as ue
from collada import Collada
from unreal_engine.structs import StaticMeshSourceModel, MeshBuildSettings
from unreal_engine import FRawMesh
import numpy
from unreal_engine import FVector, FRotator
from unreal_engine import SWindow, SVerticalBox, SHorizontalBox, SButton, SRotatorInputBox
from unreal_engine.enums import EHorizontalAlignment
from unreal_engine.classes import Material
from unreal_engine.structs import Rotator, StaticMaterial
class ColladaImportOptions(Object):
DefaultRotation = Rotator
DefaultMaterial = Material
class ColladaFactory(PyFactory):
ImportOptions = ColladaImportOptions()
def __init__(self):
# inform the editor that this class is able to import assets
self.bEditorImport = True
# register the .dae extension as supported
self.Formats = ['dae;Collada']
# set the UClass this UFactory will generate
self.SupportedClass = StaticMesh
def open_collada_wizard(self):
def cancel_import():
self.wizard.request_destroy()
def confirm_import():
self.do_import = True
self.wizard.request_destroy()
self.wizard = SWindow(title='Collada Import Options', modal=True, sizing_rule=1)(
SVerticalBox()
(
ue.create_detail_view(self.ImportOptions),
auto_height=True,
padding = 10
)
(
SHorizontalBox()
(
SButton(text='Cancel', on_clicked=cancel_import, h_align = EHorizontalAlignment.HAlign_Center)
)
(
SButton(text='Import', on_clicked=confirm_import, h_align = EHorizontalAlignment.HAlign_Center)
),
auto_height=True,
padding = 4,
),
)
self.wizard.add_modal()
# this functions starts with an uppercase letter, so it will be visible to the UE system
# not required obviously, but it will be a good example
def FixMeshData(self):
# move from collada system (y on top) to ue4 one (z on top, forward decreases over viewer)
for i in range(0, len(self.vertices), 3):
xv, yv, zv = self.vertices[i], self.vertices[i+1], self.vertices[i+2]
# invert forward
vec = FVector(zv * -1, xv, yv) * self.ImportOptions.DefaultRotation
self.vertices[i] = vec.x
self.vertices[i+1] = vec.y
self.vertices[i+2] = vec.z
xn, yn, zn = self.normals[i], self.normals[i+1], self.normals[i+2]
nor = FVector(zn * -1, xn, yn) * self.ImportOptions.DefaultRotation
# invert forward
self.normals[i] = nor.x
self.normals[i+1] = nor.y
self.normals[i+2] = nor.z
# fix uvs from 0 on bottom to 0 on top
for i, uv in enumerate(self.uvs):
if i % 2 != 0:
self.uvs[i] = 1 - uv
def PyFactoryCreateFile(self, uclass: Class, parent: Object, name: str, filename: str) -> Object:
# load the collada file
dae = Collada(filename)
ue.log_warning(dae)
self.do_import = False
self.open_collada_wizard()
if not self.do_import:
return None
# create a new UStaticMesh with the specified name and parent
static_mesh = StaticMesh(name, parent)
# prepare a new model with the specified build settings
source_model = StaticMeshSourceModel(BuildSettings=MeshBuildSettings(bRecomputeNormals=False, bRecomputeTangents=True, bUseMikkTSpace=True, bBuildAdjacencyBuffer=True, bRemoveDegenerates=True))
# extract vertices, uvs and normals from the dae file (numpy.ravel will flatten the arrays to a simple array of floats)
triset = dae.geometries[0].primitives[0]
self.vertices = numpy.ravel(triset.vertex[triset.vertex_index])
# take the first uv channel (there could be multiple channels, like the one for lightmapping)
self.uvs = numpy.ravel(triset.texcoordset[0][triset.texcoord_indexset[0]])
self.normals = numpy.ravel(triset.normal[triset.normal_index])
# fix mesh data
self.FixMeshData()
# create a new mesh, FRawMesh is an optimized wrapper exposed by the python plugin. read: no reflection involved
mesh = FRawMesh()
# assign vertices
mesh.set_vertex_positions(self.vertices)
# uvs are required
mesh.set_wedge_tex_coords(self.uvs)
# normals are optional
mesh.set_wedge_tangent_z(self.normals)
# assign indices (not optimized, just return the list of triangles * 3...)
mesh.set_wedge_indices(numpy.arange(0, len(triset) * 3))
# assign the FRawMesh to the LOD0 (the model we created before)
mesh.save_to_static_mesh_source_model(source_model)
# assign LOD0 to the StaticMesh and build it
static_mesh.SourceModels = [source_model]
static_mesh.static_mesh_build()
static_mesh.static_mesh_create_body_setup()
static_mesh.StaticMaterials = [StaticMaterial(MaterialInterface=self.ImportOptions.DefaultMaterial, MaterialSlotName='Main')]
return static_mesh
| {
"content_hash": "7bbccb7a214be8516fdc3951c92f9d46",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 201,
"avg_line_length": 40.04964539007092,
"alnum_prop": 0.6047458827696122,
"repo_name": "getnamo/UnrealEnginePython",
"id": "33b8b0edcfd7cdf9c1d126ba8013ee308e209b0a",
"size": "5647",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tutorials/WritingAColladaFactoryWithPython_Assets/collada_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1177094"
},
{
"name": "C#",
"bytes": "23839"
},
{
"name": "C++",
"bytes": "2133454"
},
{
"name": "Python",
"bytes": "109035"
},
{
"name": "Shell",
"bytes": "232"
}
],
"symlink_target": ""
} |
import inspect
import six
import collections
__all__ = [
'is_simple_callable',
'is_iterable',
'force_str',
'get_object_by_source',
'filter_list',
]
def is_simple_callable(obj):
'''
True if the object is a callable and takes no arguments, else False.
'''
function = inspect.isfunction(obj)
method = inspect.ismethod(obj)
if not (function or method):
return False
args, varargs, keywords, defaults = inspect.getargspec(obj)
len_args = len(args) if function else len(args) - 1
len_defaults = len(defaults) if defaults else 0
return len_args <= len_defaults
def is_iterable(obj):
'''
True if an object is iterable, else False
'''
return hasattr(obj, '__iter__')
def force_str(value, encoding='utf-8'):
"""
Forces the value to a str instance, decoding if necessary.
"""
if six.PY3:
if isinstance(value, bytes):
return str(value, encoding)
return value
def get_object_by_source(obj, source, allow_blank_source=False):
"""
Tries to get the object by source.
Similar to Python's `getattr(obj, source)`, but takes a dot separated
string for source to get source from nested obj, instead of a single
source field. Also, supports getting source from obj where obj is a
dict type.
Example:
>>> obj = get_object_by_source(
object, source='user.username')
"""
try:
if isinstance(obj, collections.Mapping):
if '.' in source:
for source in source.split('.'):
obj = obj.get(source)
else:
obj = obj.get(source)
else:
if '.' in source:
for source in source.split('.'):
obj = getattr(obj, source)
else:
obj = getattr(obj, source)
except AttributeError:
if not allow_blank_source:
raise
obj = None
return obj
def filter_list(obj):
"""
Filters a list by removing all the None value within the list.
"""
if isinstance(obj, (list, tuple)):
return [item for item in obj if item is not None]
return obj
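# A few illustrative checks for the helpers above; the sample objects and values
# are made up for demonstration.
if __name__ == '__main__':
    assert is_simple_callable(lambda: 42)
    assert not is_simple_callable(lambda x: x)
    assert is_iterable([1, 2])
    assert filter_list([1, None, 2, None]) == [1, 2]
    class _User(object):
        username = 'jdoe'
    class _Comment(object):
        user = _User()
    assert get_object_by_source(_Comment(), 'user.username') == 'jdoe'
    assert get_object_by_source(_Comment(), 'missing', allow_blank_source=True) is None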
| {
"content_hash": "9aaee13289e4db6cf8d65554dace189b",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 72,
"avg_line_length": 26.202380952380953,
"alnum_prop": 0.5856428895956384,
"repo_name": "localmed/pyserializer",
"id": "6149cc497f644e7a859391eab1a47820377017be",
"size": "2201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyserializer/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1211"
},
{
"name": "Python",
"bytes": "117007"
},
{
"name": "Shell",
"bytes": "896"
}
],
"symlink_target": ""
} |
__all__ = ['AutoARIMAProphet']
# %% ../../nbs/adapters.prophet.ipynb 3
import sys
from copy import deepcopy
import pandas as pd
from ..arima import AutoARIMA
if sys.version_info.minor != 6 or (sys.platform not in ["win32", "cygwin"]):
try:
from prophet import Prophet
except ModuleNotFoundError as e:
msg = (
"{e}. To use prophet adapters you have to install "
"prophet. Please run `pip install prophet`. "
"Note that it is recommended to install prophet "
"using conda environments due to dependencies."
)
raise ModuleNotFoundError(msg) from e
elif sys.version_info.minor == 6 and (sys.platform in ["win32", "cygwin"]):
try:
from fbprophet import Prophet
except ModuleNotFoundError as e:
msg = (
"{e}. To use prophet adapters you have to install "
"fbprophet. Please run `pip install fbprophet`. "
"Note that it is recommended to install prophet "
"using conda environments due to dependencies."
)
raise ModuleNotFoundError(msg) from e
# %% ../../nbs/adapters.prophet.ipynb 6
class AutoARIMAProphet(Prophet):
"""AutoARIMAProphet adapter.
Returns best ARIMA model using external variables created by the Prophet interface.
This class receives as parameters the same as prophet.Prophet and uses a `models.AutoARIMA`
backend.
If your forecasting pipeline uses Prophet, the `AutoARIMAProphet` adapter helps to
easily substitute Prophet with an AutoARIMA.
**Parameters:**<br>
`growth`: String 'linear', 'logistic' or 'flat' to specify a linear, logistic or flat trend.<br>
`changepoints`: List of dates of potential changepoints. Otherwise selected automatically.<br>
`n_changepoints`: Number of potential changepoints to include.<br>
`changepoint_range`: Proportion of history in which trend changepoints will be estimated.<br>
`yearly_seasonality`: Fit yearly seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.<br>
`weekly_seasonality`: Fit weekly seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.<br>
`daily_seasonality`: Fit daily seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.<br>
`holidays`: pandas.DataFrame with columns holiday (string) and ds (date type).<br>
`interval_width`: float, width of the uncertainty forecast intervals; converted internally to `AutoARIMA`'s confidence level.<br>
**Notes:**<br>
You can create automated exogenous variables from the Prophet data processing pipeline
these exogenous will be included into `AutoARIMA`'s exogenous features. Parameters like
`seasonality_mode`, `seasonality_prior_scale`, `holidays_prior_scale`, `changepoint_prior_scale`,
`mcmc_samples`, `uncertainty_samples`, `stan_backend` are Prophet exclusive.
**References:**<br>
[Sean J. Taylor, Benjamin Letham (2017). "Prophet Forecasting at Scale"](https://peerj.com/preprints/3190.pdf)
[Oskar Triebe, Hansika Hewamalage, Polina Pilyugina, Nikolay Laptev, Christoph Bergmeir, Ram Rajagopal (2021). "NeuralProphet: Explainable Forecasting at Scale".](https://arxiv.org/pdf/2111.15397.pdf)
[Rob J. Hyndman, Yeasmin Khandakar (2008). "Automatic Time Series Forecasting: The forecast package for R"](https://www.jstatsoft.org/article/view/v027i03).
"""
def __init__(
self,
growth="linear",
changepoints=None,
n_changepoints=25,
changepoint_range=0.8,
yearly_seasonality="auto",
weekly_seasonality="auto",
daily_seasonality="auto",
holidays=None,
seasonality_mode="additive",
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
changepoint_prior_scale=0.05,
mcmc_samples=0,
interval_width=0.80,
uncertainty_samples=1000,
stan_backend=None,
d=None,
D=None,
max_p=5,
max_q=5,
max_P=2,
max_Q=2,
max_order=5,
max_d=2,
max_D=1,
start_p=2,
start_q=2,
start_P=1,
start_Q=1,
stationary=False,
seasonal=True,
ic="aicc",
stepwise=True,
nmodels=94,
trace=False,
approximation=False,
method=None,
truncate=None,
test="kpss",
test_kwargs=None,
seasonal_test="seas",
seasonal_test_kwargs=None,
allowdrift=False,
allowmean=False,
blambda=None,
biasadj=False,
parallel=False,
num_cores=2,
period=1,
):
Prophet.__init__(
self,
growth,
changepoints,
n_changepoints,
changepoint_range,
yearly_seasonality,
weekly_seasonality,
daily_seasonality,
holidays,
seasonality_mode,
seasonality_prior_scale,
holidays_prior_scale,
changepoint_prior_scale,
mcmc_samples,
interval_width,
uncertainty_samples,
stan_backend,
)
self.arima = AutoARIMA(
d=d,
D=D,
max_p=max_p,
max_q=max_q,
max_P=max_P,
max_Q=max_Q,
max_order=max_order,
max_d=max_d,
max_D=max_D,
start_p=start_p,
start_q=start_q,
start_P=start_P,
start_Q=start_Q,
stationary=stationary,
seasonal=seasonal,
ic=ic,
stepwise=stepwise,
nmodels=nmodels,
trace=trace,
approximation=approximation,
method=method,
truncate=truncate,
test=test,
test_kwargs=test_kwargs,
seasonal_test=seasonal_test,
seasonal_test_kwargs=seasonal_test_kwargs,
allowdrift=allowdrift,
allowmean=allowmean,
blambda=blambda,
biasadj=biasadj,
parallel=parallel,
num_cores=num_cores,
period=period,
)
def fit(self, df, disable_seasonal_features=True, **kwargs):
"""Fit the AutoARIMAProphet adapter.
**Parameters:**<br>
`df`: pandas.DataFrame, with columns ds (date type) and y, the time series.<br>
`disable_seasonal_features`: bool, whether to disable Prophet's seasonal features.<br>
`kwargs`: Additional arguments.<br>
**Returns:**<br>
`self`: `AutoARIMAProphet` adapter object with `AutoARIMA` fitted model.
"""
if self.history is not None:
raise Exception(
"Prophet object can only be fit once. " "Instantiate a new object."
)
if ("ds" not in df) or ("y" not in df):
raise ValueError(
'Dataframe must have columns "ds" and "y" with the dates and '
"values respectively."
)
history = df[df["y"].notnull()].copy()
if history.shape[0] < 2:
raise ValueError("Dataframe has less than 2 non-NaN rows.")
self.history_dates = pd.to_datetime(
pd.Series(df["ds"].unique(), name="ds")
).sort_values()
history = self.setup_dataframe(history, initialize_scales=True)
self.history = history
self.set_auto_seasonalities()
(
seasonal_features,
prior_scales,
component_cols,
modes,
) = self.make_all_seasonality_features(history)
self.train_component_cols = component_cols
self.component_modes = modes
self.fit_kwargs = deepcopy(kwargs)
if disable_seasonal_features:
seas = tuple(self.seasonalities.keys())
seasonal_features = seasonal_features.loc[
:, ~seasonal_features.columns.str.startswith(seas)
]
self.xreg_cols = seasonal_features.columns
y = history["y"].values
X = seasonal_features.values if not seasonal_features.empty else None
self.arima = self.arima.fit(y=y, X=X)
return self
def predict(self, df=None):
"""Predict using the AutoARIMAProphet adapter.
**Parameters:**<br>
`df`: pandas.DataFrame, with columns ds (date type) and y, the time series.<br>
**Returns:**<br>
`fcsts_df`: A pandas.DataFrame with the forecast components.
"""
if self.history is None:
raise Exception("Model has not been fit.")
if df is None:
df = self.history.copy()
else:
if df.shape[0] == 0:
raise ValueError("Dataframe has no rows.")
df = self.setup_dataframe(df.copy())
seasonal_features = self.make_all_seasonality_features(df)[0].loc[
:, self.xreg_cols
]
ds_forecast = set(df["ds"])
h = len(ds_forecast - set(self.history["ds"]))
if h > 0:
X = seasonal_features.values[-h:] if not seasonal_features.empty else None
fcsts_df = self.arima.predict(
h=h, X=X, level=int(100 * self.interval_width)
)
else:
fcsts_df = pd.DataFrame()
if len(ds_forecast) > h:
in_sample = self.arima.predict_in_sample(
level=int(100 * self.interval_width)
)
fcsts_df = pd.concat([in_sample, fcsts_df]).reset_index(drop=True)
yhat = fcsts_df.pop("mean")
fcsts_df.columns = ["yhat_lower", "yhat_upper"]
fcsts_df.insert(0, "yhat", yhat)
fcsts_df.insert(0, "ds", df["ds"])
return fcsts_df
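# A hedged usage sketch: the adapter keeps Prophet's fit/predict interface, so an
# existing Prophet pipeline only needs to swap the class name. The synthetic daily
# series below is made up for demonstration; make_future_dataframe comes from the
# Prophet base class.
if __name__ == '__main__':
    import numpy as np
    df = pd.DataFrame({
        'ds': pd.date_range('2020-01-01', periods=120, freq='D'),
        'y': np.sin(np.arange(120) / 7.0) + np.random.rand(120),
    })
    m = AutoARIMAProphet(daily_seasonality=False)
    m.fit(df)
    future = m.make_future_dataframe(periods=14)
    forecast = m.predict(future)
    print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail())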
| {
"content_hash": "d04682a31b8724ce9bff7d9ec31c45b1",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 204,
"avg_line_length": 35.7043795620438,
"alnum_prop": 0.5834611060002044,
"repo_name": "Nixtla/statsforecast",
"id": "ae4dea87d9a826a35a6a036d20bdf78a5eef505b",
"size": "9874",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "statsforecast/adapters/prophet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "904"
},
{
"name": "Dockerfile",
"bytes": "866"
},
{
"name": "Jupyter Notebook",
"bytes": "29577745"
},
{
"name": "Makefile",
"bytes": "2137"
},
{
"name": "Python",
"bytes": "491864"
},
{
"name": "R",
"bytes": "4449"
}
],
"symlink_target": ""
} |
"""Command-line sample app demonstrating customer-supplied encryption keys.
This sample demonstrates uploading an object while supplying an encryption key,
retrieving that object's contents, and finally rotating that key to a new
value.
This sample is used on this page:
https://cloud.google.com/storage/docs/json_api/v1/json-api-python-samples
For more information, see the README.md under /storage.
"""
import argparse
import filecmp
import tempfile
import googleapiclient.discovery
import googleapiclient.http
# You can (and should) generate your own encryption key. Here's a good way to
# accomplish this with Python:
# python -c \
# 'import base64; import os; print(base64.encodestring(os.urandom(32)))'
# Although these keys are provided here for simplicity, please remember that it
# is a bad idea to store your encryption keys in your source code.
ENCRYPTION_KEY = '4RzDI0TeWa9M/nAvYH05qbCskPaSU/CFV5HeCxk0IUA='
# You can use openssl to quickly calculate the hash of any key.
# Try running this:
# openssl base64 -d <<< ENCRYPTION_KEY | openssl dgst -sha256 -binary \
# | openssl base64
KEY_HASH = 'aanjNC2nwso8e2FqcWILC3/Tt1YumvIwEj34kr6PRpI='
ANOTHER_ENCRYPTION_KEY = 'oevtavYZC+TfGtV86kJBKTeytXAm1s2r3xIqam+QPKM='
ANOTHER_KEY_HASH = '/gd0N3k3MK0SEDxnUiaswl0FFv6+5PHpo+5KD5SBCeA='
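# The same key material and hash can be produced in Python; this small helper is a
# sketch that mirrors the shell commands above and is not used by the sample itself.
def generate_encryption_key_and_hash():
    """Returns a (base64 key, base64 SHA-256 hash) pair suitable for the headers below."""
    import base64
    import hashlib
    import os
    raw_key = os.urandom(32)
    key = base64.b64encode(raw_key).decode('utf-8')
    key_hash = base64.b64encode(hashlib.sha256(raw_key).digest()).decode('utf-8')
    return key, key_hash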
def create_service():
"""Creates the service object for calling the Cloud Storage API."""
# Construct the service object for interacting with the Cloud Storage API -
# the 'storage' service, at version 'v1'.
# You can browse other available api services and versions here:
# https://developers.google.com/api-client-library/python/apis/
return googleapiclient.discovery.build('storage', 'v1')
def upload_object(bucket, filename, encryption_key, key_hash):
"""Uploads an object, specifying a custom encryption key."""
service = create_service()
with open(filename, 'rb') as f:
request = service.objects().insert(
bucket=bucket, name=filename,
# You can also just set media_body=filename, but for the sake of
# demonstration, pass in the more generic file handle, which could
# very well be a StringIO or similar.
media_body=googleapiclient.http.MediaIoBaseUpload(
f, 'application/octet-stream'))
request.headers['x-goog-encryption-algorithm'] = 'AES256'
request.headers['x-goog-encryption-key'] = encryption_key
request.headers['x-goog-encryption-key-sha256'] = key_hash
resp = request.execute()
return resp
def download_object(bucket, obj, out_file, encryption_key, key_hash):
"""Downloads an object protected by a custom encryption key."""
service = create_service()
request = service.objects().get_media(bucket=bucket, object=obj)
request.headers['x-goog-encryption-algorithm'] = 'AES256'
request.headers['x-goog-encryption-key'] = encryption_key
request.headers['x-goog-encryption-key-sha256'] = key_hash
# Unfortunately, http.MediaIoBaseDownload overwrites HTTP headers,
# and so it cannot be used here. Instead, we shall download as a
# single request.
out_file.write(request.execute())
def rotate_key(bucket, obj, current_encryption_key, current_key_hash,
new_encryption_key, new_key_hash):
"""Changes the encryption key used to store an existing object."""
service = create_service()
request = service.objects().rewrite(
sourceBucket=bucket, sourceObject=obj,
destinationBucket=bucket, destinationObject=obj,
body={})
# For very large objects, calls to rewrite may not complete on the first
# call and may need to be resumed.
while True:
request.headers.update({
'x-goog-copy-source-encryption-algorithm': 'AES256',
'x-goog-copy-source-encryption-key': current_encryption_key,
'x-goog-copy-source-encryption-key-sha256': current_key_hash,
'x-goog-encryption-algorithm': 'AES256',
'x-goog-encryption-key': new_encryption_key,
'x-goog-encryption-key-sha256': new_key_hash})
rewrite_response = request.execute()
if rewrite_response['done']:
break
print('Continuing rewrite call...')
request = service.objects().rewrite(
sourceBucket=bucket, sourceObject=obj,
destinationBucket=bucket, destinationObject=obj,
rewriteToken=rewrite_response['rewriteToken'],
body={})
def main(bucket, filename):
print('Uploading object gs://{}/{}'.format(bucket, filename))
upload_object(bucket, filename, ENCRYPTION_KEY, KEY_HASH)
print('Downloading it back')
with tempfile.NamedTemporaryFile(mode='w+b') as tmpfile:
download_object(bucket, filename, tmpfile, ENCRYPTION_KEY, KEY_HASH)
tmpfile.seek(0)
assert filecmp.cmp(filename, tmpfile.name), \
'Downloaded file has different content from the original file.'
print('Rotating its key')
rotate_key(bucket, filename, ENCRYPTION_KEY, KEY_HASH,
ANOTHER_ENCRYPTION_KEY, ANOTHER_KEY_HASH)
print('Done')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('bucket', help='Your Cloud Storage bucket.')
parser.add_argument('filename', help='A file to upload and download.')
args = parser.parse_args()
main(args.bucket, args.filename)
| {
"content_hash": "adef7e04060042722780d2ebab118538",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 79,
"avg_line_length": 39.20979020979021,
"alnum_prop": 0.6873550918494739,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "4ddd4e78ccaac44e0570fc942598c859d18b4608",
"size": "6233",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "storage/api/customer_supplied_keys.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
def update_attributes(obj, dictionary, keys):
if not dictionary:
return
for key in keys:
if key not in dictionary:
continue
value = dictionary[key]
if getattr(obj, key) is not None and value is None:
continue
if type(value) is dict:
continue
setattr(obj, key, dictionary[key])
| {
"content_hash": "3f46eca852679cbc8bb5a56716c27b3f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 64,
"avg_line_length": 22.05,
"alnum_prop": 0.5963718820861678,
"repo_name": "fuzeman/trakt.py",
"id": "98be8ad35c9da8ef5fd9dc716ffdd5e3cf2aca8d",
"size": "441",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "trakt/objects/core/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "295322"
}
],
"symlink_target": ""
} |
import numpy as np
import cv2
import sys
from video import Video
import os
if __name__ == "__main__":
videopath = sys.argv[1]
video = Video(videopath)
video.negate()
| {
"content_hash": "a26068e4ff5c81b8cdaa194fc6aa4050",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 28,
"avg_line_length": 15.153846153846153,
"alnum_prop": 0.5989847715736041,
"repo_name": "adobe-research/video-lecture-summaries",
"id": "da8c04e4d49673a02332477243ae8ad89faf6e64",
"size": "219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/negatevideo.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6400"
},
{
"name": "BlitzBasic",
"bytes": "63"
},
{
"name": "CSS",
"bytes": "43297"
},
{
"name": "HTML",
"bytes": "15459294"
},
{
"name": "JavaScript",
"bytes": "239670"
},
{
"name": "PostScript",
"bytes": "3330579"
},
{
"name": "Python",
"bytes": "738196"
},
{
"name": "Ruby",
"bytes": "573"
},
{
"name": "TeX",
"bytes": "10314"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CBA'
copyright = u'2016-2017, Kai Diefenbach'
author = u'Kai Diefenbach'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'CBA v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon
# of the docs. This file should be a Windows icon file (.ico), 16x16 or 32x32
# pixels in size.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# For 'zh', users can customize the `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CBAdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CBA.tex', u'CBA Documentation',
u'Kai Diefenbach', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ..., to help avoid clashes with user-added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cba', u'CBA Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CBA', u'CBA Documentation',
author, 'CBA', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
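# --- Illustrative addition, not part of the original conf.py --------------
# A minimal sketch of how the sys.path.insert(0, ...) at the top of this file
# is typically exercised: with the project root importable, sphinx.ext.autodoc
# can pull docstrings into the built docs via directives in the .rst sources,
# for example in index.rst:
#
#   .. automodule:: cba
#      :members:
#
# The package name 'cba' is an assumption here; substitute the real package.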
| {
"content_hash": "65d259a03a469cdc96161de39b819728",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 80,
"avg_line_length": 28.267080745341616,
"alnum_prop": 0.6855636123928807,
"repo_name": "diefenbach/django-cba",
"id": "52b040a94ac639291fbedee56e7edc34d940facf",
"size": "9758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3021200"
},
{
"name": "HTML",
"bytes": "24065"
},
{
"name": "JavaScript",
"bytes": "3163620"
},
{
"name": "Python",
"bytes": "41129"
}
],
"symlink_target": ""
} |
"""Contains tests for oweb.views.basic.home"""
# Python imports
from unittest import skip
# Django imports
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.contrib.auth.models import User
# app imports
from oweb.tests import OWebViewTests
from oweb.models.account import Account
@override_settings(AUTH_USER_MODEL='auth.User')
class OWebViewsHomeTests(OWebViewTests):
def test_login_required(self):
"""Unauthenticated users should be redirected to oweb:app_login"""
r = self.client.get(reverse('oweb:home'))
self.assertRedirects(r,
reverse('oweb:app_login'),
status_code=302,
target_status_code=200)
def test_account_listing(self):
"""Does the home view list the correct accounts?"""
u = User.objects.get(username='test01')
accs = Account.objects.filter(owner=u)
self.client.login(username='test01', password='foo')
r = self.client.get(reverse('oweb:home'))
self.assertEqual(r.status_code, 200)
self.assertTemplateUsed(r, 'oweb/home.html')
self.assertTrue('accounts' in r.context)
self.assertEqual([acc.pk for acc in r.context['accounts']], [acc.pk for acc in accs])
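# --- Illustrative addition, not part of the original test module ----------
# These tests rely on fixtures loaded via the OWebViewTests base class; the
# assumption here is that they provide a user 'test01' with password 'foo'
# and at least one Account owned by that user. The exact fixture contents are
# not shown in this file.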
| {
"content_hash": "9294f7d50e3e85686e223caf8496952f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 93,
"avg_line_length": 39.93939393939394,
"alnum_prop": 0.6608497723823976,
"repo_name": "Mischback/django-oweb",
"id": "2798ef9da63ccbd6cce38817f60b5cd407e69587",
"size": "1318",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "oweb/tests/views/home.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6779"
},
{
"name": "Python",
"bytes": "170213"
},
{
"name": "Shell",
"bytes": "6706"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class ReplicaInfo(Model):
"""Information about the identity, status, health, node name, uptime, and
other details about the replica.
:param replica_status: Possible values include: 'Invalid', 'InBuild',
'Standby', 'Ready', 'Down', 'Dropped'
:type replica_status: str or :class:`enum
<azure.servicefabric.models.enum>`
:param health_state: Possible values include: 'Invalid', 'Ok', 'Warning',
'Error', 'Unknown'
:type health_state: str or :class:`enum <azure.servicefabric.models.enum>`
:param node_name:
:type node_name: str
:param address: The address the replica is listening on.
:type address: str
:param last_in_build_duration_in_seconds: The last in build duration of
the replica in seconds.
:type last_in_build_duration_in_seconds: str
:param service_kind: Polymorphic Discriminator
:type service_kind: str
"""
_validation = {
'service_kind': {'required': True},
}
_attribute_map = {
'replica_status': {'key': 'ReplicaStatus', 'type': 'str'},
'health_state': {'key': 'HealthState', 'type': 'str'},
'node_name': {'key': 'NodeName', 'type': 'str'},
'address': {'key': 'Address', 'type': 'str'},
'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'},
'service_kind': {'key': 'ServiceKind', 'type': 'str'},
}
_subtype_map = {
'service_kind': {'Stateful': 'StatefulServiceReplicaInfo', 'Stateless': 'StatelessServiceInstanceInfo'}
}
def __init__(self, replica_status=None, health_state=None, node_name=None, address=None, last_in_build_duration_in_seconds=None):
self.replica_status = replica_status
self.health_state = health_state
self.node_name = node_name
self.address = address
self.last_in_build_duration_in_seconds = last_in_build_duration_in_seconds
self.service_kind = None
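# --- Illustrative addition, not part of the original module ---------------
# A minimal sketch of how the 'ServiceKind' discriminator declared in
# _subtype_map can be used to pick a concrete model class from a raw payload.
# The resolve_replica_class helper and the sample payload are hypothetical;
# the mapped class names are defined elsewhere in this SDK.
def resolve_replica_class(payload, models):
    kind = payload.get('ServiceKind')
    subtype_name = ReplicaInfo._subtype_map['service_kind'].get(kind)
    # Fall back to the base class when the kind is missing or unknown.
    return models.get(subtype_name, ReplicaInfo)

# Example payload; keys follow the wire names declared in _attribute_map.
sample_payload = {'ServiceKind': 'Stateful', 'ReplicaStatus': 'Ready',
                  'NodeName': 'node-1'}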
| {
"content_hash": "a893e6f9ea20cc74ea4827cd4c87009b",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 133,
"avg_line_length": 40.57142857142857,
"alnum_prop": 0.6413480885311871,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "1f3cc3d49400e6e97950a4984b40de0118968872",
"size": "2462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-servicefabric/azure/servicefabric/models/replica_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from SDAWithVectorField import Obstacle
from SDAWithVectorField import Constants
class StationaryObstacle(Obstacle):
"""
Wrapper class for Stationary obstacles
"""
def __init__(self, point, radius, height):
"""
:param point: The point for the stationary obstacle
:type point: Numpy Array
:param radius: The radius of the obstacle
:type radius: Float
:param height: The height of the obstacle
:type height: Float
"""
super(StationaryObstacle, self).__init__(point)
self.radius = radius
self.height = height
self.type = "StationaryObstacle"
def get_radius(self):
"""
        Return the radius of the obstacle
"""
return self.radius
def get_height(self):
"""
The height of the obstacle
"""
return self.height
def get_type(self):
return self.__class__.__name__ | {
"content_hash": "c0a7be051fccd825de6752963963029f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 25.842105263157894,
"alnum_prop": 0.59979633401222,
"repo_name": "FlintHill/SUAS-Competition",
"id": "086baf076da8f74e366f7e3055f804a0b3043430",
"size": "982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SDAPackageWithVectorField/SDAWithVectorField/stationary_obstacle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "164260"
},
{
"name": "HTML",
"bytes": "46489"
},
{
"name": "JavaScript",
"bytes": "105325"
},
{
"name": "PHP",
"bytes": "2701"
},
{
"name": "Python",
"bytes": "538468"
},
{
"name": "Shell",
"bytes": "1913"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_rebel_specforce_guerrilla_human_female_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "88f9c868cf6232f412ea82fd25b0984b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 95,
"avg_line_length": 25.46153846153846,
"alnum_prop": 0.7099697885196374,
"repo_name": "anhstudios/swganh",
"id": "c699bf91c80d44045cdd976424c0ddd611ac3b12",
"size": "476",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_rebel_specforce_guerrilla_human_female_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from tempest_lib import exceptions as lib_exc
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import test
class NetworksNegativeTestJSON(base.BaseNetworkTest):
@test.attr(type=['negative'])
@test.idempotent_id('9293e937-824d-42d2-8d5b-e985ea67002a')
def test_show_non_existent_network(self):
non_exist_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.networks_client.show_network,
non_exist_id)
@test.attr(type=['negative'])
@test.idempotent_id('d746b40c-5e09-4043-99f7-cba1be8b70df')
def test_show_non_existent_subnet(self):
non_exist_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.show_subnet,
non_exist_id)
@test.attr(type=['negative'])
@test.idempotent_id('a954861d-cbfd-44e8-b0a9-7fab111f235d')
def test_show_non_existent_port(self):
non_exist_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.show_port,
non_exist_id)
@test.attr(type=['negative'])
@test.idempotent_id('98bfe4e3-574e-4012-8b17-b2647063de87')
def test_update_non_existent_network(self):
non_exist_id = data_utils.rand_uuid()
self.assertRaises(
lib_exc.NotFound, self.networks_client.update_network,
non_exist_id, name="new_name")
@test.attr(type=['negative'])
@test.idempotent_id('03795047-4a94-4120-a0a1-bd376e36fd4e')
def test_delete_non_existent_network(self):
non_exist_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.networks_client.delete_network,
non_exist_id)
@test.attr(type=['negative'])
@test.idempotent_id('1cc47884-ac52-4415-a31c-e7ce5474a868')
def test_update_non_existent_subnet(self):
non_exist_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.update_subnet,
non_exist_id, name='new_name')
@test.attr(type=['negative'])
@test.idempotent_id('a176c859-99fb-42ec-a208-8a85b552a239')
def test_delete_non_existent_subnet(self):
non_exist_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.delete_subnet, non_exist_id)
@test.attr(type=['negative'])
@test.idempotent_id('13d3b106-47e6-4b9b-8d53-dae947f092fe')
def test_create_port_on_non_existent_network(self):
non_exist_net_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.create_port, network_id=non_exist_net_id)
@test.attr(type=['negative'])
@test.idempotent_id('cf8eef21-4351-4f53-adcd-cc5cb1e76b92')
def test_update_non_existent_port(self):
non_exist_port_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.update_port,
non_exist_port_id, name='new_name')
@test.attr(type=['negative'])
@test.idempotent_id('49ec2bbd-ac2e-46fd-8054-798e679ff894')
def test_delete_non_existent_port(self):
non_exist_port_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.delete_port, non_exist_port_id)
| {
"content_hash": "abae23ca80758ab2cf640d486d5b5ebc",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 42.125,
"alnum_prop": 0.6409495548961425,
"repo_name": "xbezdick/tempest",
"id": "4d1971f9f2f262a5df9b61a79a11f446978f905b",
"size": "4052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/network/test_networks_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2880166"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
} |
import os
from os import path
import sys
from optparse import OptionParser
import json
dataset = {"dataset": {"listing": [], "ui": [], "manifest": [], "code": [] }}
def generate_target(source_dir, targetType):
for root, dirs, files in os.walk(source_dir):
if len(dirs) == 0:
entry = {"target": root}
dataset["dataset"][targetType].append(entry)
def main(args):
parser = OptionParser(usage="python %prog manifest_root_dir ui_xml_root_dir out_json_file", version="%prog 1.0")
(options, args) = parser.parse_args()
if len(args) != 3:
parser.error("Invalid number of arguments.")
if os.path.exists(args[2]):
        sys.exit(args[2] + " already exists")
generate_target(args[0], "manifest")
generate_target(args[1], "ui")
with open(args[2], "w") as f:
f.write(json.dumps(dataset, indent=4, separators=(',', ': ')))
print "dataset config (for leveldb) has been written at " + args[2]
main(sys.argv[1:])
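# --- Illustrative addition, not part of the original script ---------------
# Example invocation (all paths are hypothetical):
#
#   python dataset_paths_writer.py ./manifests ./ui-xml ./dataset.json
#
# For every leaf directory under the two roots, generate_target appends an
# entry of the form {"target": "<dir>"} to the "manifest" and "ui" lists,
# producing JSON roughly like:
#
#   {"dataset": {"listing": [],
#                "ui": [{"target": "./ui-xml/com.example.app"}],
#                "manifest": [{"target": "./manifests/com.example.app"}],
#                "code": []}}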
| {
"content_hash": "a6a271e4aac542e70067afa70b208bf2",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 116,
"avg_line_length": 35.214285714285715,
"alnum_prop": 0.6206896551724138,
"repo_name": "sieveable/sieveable-tools",
"id": "bdd161c22068445bac9cd5e53235699967547112",
"size": "1000",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/dataset_paths_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72701"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('expenses.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
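# --- Illustrative addition, not part of the original urls.py --------------
# Note: django.conf.urls.static.static() only returns URL patterns when
# settings.DEBUG is True, so this static-file route is a development
# convenience; in production the files collected into STATIC_ROOT would
# normally be served by the web server. That deployment detail is an
# assumption, not something this file specifies.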
| {
"content_hash": "0efe738b48617aa4a52e09f56337c09e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 67,
"avg_line_length": 32.5,
"alnum_prop": 0.7446153846153846,
"repo_name": "asyncee/home-bookkeeping-sample",
"id": "acfe22c2de42a8a3ce032c2847b421311d65db03",
"size": "325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sampleproject/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1944"
},
{
"name": "HTML",
"bytes": "9139"
},
{
"name": "Python",
"bytes": "23727"
},
{
"name": "Shell",
"bytes": "217"
}
],
"symlink_target": ""
} |