text
stringlengths 26
2.53M
|
---|
<|endoftext|>"""
Upload handlers to test the upload API.
"""
from django.core.files.uploadhandler import FileUploadHandler, StopUpload
class QuotaUploadHandler(FileUploadHandler):
    """
    Test upload handler that terminates the connection once a fixed quota
    (5 MB) of uploaded data has been received.
    """

    # Maximum number of bytes accepted before the upload is aborted.
    QUOTA = 5 * 2**20  # 5 MB

    def __init__(self, request=None):
        super(QuotaUploadHandler, self).__init__(request)
        # Running total of bytes seen across all received chunks.
        self.total_upload = 0

    def receive_data_chunk(self, raw_data, start):
        """Accumulate the chunk size and pass the chunk through, or abort."""
        self.total_upload = self.total_upload + len(raw_data)
        if self.total_upload < self.QUOTA:
            return raw_data
        # Quota reached: abort the upload and reset the connection.
        raise StopUpload(connection_reset=True)

    def file_complete(self, file_size):
        """Return None: this handler never produces a file object itself."""
        return None
class CustomUploadError(Exception):
    """Marker exception raised deliberately from an upload handler in tests."""
class ErroringUploadHandler(FileUploadHandler):
    """Upload handler whose chunk hook always fails, to test error paths."""

    def receive_data_chunk(self, raw_data, start):
        # Simulate an unexpected failure inside an upload handler.
        raise CustomUploadError("Oops!")
<|endoftext|> |
<|endoftext|>from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from .models import Author, Artist
class ListViewTests(TestCase):
    """Regression tests for the class-based generic ListView."""

    # Test data and URLconf used by every test in this class.
    fixtures = ["generic-views-test-data.json"]
    urls = "regressiontests.generic_views.urls"

    def test_items(self):
        # A plain list of dicts (not a queryset) is accepted as the item source.
        res = self.client.get("/list/dict/")
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, "generic_views/list.html")
        self.assertEqual(res.context["object_list"][0]["first"], "John")

    def test_queryset(self):
        # An unpaginated queryset exposes paginator context keys as None/False.
        res = self.client.get("/list/authors/")
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, "generic_views/author_list.html")
        self.assertEqual(list(res.context["object_list"]), list(Author.objects.all()))
        self.assertIs(res.context["author_list"], res.context["object_list"])
        self.assertIsNone(res.context["paginator"])
        self.assertIsNone(res.context["page_obj"])
        self.assertFalse(res.context["is_paginated"])

    def test_paginated_queryset(self):
        # 100 authors at 30 per page -> 4 pages; page 1 holds Author 00-29.
        self._make_authors(100)
        res = self.client.get("/list/authors/paginated/")
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, "generic_views/author_list.html")
        self.assertEqual(len(res.context["object_list"]), 30)
        self.assertIs(res.context["author_list"], res.context["object_list"])
        self.assertTrue(res.context["is_paginated"])
        self.assertEqual(res.context["page_obj"].number, 1)
        self.assertEqual(res.context["paginator"].num_pages, 4)
        self.assertEqual(res.context["author_list"][0].name, "Author 00")
        self.assertEqual(list(res.context["author_list"])[-1].name, "Author 29")

    def test_paginated_queryset_shortdata(self):
        # Test that short datasets ALSO result in a paginated view.
        res = self.client.get("/list/authors/paginated/")
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, "generic_views/author_list.html")
        self.assertEqual(list(res.context["object_list"]), list(Author.objects.all()))
        self.assertIs(res.context["author_list"], res.context["object_list"])
        self.assertEqual(res.context["page_obj"].number, 1)
        self.assertEqual(res.context["paginator"].num_pages, 1)
        self.assertFalse(res.context["is_paginated"])

    def test_paginated_get_page_by_query_string(self):
        # ?page=2 selects the second page (Author 30 onwards).
        self._make_authors(100)
        res = self.client.get("/list/authors/paginated/", {"page": "2"})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, "generic_views/author_list.html")
        self.assertEqual(len(res.context["object_list"]), 30)
        self.assertIs(res.context["author_list"], res.context["object_list"])
        self.assertEqual(res.context["author_list"][0].name, "Author 30")
        self.assertEqual(res.context["page_obj"].number, 2)

    def test_paginated_get_last_page_by_query_string(self):
        # The special value ?page=last jumps to the final (short) page.
        self._make_authors(100)
        res = self.client.get("/list/authors/paginated/", {"page": "last"})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context["object_list"]), 10)
        self.assertIs(res.context["author_list"], res.context["object_list"])
        self.assertEqual(res.context["author_list"][0].name, "Author 90")
        self.assertEqual(res.context["page_obj"].number, 4)

    def test_paginated_get_page_by_urlvar(self):
        # The page number may also come from the URL path itself.
        self._make_authors(100)
        res = self.client.get("/list/authors/paginated/3/")
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, "generic_views/author_list.html")
        self.assertEqual(len(res.context["object_list"]), 30)
        self.assertIs(res.context["author_list"], res.context["object_list"])
        self.assertEqual(res.context["author_list"][0].name, "Author 60")
        self.assertEqual(res.context["page_obj"].number, 3)

    def test_paginated_page_out_of_range(self):
        # Requesting a page past the end is a 404, not an empty page.
        self._make_authors(100)
        res = self.client.get("/list/authors/paginated/42/")
        self.assertEqual(res.status_code, 404)

    def test_paginated_invalid_page(self):
        # Non-numeric page values (other than "last") are a 404.
        self._make_authors(100)
        res = self.client.get("/list/authors/paginated/?page=frog")
        self.assertEqual(res.status_code, 404)

    def test_paginated_custom_paginator_class(self):
        self._make_authors(7)
        res = self.client.get("/list/authors/paginated/custom_class/")
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context["paginator"].num_pages, 1)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context["object_list"]), 7)

    def test_paginated_custom_paginator_constructor(self):
        self._make_authors(7)
        res = self.client.get("/list/authors/paginated/custom_constructor/")
        self.assertEqual(res.status_code, 200)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context["object_list"]), 7)

    def test_paginated_non_queryset(self):
        # Pagination also works over a plain list, not just querysets.
        res = self.client.get("/list/dict/paginated/")
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context["object_list"]), 1)

    def test_verbose_name(self):
        # The model's verbose name drives the "<name>_list" context variable.
        res = self.client.get("/list/artists/")
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, "generic_views/list.html")
        self.assertEqual(list(res.context["object_list"]), list(Artist.objects.all()))
        self.assertIs(res.context["artist_list"], res.context["object_list"])
        self.assertIsNone(res.context["paginator"])
        self.assertIsNone(res.context["page_obj"])
        self.assertFalse(res.context["is_paginated"])

    def test_allow_empty_false(self):
        # With allow_empty=False an empty list of objects becomes a 404.
        res = self.client.get("/list/authors/notempty/")
        self.assertEqual(res.status_code, 200)
        Author.objects.all().delete()
        res = self.client.get("/list/authors/notempty/")
        self.assertEqual(res.status_code, 404)

    def test_template_name(self):
        # An explicit template_name overrides the derived default.
        res = self.client.get("/list/authors/template_name/")
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context["object_list"]), list(Author.objects.all()))
        self.assertIs(res.context["author_list"], res.context["object_list"])
        self.assertTemplateUsed(res, "generic_views/list.html")

    def test_template_name_suffix(self):
        # template_name_suffix changes the derived template name.
        res = self.client.get("/list/authors/template_name_suffix/")
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context["object_list"]), list(Author.objects.all()))
        self.assertIs(res.context["author_list"], res.context["object_list"])
        self.assertTemplateUsed(res, "generic_views/author_objects.html")

    def test_context_object_name(self):
        # context_object_name renames the list in the template context.
        res = self.client.get("/list/authors/context_object_name/")
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context["object_list"]), list(Author.objects.all()))
        self.assertNotIn("authors", res.context)
        self.assertIs(res.context["author_list"], res.context["object_list"])
        self.assertTemplateUsed(res, "generic_views/author_list.html")

    def test_duplicate_context_object_name(self):
        # A context_object_name equal to the default does not duplicate keys.
        res = self.client.get("/list/authors/dupe_context_object_name/")
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context["object_list"]), list(Author.objects.all()))
        self.assertNotIn("authors", res.context)
        self.assertNotIn("author_list", res.context)
        self.assertTemplateUsed(res, "generic_views/author_list.html")

    def test_missing_items(self):
        # A view configured with no queryset/model raises ImproperlyConfigured.
        self.assertRaises(
            ImproperlyConfigured, self.client.get, "/list/authors/invalid/"
        )

    def _make_authors(self, n):
        # Helper: replace all authors with `n` predictably named/ordered rows.
        Author.objects.all().delete()
        for i in range(n):
            Author.objects.create(name="Author %02i" % i, slug="a%s" % i)
<|endoftext|> |
<|endoftext|>from django.db import models
class Reporter(models.Model):
    """A news reporter; referenced by Article via a ForeignKey."""

    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    email = models.EmailField()
    # External Facebook account id; nullable since not every reporter has one.
    facebook_user_id = models.BigIntegerField(null=True)

    def __unicode__(self):
        # Python 2 string representation: "First Last".
        return "%s %s" % (self.first_name, self.last_name)
class Article(models.Model):
    """An article written by a Reporter, ordered alphabetically by headline."""

    headline = models.CharField(max_length=100)
    pub_date = models.DateField()
    reporter = models.ForeignKey(Reporter)

    def __unicode__(self):
        return self.headline

    class Meta:
        # Default queryset ordering for this model.
        ordering = ("headline",)
<|endoftext|> |
<|endoftext|>try:
from io import StringIO
except ImportError:
from io import StringIO
from django.core import management
from django.contrib.auth.models import User
from django.test import TestCase
from .models import Person, Group, Membership, UserMembership, Car, Driver, CarDriver
class M2MThroughTestCase(TestCase):
    """Regression tests for m2m relations that use an explicit through model."""

    def test_everything(self):
        bob = Person.objects.create(name="Bob")
        jim = Person.objects.create(name="Jim")
        rock = Group.objects.create(name="Rock")
        roll = Group.objects.create(name="Roll")
        frank = User.objects.create_user("frank", "[email protected]", "password")
        jane = User.objects.create_user("jane", "[email protected]", "password")
        Membership.objects.create(person=bob, group=rock)
        Membership.objects.create(person=bob, group=roll)
        Membership.objects.create(person=jim, group=rock)
        self.assertQuerysetEqual(
            bob.group_set.all(),
            [
                "<Group: Rock>",
                "<Group: Roll>",
            ],
        )
        self.assertQuerysetEqual(
            roll.members.all(),
            [
                "<Person: Bob>",
            ],
        )
        # Direct assignment and .create() are disallowed on m2m relations
        # that go through an intermediate model.
        self.assertRaises(AttributeError, setattr, bob, "group_set", [])
        self.assertRaises(AttributeError, setattr, roll, "members", [])
        self.assertRaises(AttributeError, rock.members.create, name="Anne")
        self.assertRaises(AttributeError, bob.group_set.create, name="Funk")
        UserMembership.objects.create(user=frank, group=rock)
        UserMembership.objects.create(user=frank, group=roll)
        UserMembership.objects.create(user=jane, group=rock)
        self.assertQuerysetEqual(
            frank.group_set.all(),
            [
                "<Group: Rock>",
                "<Group: Roll>",
            ],
        )
        self.assertQuerysetEqual(
            roll.user_members.all(),
            [
                "<User: frank>",
            ],
        )

    def test_serialization(self):
        "m2m-through models aren't serialized as m2m fields. Refs #8134"
        p = Person.objects.create(name="Bob")
        g = Group.objects.create(name="Roll")
        m = Membership.objects.create(person=p, group=g)
        pks = {"p_pk": p.pk, "g_pk": g.pk, "m_pk": m.pk}
        out = StringIO()
        management.call_command(
            "dumpdata", "m2m_through_regress", format="json", stdout=out
        )
        self.assertEqual(
            out.getvalue().strip(),
            """[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": 100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": "Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]"""
            % pks,
        )
        out = StringIO()
        management.call_command(
            "dumpdata", "m2m_through_regress", format="xml", indent=2, stdout=out
        )
        # NOTE(review): dumpdata is invoked with indent=2, yet the expected
        # XML below carries no per-level indentation — the literal may have
        # lost its leading whitespace in transit. Verify against the actual
        # dumpdata output.
        self.assertEqual(
            out.getvalue().strip(),
            """
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="%(m_pk)s" model="m2m_through_regress.membership">
<field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
<field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
<field type="IntegerField" name="price">100</field>
</object>
<object pk="%(p_pk)s" model="m2m_through_regress.person">
<field type="CharField" name="name">Bob</field>
</object>
<object pk="%(g_pk)s" model="m2m_through_regress.group">
<field type="CharField" name="name">Roll</field>
</object>
</django-objects>
""".strip()
            % pks,
        )

    def test_join_trimming(self):
        "Check that we don't involve too many copies of the intermediate table when doing a join. Refs #8046, #8254"
        bob = Person.objects.create(name="Bob")
        jim = Person.objects.create(name="Jim")
        rock = Group.objects.create(name="Rock")
        roll = Group.objects.create(name="Roll")
        Membership.objects.create(person=bob, group=rock)
        Membership.objects.create(person=jim, group=rock, price=50)
        Membership.objects.create(person=bob, group=roll, price=50)
        self.assertQuerysetEqual(
            rock.members.filter(membership__price=50),
            [
                "<Person: Jim>",
            ],
        )
        self.assertQuerysetEqual(
            bob.group_set.filter(membership__price=50),
            [
                "<Group: Roll>",
            ],
        )
class ToFieldThroughTests(TestCase):
    """Tests for m2m-through relations joining on a to_field instead of pk."""

    def setUp(self):
        self.car = Car.objects.create(make="Toyota")
        self.driver = Driver.objects.create(name="Ryan Briscoe")
        CarDriver.objects.create(car=self.car, driver=self.driver)
        # We are testing if wrong objects get deleted due to using wrong
        # field value in m2m queries. So, it is essential that the pk
        # numberings do not match.
        # Create one intentionally unused driver to mix up the autonumbering
        self.unused_driver = Driver.objects.create(name="Barney Gumble")
        # And two intentionally unused cars.
        self.unused_car1 = Car.objects.create(make="Trabant")
        self.unused_car2 = Car.objects.create(make="Wartburg")

    def test_to_field(self):
        self.assertQuerysetEqual(self.car.drivers.all(), ["<Driver: Ryan Briscoe>"])

    def test_to_field_reverse(self):
        self.assertQuerysetEqual(self.driver.car_set.all(), ["<Car: Toyota>"])

    def test_to_field_clear_reverse(self):
        # Clearing from the reverse side removes the relation.
        self.driver.car_set.clear()
        self.assertQuerysetEqual(self.driver.car_set.all(), [])

    def test_to_field_clear(self):
        # Clearing from the forward side removes the relation.
        self.car.drivers.clear()
        self.assertQuerysetEqual(self.car.drivers.all(), [])

    # Low level tests for _add_items and _remove_items. We test these methods
    # because .add/.remove aren't available for m2m fields with through, but
    # through is the only way to set to_field currently. We do want to make
    # sure these methods are ready if the ability to use .add or .remove with
    # to_field relations is added some day.
    def test_add(self):
        self.assertQuerysetEqual(self.car.drivers.all(), ["<Driver: Ryan Briscoe>"])
        # Yikes - barney is going to drive...
        self.car.drivers._add_items("car", "driver", self.unused_driver)
        self.assertQuerysetEqual(
            self.car.drivers.all(),
            ["<Driver: Barney Gumble>", "<Driver: Ryan Briscoe>"],
        )

    def test_add_null(self):
        # Adding through a null to_field value must fail loudly.
        nullcar = Car.objects.create(make=None)
        with self.assertRaises(ValueError):
            nullcar.drivers._add_items("car", "driver", self.unused_driver)

    def test_add_related_null(self):
        nulldriver = Driver.objects.create(name=None)
        with self.assertRaises(ValueError):
            self.car.drivers._add_items("car", "driver", nulldriver)

    def test_add_reverse(self):
        car2 = Car.objects.create(make="Honda")
        self.assertQuerysetEqual(self.driver.car_set.all(), ["<Car: Toyota>"])
        self.driver.car_set._add_items("driver", "car", car2)
        self.assertQuerysetEqual(
            self.driver.car_set.all(), ["<Car: Toyota>", "<Car: Honda>"]
        )

    def test_add_null_reverse(self):
        nullcar = Car.objects.create(make=None)
        with self.assertRaises(ValueError):
            self.driver.car_set._add_items("driver", "car", nullcar)

    def test_add_null_reverse_related(self):
        nulldriver = Driver.objects.create(name=None)
        with self.assertRaises(ValueError):
            nulldriver.car_set._add_items("driver", "car", self.car)

    def test_remove(self):
        self.assertQuerysetEqual(self.car.drivers.all(), ["<Driver: Ryan Briscoe>"])
        self.car.drivers._remove_items("car", "driver", self.driver)
        self.assertQuerysetEqual(self.car.drivers.all(), [])

    def test_remove_reverse(self):
        self.assertQuerysetEqual(self.driver.car_set.all(), ["<Car: Toyota>"])
        self.driver.car_set._remove_items("driver", "car", self.car)
        self.assertQuerysetEqual(self.driver.car_set.all(), [])
class ThroughLoadDataTestCase(TestCase):
    """Fixture-loading regression test for m2m-through models."""

    fixtures = ["m2m_through"]

    def test_sequence_creation(self):
        "Check that sequences on an m2m_through are created for the through model, not a phantom auto-generated m2m table. Refs #11107"
        out = StringIO()
        management.call_command(
            "dumpdata", "m2m_through_regress", format="json", stdout=out
        )
        # If a phantom m2m table had been created, the dump would differ.
        self.assertEqual(
            out.getvalue().strip(),
            """[{"pk": 1, "model": "m2m_through_regress.usermembership", "fields": {"price": 100, "group": 1, "user": 1}}, {"pk": 1, "model": "m2m_through_regress.person", "fields": {"name": "Guido"}}, {"pk": 1, "model": "m2m_through_regress.group", "fields": {"name": "Python Core Group"}}]""",
        )
<|endoftext|> |
<|endoftext|>from datetime import date
from django import forms
from django.conf import settings
from django.contrib.admin.options import (
ModelAdmin,
TabularInline,
InlineModelAdmin,
HORIZONTAL,
VERTICAL,
)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.validation import validate
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.contrib.admin import SimpleListFilter, BooleanFieldListFilter
from django.core.exceptions import ImproperlyConfigured
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import unittest
from .models import Band, Concert, ValidationTestModel, ValidationTestInlineModel
class MockRequest(object):
    """Bare stand-in for an HttpRequest; attributes are attached ad hoc."""
class MockSuperUser(object):
    """Fake user object that claims to hold every permission."""

    def has_perm(self, perm):
        # A superuser may do anything, regardless of which `perm` is asked for.
        return True
# Shared module-level fixture: a fake request whose user passes every
# permission check, reused throughout the admin tests below.
request = MockRequest()
request.user = MockSuperUser()
class ModelAdminTests(TestCase):
    """Unit tests for ModelAdmin form/fieldset generation and widget wiring."""

    def setUp(self):
        # One Band instance to use as the `obj` argument everywhere.
        self.band = Band.objects.create(
            name="The Doors",
            bio="",
            sign_date=date(1965, 1, 1),
        )
        self.site = AdminSite()

    # form/fields/fieldsets interaction ##############################

    def test_default_fields(self):
        ma = ModelAdmin(Band, self.site)
        self.assertEqual(
            list(ma.get_form(request).base_fields.keys()), ["name", "bio", "sign_date"]
        )

    def test_default_fieldsets(self):
        # fieldsets_add and fieldsets_change should return a special data structure that
        # is used in the templates. They should generate the "right thing" whether we
        # have specified a custom form, the fields argument, or nothing at all.
        #
        # Here's the default case. There are no custom form_add/form_change methods,
        # no fields argument, and no fieldsets argument.
        ma = ModelAdmin(Band, self.site)
        self.assertEqual(
            ma.get_fieldsets(request),
            [(None, {"fields": ["name", "bio", "sign_date"]})],
        )
        self.assertEqual(
            ma.get_fieldsets(request, self.band),
            [(None, {"fields": ["name", "bio", "sign_date"]})],
        )

    def test_field_arguments(self):
        # If we specify the fields argument, fieldsets_add and fieldsets_change should
        # just stick the fields into a formsets structure and return it.
        class BandAdmin(ModelAdmin):
            fields = ["name"]

        ma = BandAdmin(Band, self.site)
        self.assertEqual(ma.get_fieldsets(request), [(None, {"fields": ["name"]})])
        self.assertEqual(
            ma.get_fieldsets(request, self.band), [(None, {"fields": ["name"]})]
        )

    def test_field_arguments_restricted_on_form(self):
        # If we specify fields or fieldsets, it should exclude fields on the Form class
        # to the fields specified. This may cause errors to be raised in the db layer if
        # required model fields arent in fields/fieldsets, but that's preferable to
        # ghost errors where you have a field in your Form class that isn't being
        # displayed because you forgot to add it to fields/fieldsets
        # Using `fields`.
        class BandAdmin(ModelAdmin):
            fields = ["name"]

        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields.keys()), ["name"])
        self.assertEqual(
            list(ma.get_form(request, self.band).base_fields.keys()), ["name"]
        )

        # Using `fieldsets`.
        class BandAdmin(ModelAdmin):
            fieldsets = [(None, {"fields": ["name"]})]

        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields.keys()), ["name"])
        self.assertEqual(
            list(ma.get_form(request, self.band).base_fields.keys()), ["name"]
        )

        # Using `exclude`.
        class BandAdmin(ModelAdmin):
            exclude = ["bio"]

        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(ma.get_form(request).base_fields.keys()), ["name", "sign_date"]
        )

        # You can also pass a tuple to `exclude`.
        class BandAdmin(ModelAdmin):
            exclude = ("bio",)

        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(ma.get_form(request).base_fields.keys()), ["name", "sign_date"]
        )

        # Using `fields` and `exclude`.
        class BandAdmin(ModelAdmin):
            fields = ["name", "bio"]
            exclude = ["bio"]

        ma = BandAdmin(Band, self.site)
        self.assertEqual(list(ma.get_form(request).base_fields.keys()), ["name"])

    def test_custom_form_meta_exclude_with_readonly(self):
        """
        Ensure that the custom ModelForm's `Meta.exclude` is respected when
        used in conjunction with `ModelAdmin.readonly_fields` and when no
        `ModelAdmin.exclude` is defined.
        Refs #14496.
        """
        # First, with `ModelAdmin` -----------------------
        class AdminBandForm(forms.ModelForm):
            class Meta:
                model = Band
                exclude = ["bio"]

        class BandAdmin(ModelAdmin):
            readonly_fields = ["name"]
            form = AdminBandForm

        ma = BandAdmin(Band, self.site)
        # "bio" is excluded by the form, "name" is read-only -> only sign_date.
        self.assertEqual(
            list(ma.get_form(request).base_fields.keys()),
            [
                "sign_date",
            ],
        )

        # Then, with `InlineModelAdmin` -----------------
        class AdminConcertForm(forms.ModelForm):
            class Meta:
                model = Concert
                exclude = ["day"]

        class ConcertInline(TabularInline):
            readonly_fields = ["transport"]
            form = AdminConcertForm
            fk_name = "main_band"
            model = Concert

        class BandAdmin(ModelAdmin):
            inlines = [ConcertInline]

        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(list(ma.get_formsets(request))[0]().forms[0].fields.keys()),
            [
                "main_band",
                "opening_band",
                "id",
                "DELETE",
            ],
        )

    def test_custom_form_meta_exclude(self):
        """
        Ensure that the custom ModelForm's `Meta.exclude` is overridden if
        `ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined.
        Refs #14496.
        """
        # First, with `ModelAdmin` -----------------------
        class AdminBandForm(forms.ModelForm):
            class Meta:
                model = Band
                exclude = ["bio"]

        class BandAdmin(ModelAdmin):
            exclude = ["name"]
            form = AdminBandForm

        ma = BandAdmin(Band, self.site)
        # ModelAdmin.exclude wins: "name" is dropped, "bio" survives.
        self.assertEqual(
            list(ma.get_form(request).base_fields.keys()),
            [
                "bio",
                "sign_date",
            ],
        )

        # Then, with `InlineModelAdmin` -----------------
        class AdminConcertForm(forms.ModelForm):
            class Meta:
                model = Concert
                exclude = ["day"]

        class ConcertInline(TabularInline):
            exclude = ["transport"]
            form = AdminConcertForm
            fk_name = "main_band"
            model = Concert

        class BandAdmin(ModelAdmin):
            inlines = [ConcertInline]

        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(list(ma.get_formsets(request))[0]().forms[0].fields.keys()),
            [
                "main_band",
                "opening_band",
                "day",
                "id",
                "DELETE",
            ],
        )

    def test_custom_form_validation(self):
        # If we specify a form, it should use it allowing custom validation to work
        # properly. This won't, however, break any of the admin widgets or media.
        class AdminBandForm(forms.ModelForm):
            delete = forms.BooleanField()

            class Meta:
                model = Band

        class BandAdmin(ModelAdmin):
            form = AdminBandForm

        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(ma.get_form(request).base_fields.keys()),
            ["name", "bio", "sign_date", "delete"],
        )
        self.assertEqual(
            type(ma.get_form(request).base_fields["sign_date"].widget), AdminDateWidget
        )

    def test_form_exclude_kwarg_override(self):
        """
        Ensure that the `exclude` kwarg passed to `ModelAdmin.get_form()`
        overrides all other declarations. Refs #8999.
        """

        class AdminBandForm(forms.ModelForm):
            class Meta:
                model = Band
                exclude = ["name"]

        class BandAdmin(ModelAdmin):
            exclude = [
                "sign_date",
            ]
            form = AdminBandForm

            def get_form(self, request, obj=None, **kwargs):
                # The runtime kwarg should beat both Meta.exclude and
                # ModelAdmin.exclude.
                kwargs["exclude"] = ["bio"]
                return super(BandAdmin, self).get_form(request, obj, **kwargs)

        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(ma.get_form(request).base_fields.keys()),
            [
                "name",
                "sign_date",
            ],
        )

    def test_formset_exclude_kwarg_override(self):
        """
        Ensure that the `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
        overrides all other declarations. Refs #8999.
        """

        class AdminConcertForm(forms.ModelForm):
            class Meta:
                model = Concert
                exclude = ["day"]

        class ConcertInline(TabularInline):
            exclude = ["transport"]
            form = AdminConcertForm
            fk_name = "main_band"
            model = Concert

            def get_formset(self, request, obj=None, **kwargs):
                # The runtime kwarg should beat both Meta.exclude and
                # InlineModelAdmin.exclude.
                kwargs["exclude"] = ["opening_band"]
                return super(ConcertInline, self).get_formset(request, obj, **kwargs)

        class BandAdmin(ModelAdmin):
            inlines = [ConcertInline]

        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(list(ma.get_formsets(request))[0]().forms[0].fields.keys()),
            [
                "main_band",
                "day",
                "transport",
                "id",
                "DELETE",
            ],
        )

    def test_queryset_override(self):
        # If we need to override the queryset of a ModelChoiceField in our custom form
        # make sure that RelatedFieldWidgetWrapper doesn't mess that up.
        band2 = Band(name="The Beatles", bio="", sign_date=date(1962, 1, 1))
        band2.save()

        class ConcertAdmin(ModelAdmin):
            pass

        ma = ConcertAdmin(Concert, self.site)
        form = ma.get_form(request)()
        self.assertHTMLEqual(
            str(form["main_band"]),
            '<select name="main_band" id="id_main_band">\n'
            '<option value="" selected="selected">---------</option>\n'
            '<option value="%d">The Beatles</option>\n'
            '<option value="%d">The Doors</option>\n'
            "</select>" % (band2.id, self.band.id),
        )

        class AdminConcertForm(forms.ModelForm):
            class Meta:
                model = Concert

            def __init__(self, *args, **kwargs):
                super(AdminConcertForm, self).__init__(*args, **kwargs)
                # Restrict the choice field to a single band.
                self.fields["main_band"].queryset = Band.objects.filter(
                    name="The Doors"
                )

        class ConcertAdmin(ModelAdmin):
            form = AdminConcertForm

        ma = ConcertAdmin(Concert, self.site)
        form = ma.get_form(request)()
        self.assertHTMLEqual(
            str(form["main_band"]),
            '<select name="main_band" id="id_main_band">\n'
            '<option value="" selected="selected">---------</option>\n'
            '<option value="%d">The Doors</option>\n'
            "</select>" % self.band.id,
        )

    def test_regression_for_ticket_15820(self):
        """
        Ensure that `obj` is passed from `InlineModelAdmin.get_fieldsets()` to
        `InlineModelAdmin.get_formset()`.
        """

        class CustomConcertForm(forms.ModelForm):
            class Meta:
                model = Concert
                fields = ["day"]

        class ConcertInline(TabularInline):
            model = Concert
            fk_name = "main_band"

            def get_formset(self, request, obj=None, **kwargs):
                # Swap in the restricted form only when editing an object.
                if obj:
                    kwargs["form"] = CustomConcertForm
                return super(ConcertInline, self).get_formset(request, obj, **kwargs)

        class BandAdmin(ModelAdmin):
            inlines = [ConcertInline]

        concert = Concert.objects.create(
            main_band=self.band, opening_band=self.band, day=1
        )
        ma = BandAdmin(Band, self.site)
        inline_instances = ma.get_inline_instances(request)
        fieldsets = list(inline_instances[0].get_fieldsets(request))
        self.assertEqual(
            fieldsets[0][1]["fields"], ["main_band", "opening_band", "day", "transport"]
        )
        fieldsets = list(
            inline_instances[0].get_fieldsets(request, inline_instances[0].model)
        )
        self.assertEqual(fieldsets[0][1]["fields"], ["day"])

    # radio_fields behavior ###########################################

    def test_default_foreign_key_widget(self):
        # First, without any radio_fields specified, the widgets for ForeignKey
        # and fields with choices specified ought to be a basic Select widget.
        # ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
        # they need to be handled properly when type checking. For Select fields, all of
        # the choices lists have a first entry of dashes.
        cma = ModelAdmin(Concert, self.site)
        cmafa = cma.get_form(request)
        self.assertEqual(type(cmafa.base_fields["main_band"].widget.widget), Select)
        self.assertEqual(
            list(cmafa.base_fields["main_band"].widget.choices),
            [("", "---------"), (self.band.id, "The Doors")],
        )
        self.assertEqual(type(cmafa.base_fields["opening_band"].widget.widget), Select)
        self.assertEqual(
            list(cmafa.base_fields["opening_band"].widget.choices),
            [("", "---------"), (self.band.id, "The Doors")],
        )
        self.assertEqual(type(cmafa.base_fields["day"].widget), Select)
        self.assertEqual(
            list(cmafa.base_fields["day"].widget.choices),
            [("", "---------"), (1, "Fri"), (2, "Sat")],
        )
        self.assertEqual(type(cmafa.base_fields["transport"].widget), Select)
        self.assertEqual(
            list(cmafa.base_fields["transport"].widget.choices),
            [("", "---------"), (1, "Plane"), (2, "Train"), (3, "Bus")],
        )

    def test_foreign_key_as_radio_field(self):
        # Now specify all the fields as radio_fields. Widgets should now be
        # RadioSelect, and the choices list should have a first entry of 'None' if
        # blank=True for the model field. Finally, the widget should have the
        # 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
        class ConcertAdmin(ModelAdmin):
            radio_fields = {
                "main_band": HORIZONTAL,
                "opening_band": VERTICAL,
                "day": VERTICAL,
                "transport": HORIZONTAL,
            }

        cma = ConcertAdmin(Concert, self.site)
        cmafa = cma.get_form(request)
        self.assertEqual(
            type(cmafa.base_fields["main_band"].widget.widget), AdminRadioSelect
        )
        self.assertEqual(
            cmafa.base_fields["main_band"].widget.attrs, {"class": "radiolist inline"}
        )
        self.assertEqual(
            list(cmafa.base_fields["main_band"].widget.choices),
            [(self.band.id, "The Doors")],
        )
        self.assertEqual(
            type(cmafa.base_fields["opening_band"].widget.widget), AdminRadioSelect
        )
        self.assertEqual(
            cmafa.base_fields["opening_band"].widget.attrs, {"class": "radiolist"}
        )
        self.assertEqual(
            list(cmafa.base_fields["opening_band"].widget.choices),
            [("", "None"), (self.band.id, "The Doors")],
        )
        self.assertEqual(type(cmafa.base_fields["day"].widget), AdminRadioSelect)
        self.assertEqual(cmafa.base_fields["day"].widget.attrs, {"class": "radiolist"})
        self.assertEqual(
            list(cmafa.base_fields["day"].widget.choices), [(1, "Fri"), (2, "Sat")]
        )
        self.assertEqual(type(cmafa.base_fields["transport"].widget), AdminRadioSelect)
        self.assertEqual(
            cmafa.base_fields["transport"].widget.attrs, {"class": "radiolist inline"}
        )
        self.assertEqual(
            list(cmafa.base_fields["transport"].widget.choices),
            [("", "None"), (1, "Plane"), (2, "Train"), (3, "Bus")],
        )

        # A custom form's Meta.exclude interacts with the generated form.
        class AdminConcertForm(forms.ModelForm):
            class Meta:
                model = Concert
                exclude = ("transport",)

        class ConcertAdmin(ModelAdmin):
            form = AdminConcertForm

        ma = ConcertAdmin(Concert, self.site)
        self.assertEqual(
            list(ma.get_form(request).base_fields.keys()),
            ["main_band", "opening_band", "day"],
        )

        # An explicit Meta.fields list limits and orders the fields.
        class AdminConcertForm(forms.ModelForm):
            extra = forms.CharField()

            class Meta:
                model = Concert
                fields = ["extra", "transport"]

        class ConcertAdmin(ModelAdmin):
            form = AdminConcertForm

        ma = ConcertAdmin(Concert, self.site)
        self.assertEqual(
            list(ma.get_form(request).base_fields.keys()), ["extra", "transport"]
        )

        # The same form used through an inline also gets id/DELETE/fk fields.
        class ConcertInline(TabularInline):
            form = AdminConcertForm
            model = Concert
            fk_name = "main_band"
            can_delete = True

        class BandAdmin(ModelAdmin):
            inlines = [ConcertInline]

        ma = BandAdmin(Band, self.site)
        self.assertEqual(
            list(list(ma.get_formsets(request))[0]().forms[0].fields.keys()),
            ["extra", "transport", "id", "DELETE", "main_band"],
        )
class ValidationTests(unittest.TestCase):
    """
    Validation of ModelAdmin / InlineModelAdmin options.

    Each test follows the same pattern: declare a deliberately misconfigured
    admin class, assert that validation raises ImproperlyConfigured with a
    message matching the given regex, and (usually) finish with a valid
    configuration that must validate without raising.
    """
    def test_validation_only_runs_in_debug(self):
        # Ensure validation only runs when DEBUG = True
        try:
            settings.DEBUG = True
            class ValidationTestModelAdmin(ModelAdmin):
                raw_id_fields = 10
            site = AdminSite()
            self.assertRaisesRegex(
                ImproperlyConfigured,
                "'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.",
                site.register,
                ValidationTestModel,
                ValidationTestModelAdmin,
            )
        finally:
            # NOTE(review): DEBUG is reset to False unconditionally rather than
            # restored to its pre-test value — assumes the test settings always
            # run with DEBUG = False; confirm.
            settings.DEBUG = False
        # With DEBUG off, registering the same invalid admin must succeed.
        site = AdminSite()
        site.register(ValidationTestModel, ValidationTestModelAdmin)
    def test_raw_id_fields_validation(self):
        # raw_id_fields must be a list/tuple of existing FK/M2M field names.
        class ValidationTestModelAdmin(ModelAdmin):
            raw_id_fields = 10
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            raw_id_fields = ("non_existent_field",)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.raw_id_fields' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            raw_id_fields = ("name",)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.raw_id_fields\[0\]', 'name' must be either a ForeignKey or ManyToManyField.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        # Valid: 'users' is a ManyToManyField.
        class ValidationTestModelAdmin(ModelAdmin):
            raw_id_fields = ("users",)
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_fieldsets_validation(self):
        # fieldsets must be a list/tuple of (title, options-dict) pairs, the
        # options dict must have a 'fields' key, and fields/fieldsets are
        # mutually exclusive and duplicate-free.
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = 10
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.fieldsets' must be a list or tuple.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = ({},)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.fieldsets\[0\]' must be a list or tuple.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = ((),)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.fieldsets\[0\]' does not have exactly two elements.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = (("General", ()),)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.fieldsets\[0\]\[1\]' must be a dictionary.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = (("General", {}),)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'fields' key is required in ValidationTestModelAdmin.fieldsets\[0\]\[1\] field options dict.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = (("General", {"fields": ("non_existent_field",)}),)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.fieldsets\[0\]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        # Valid fieldsets configuration.
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = (("General", {"fields": ("name",)}),)
        validate(ValidationTestModelAdmin, ValidationTestModel)
        # fields and fieldsets may not both be specified.
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = (("General", {"fields": ("name",)}),)
            fields = [
                "name",
            ]
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "Both fieldsets and fields are specified in ValidationTestModelAdmin.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        # Duplicate field names are rejected in both fieldsets and fields.
        class ValidationTestModelAdmin(ModelAdmin):
            fieldsets = [(None, {"fields": ["name", "name"]})]
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "There are duplicate field\(s\) in ValidationTestModelAdmin.fieldsets",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            fields = ["name", "name"]
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "There are duplicate field\(s\) in ValidationTestModelAdmin.fields",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
    def test_form_validation(self):
        # A custom form must inherit from BaseModelForm.
        class FakeForm(object):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            form = FakeForm
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "ValidationTestModelAdmin.form does not inherit from BaseModelForm.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
    def test_fieldsets_with_custom_form_validation(self):
        # fieldsets are checked against the (possibly custom) form's fields.
        class BandAdmin(ModelAdmin):
            fieldsets = (("Band", {"fields": ("non_existent_field",)}),)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'BandAdmin.fieldsets\[0\]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
            validate,
            BandAdmin,
            Band,
        )
        class BandAdmin(ModelAdmin):
            fieldsets = (("Band", {"fields": ("name",)}),)
        validate(BandAdmin, Band)
        class AdminBandForm(forms.ModelForm):
            class Meta:
                model = Band
        class BandAdmin(ModelAdmin):
            form = AdminBandForm
            fieldsets = (("Band", {"fields": ("non_existent_field",)}),)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'BandAdmin.fieldsets\[0]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
            validate,
            BandAdmin,
            Band,
        )
        # Extra fields declared directly on the custom form are allowed in
        # fieldsets (here: 'delete').
        class AdminBandForm(forms.ModelForm):
            delete = forms.BooleanField()
            class Meta:
                model = Band
        class BandAdmin(ModelAdmin):
            form = AdminBandForm
            fieldsets = (("Band", {"fields": ("name", "bio", "sign_date", "delete")}),)
        validate(BandAdmin, Band)
    def test_filter_vertical_validation(self):
        # filter_vertical: list/tuple of existing M2M field names only.
        class ValidationTestModelAdmin(ModelAdmin):
            filter_vertical = 10
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.filter_vertical' must be a list or tuple.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            filter_vertical = ("non_existent_field",)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.filter_vertical' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            filter_vertical = ("name",)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.filter_vertical\[0\]' must be a ManyToManyField.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            filter_vertical = ("users",)
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_filter_horizontal_validation(self):
        # filter_horizontal: same rules as filter_vertical.
        class ValidationTestModelAdmin(ModelAdmin):
            filter_horizontal = 10
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.filter_horizontal' must be a list or tuple.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            filter_horizontal = ("non_existent_field",)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.filter_horizontal' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            filter_horizontal = ("name",)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.filter_horizontal\[0\]' must be a ManyToManyField.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            filter_horizontal = ("users",)
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_radio_fields_validation(self):
        # radio_fields: dict mapping FK-or-choices fields to admin.HORIZONTAL
        # or admin.VERTICAL.
        class ValidationTestModelAdmin(ModelAdmin):
            radio_fields = ()
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.radio_fields' must be a dictionary.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            radio_fields = {"non_existent_field": None}
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.radio_fields' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            radio_fields = {"name": None}
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.radio_fields\['name'\]' is neither an instance of ForeignKey nor does have choices set.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            radio_fields = {"state": None}
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.radio_fields\['state'\]' is neither admin.HORIZONTAL nor admin.VERTICAL.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            radio_fields = {"state": VERTICAL}
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_prepopulated_fields_validation(self):
        # prepopulated_fields: dict of target field -> source field names;
        # DateTimeField/FK/M2M may not be prepopulated targets.
        class ValidationTestModelAdmin(ModelAdmin):
            prepopulated_fields = ()
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.prepopulated_fields' must be a dictionary.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            prepopulated_fields = {"non_existent_field": None}
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.prepopulated_fields' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            prepopulated_fields = {"slug": ("non_existent_field",)}
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.prepopulated_fields\['slug'\]\[0\]' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            prepopulated_fields = {"users": ("name",)}
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.prepopulated_fields\['users'\]' is either a DateTimeField, ForeignKey or ManyToManyField. This isn't allowed.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            prepopulated_fields = {"slug": ("name",)}
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_list_display_validation(self):
        # list_display entries must be callables, admin attributes, or model
        # fields; M2M fields are explicitly unsupported.
        class ValidationTestModelAdmin(ModelAdmin):
            list_display = 10
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_display' must be a list or tuple.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            list_display = ("non_existent_field",)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "ValidationTestModelAdmin.list_display\[0\], 'non_existent_field' is not a callable or an attribute of 'ValidationTestModelAdmin' or found in the model 'ValidationTestModel'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            list_display = ("users",)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_display\[0\]', 'users' is a ManyToManyField which is not supported.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        # Valid: field name, model method, admin method, plain callable.
        def a_callable(obj):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            def a_method(self, obj):
                pass
            list_display = ("name", "decade_published_in", "a_method", a_callable)
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_list_display_links_validation(self):
        # list_display_links entries must also appear in list_display.
        class ValidationTestModelAdmin(ModelAdmin):
            list_display_links = 10
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_display_links' must be a list or tuple.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            list_display_links = ("non_existent_field",)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'non_existent_field' which is not defined in 'list_display'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            list_display_links = ("name",)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'name' which is not defined in 'list_display'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        def a_callable(obj):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            def a_method(self, obj):
                pass
            list_display = ("name", "decade_published_in", "a_method", a_callable)
            list_display_links = ("name", "decade_published_in", "a_method", a_callable)
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_list_filter_validation(self):
        # list_filter accepts field names, ListFilter subclasses, or
        # (field_name, FieldListFilter subclass) pairs.
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = 10
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_filter' must be a list or tuple.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = ("non_existent_field",)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_filter\[0\]' refers to 'non_existent_field' which does not refer to a Field.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class RandomClass(object):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = (RandomClass,)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_filter\[0\]' is 'RandomClass' which is not a descendant of ListFilter.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = (("is_active", RandomClass),)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_filter\[0\]\[1\]' is 'RandomClass' which is not of type FieldListFilter.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        # A SimpleListFilter is still not a FieldListFilter, so it may not be
        # used in the paired (field, filter) form.
        class AwesomeFilter(SimpleListFilter):
            def get_title(self):
                return "awesomeness"
            def get_choices(self, request):
                return (
                    ("bit", "A bit awesome"),
                    ("very", "Very awesome"),
                )
            def get_query_set(self, cl, qs):
                return qs
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = (("is_active", AwesomeFilter),)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_filter\[0\]\[1\]' is 'AwesomeFilter' which is not of type FieldListFilter.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        # A bare FieldListFilter needs an associated field name.
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = (BooleanFieldListFilter,)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_filter\[0\]' is 'BooleanFieldListFilter' which is of type FieldListFilter but is not associated with a field name.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        # Valid declarations below -----------
        class ValidationTestModelAdmin(ModelAdmin):
            list_filter = (
                "is_active",
                AwesomeFilter,
                ("is_active", BooleanFieldListFilter),
                "no",
            )
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_list_per_page_validation(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_per_page = "hello"
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_per_page' should be a integer.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            list_per_page = 100
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_max_show_all_allowed_validation(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_max_show_all = "hello"
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_max_show_all' should be an integer.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            list_max_show_all = 200
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_search_fields_validation(self):
        class ValidationTestModelAdmin(ModelAdmin):
            search_fields = 10
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.search_fields' must be a list or tuple.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
    def test_date_hierarchy_validation(self):
        # date_hierarchy must name an existing DateField/DateTimeField.
        class ValidationTestModelAdmin(ModelAdmin):
            date_hierarchy = "non_existent_field"
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.date_hierarchy' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            date_hierarchy = "name"
        # NOTE(review): the expected message below has an unbalanced leading
        # quote; presumably it mirrors the exact text raised by validate() —
        # verify against the validation code before "fixing" it.
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.date_hierarchy is neither an instance of DateField nor DateTimeField.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            date_hierarchy = "pub_date"
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_ordering_validation(self):
        # ordering: list/tuple of field names; '?' (random) must stand alone.
        class ValidationTestModelAdmin(ModelAdmin):
            ordering = 10
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.ordering' must be a list or tuple.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            ordering = ("non_existent_field",)
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.ordering\[0\]' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestModel'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            ordering = ("?", "name")
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.ordering' has the random ordering marker '\?', but contains other fields as well. Please either remove '\?' or the other fields.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        # Valid: lone '?', a related-field traversal, and a plain field.
        class ValidationTestModelAdmin(ModelAdmin):
            ordering = ("?",)
        validate(ValidationTestModelAdmin, ValidationTestModel)
        class ValidationTestModelAdmin(ModelAdmin):
            ordering = ("band__name",)
        validate(ValidationTestModelAdmin, ValidationTestModel)
        class ValidationTestModelAdmin(ModelAdmin):
            ordering = ("name",)
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_list_select_related_validation(self):
        class ValidationTestModelAdmin(ModelAdmin):
            list_select_related = 1
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.list_select_related' should be a boolean.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            list_select_related = False
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_save_as_validation(self):
        class ValidationTestModelAdmin(ModelAdmin):
            save_as = 1
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.save_as' should be a boolean.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            save_as = True
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_save_on_top_validation(self):
        class ValidationTestModelAdmin(ModelAdmin):
            save_on_top = 1
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.save_on_top' should be a boolean.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestModelAdmin(ModelAdmin):
            save_on_top = True
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_inlines_validation(self):
        # inlines: list/tuple of BaseModelAdmin subclasses, each declaring a
        # 'model' attribute that is a models.Model subclass.
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = 10
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.inlines' must be a list or tuple.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestInline(object):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.inlines\[0\]' does not inherit from BaseModelAdmin.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestInline(TabularInline):
            pass
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'model' is a required attribute of 'ValidationTestModelAdmin.inlines\[0\]'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class SomethingBad(object):
            pass
        class ValidationTestInline(TabularInline):
            model = SomethingBad
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestModelAdmin.inlines\[0\].model' does not inherit from models.Model.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_fields_validation(self):
        # Inline 'fields' option: list/tuple of names present on the form.
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            fields = 10
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestInline.fields' must be a list or tuple.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            fields = ("non_existent_field",)
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestInline.fields' refers to field 'non_existent_field' that is missing from the form.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
    def test_fk_name_validation(self):
        # Inline 'fk_name' must name an existing field on the inline model.
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            fk_name = "non_existent_field"
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestInline.fk_name' refers to field 'non_existent_field' that is missing from model 'modeladmin.ValidationTestInlineModel'.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            fk_name = "parent"
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_extra_validation(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            extra = "hello"
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestInline.extra' should be a integer.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            extra = 2
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_max_num_validation(self):
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            max_num = "hello"
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestInline.max_num' should be an integer or None \(default\).",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            max_num = 2
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        validate(ValidationTestModelAdmin, ValidationTestModel)
    def test_formset_validation(self):
        # Inline 'formset' must inherit from BaseModelFormSet.
        class FakeFormSet(object):
            pass
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            formset = FakeFormSet
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        self.assertRaisesRegex(
            ImproperlyConfigured,
            "'ValidationTestInline.formset' does not inherit from BaseModelFormSet.",
            validate,
            ValidationTestModelAdmin,
            ValidationTestModel,
        )
        class RealModelFormSet(BaseModelFormSet):
            pass
        class ValidationTestInline(TabularInline):
            model = ValidationTestInlineModel
            formset = RealModelFormSet
        class ValidationTestModelAdmin(ModelAdmin):
            inlines = [ValidationTestInline]
        validate(ValidationTestModelAdmin, ValidationTestModel)
<|endoftext|> |
<|endoftext|>import os
from django.conf import settings, global_settings
from django.http import HttpRequest
from django.test import TransactionTestCase, TestCase, signals
from django.test.utils import override_settings
# @override_settings(TEST='override')
class FullyDecoratedTranTestCase(TransactionTestCase):
    """
    TransactionTestCase wrapped with override_settings(TEST='override') at
    module level (see the rebinding below this class); verifies the wrapper
    applies the override and preserves the class's name and module.
    """
    def test_override(self):
        # TEST comes from the class-level override applied below.
        self.assertEqual(settings.TEST, "override")
    @override_settings(TEST="override2")
    def test_method_override(self):
        # A method-level override wins over the class-level one.
        self.assertEqual(settings.TEST, "override2")
    def test_decorated_testcase_name(self):
        # Wrapping must not mangle the class __name__.
        self.assertEqual(
            FullyDecoratedTranTestCase.__name__, "FullyDecoratedTranTestCase"
        )
    def test_decorated_testcase_module(self):
        # Wrapping must not mangle the class __module__.
        self.assertEqual(FullyDecoratedTranTestCase.__module__, __name__)
# Apply override_settings as a plain callable (equivalent to decorating the
# class, spelled out for pre-class-decorator syntax support).
FullyDecoratedTranTestCase = override_settings(TEST="override")(
    FullyDecoratedTranTestCase
)
# @override_settings(TEST='override')
class FullyDecoratedTestCase(TestCase):
    """
    Same as FullyDecoratedTranTestCase but for the plain TestCase base:
    the class is wrapped with override_settings(TEST='override') below.
    """
    def test_override(self):
        # TEST comes from the class-level override applied below.
        self.assertEqual(settings.TEST, "override")
    @override_settings(TEST="override2")
    def test_method_override(self):
        # A method-level override wins over the class-level one.
        self.assertEqual(settings.TEST, "override2")
# Apply override_settings as a plain callable (pre-class-decorator syntax).
FullyDecoratedTestCase = override_settings(TEST="override")(FullyDecoratedTestCase)
class ClassDecoratedTestCaseSuper(TestCase):
    """
    Dummy class for testing max recursion error in child class call to
    super(). Refs #17011.
    """
    def test_max_recursion_error(self):
        # Intentionally empty; only exists so the subclass can call it via
        # super() and prove no infinite recursion occurs.
        pass
class ClassDecoratedTestCase(ClassDecoratedTestCaseSuper):
    """
    Settings overrides applied by wrapping the class with override_settings
    (see the rebinding below the class body).
    """
    def test_override(self):
        # TEST comes from the class-level override applied below.
        self.assertEqual(settings.TEST, "override")
    @override_settings(TEST="override2")
    def test_method_override(self):
        # A method-level override wins over the class-level one.
        self.assertEqual(settings.TEST, "override2")
    def test_max_recursion_error(self):
        """
        Overriding a method on a super class and then calling that method on
        the super class should not trigger infinite recursion. See #17011.
        """
        try:
            super(ClassDecoratedTestCase, self).test_max_recursion_error()
        except RuntimeError:
            # Fix: the exception was previously bound to an unused name;
            # reaching this branch at all means the recursion bug regressed,
            # so fail with an explanatory message instead of a bare fail().
            self.fail("Max recursion depth exceeded when calling the "
                      "super class method. Refs #17011.")
# Apply override_settings as a plain callable (pre-class-decorator syntax).
ClassDecoratedTestCase = override_settings(TEST="override")(ClassDecoratedTestCase)
class SettingGetter(object):
    """Snapshots settings.TEST at instantiation time.

    Used by the class-decorator tests to observe which value of TEST was
    active when an instance was created; falls back to "undefined" when the
    setting is absent.
    """
    def __init__(self):
        try:
            self.test = settings.TEST
        except AttributeError:
            self.test = "undefined"
# Module-level flag recording the most recent value the setting_changed
# signal reported for TEST; tests read it to verify the signal fired.
testvalue = None
def signal_callback(sender, setting, value, **kwargs):
    # Track only changes to the TEST setting; ignore every other setting.
    if setting == "TEST":
        global testvalue
        testvalue = value
# Connect once at import time so every test in this module observes changes.
signals.setting_changed.connect(signal_callback)
class SettingsTests(TestCase):
    """
    Tests for runtime settings manipulation: attribute set/delete on the lazy
    settings object, the override_settings decorator/context manager, and the
    setting_changed signal callback defined above.
    """
    def test_override(self):
        settings.TEST = "test"
        self.assertEqual("test", settings.TEST)
        with self.settings(TEST="override"):
            self.assertEqual("override", settings.TEST)
        # The pre-override value must be restored on context exit.
        self.assertEqual("test", settings.TEST)
        del settings.TEST
    def test_override_change(self):
        settings.TEST = "test"
        self.assertEqual("test", settings.TEST)
        with self.settings(TEST="override"):
            self.assertEqual("override", settings.TEST)
            # Reassigning inside the override is discarded on exit.
            settings.TEST = "test2"
        self.assertEqual("test", settings.TEST)
        del settings.TEST
    def test_override_doesnt_leak(self):
        self.assertRaises(AttributeError, getattr, settings, "TEST")
        with self.settings(TEST="override"):
            self.assertEqual("override", settings.TEST)
            settings.TEST = "test"
        # A setting created inside the override must not survive it.
        self.assertRaises(AttributeError, getattr, settings, "TEST")
    @override_settings(TEST="override")
    def test_decorator(self):
        self.assertEqual("override", settings.TEST)
    def test_context_manager(self):
        # override_settings used manually via enable()/disable().
        self.assertRaises(AttributeError, getattr, settings, "TEST")
        override = override_settings(TEST="override")
        # Instantiating alone must not apply the override yet.
        self.assertRaises(AttributeError, getattr, settings, "TEST")
        override.enable()
        self.assertEqual("override", settings.TEST)
        override.disable()
        self.assertRaises(AttributeError, getattr, settings, "TEST")
    def test_class_decorator(self):
        # SettingGetter snapshots settings.TEST at instantiation time.
        self.assertEqual(SettingGetter().test, "undefined")
        DecoratedSettingGetter = override_settings(TEST="override")(SettingGetter)
        self.assertEqual(DecoratedSettingGetter().test, "override")
        self.assertRaises(AttributeError, getattr, settings, "TEST")
    def test_signal_callback_context_manager(self):
        # testvalue is maintained by the module-level setting_changed receiver.
        self.assertRaises(AttributeError, getattr, settings, "TEST")
        with self.settings(TEST="override"):
            self.assertEqual(testvalue, "override")
        # The signal also fires on restore, resetting testvalue to None.
        self.assertEqual(testvalue, None)
    @override_settings(TEST="override")
    def test_signal_callback_decorator(self):
        self.assertEqual(testvalue, "override")
    #
    # Regression tests for #10130: deleting settings.
    #
    def test_settings_delete(self):
        settings.TEST = "test"
        self.assertEqual("test", settings.TEST)
        del settings.TEST
        self.assertRaises(AttributeError, getattr, settings, "TEST")
    def test_settings_delete_wrapped(self):
        # The lazy object's internal _wrapped attribute must be protected.
        self.assertRaises(TypeError, delattr, settings, "_wrapped")
    def test_allowed_include_roots_string(self):
        """
        ALLOWED_INCLUDE_ROOTS is not allowed to be incorrectly set to a string
        rather than a tuple.
        """
        self.assertRaises(
            ValueError, setattr, settings, "ALLOWED_INCLUDE_ROOTS", "/var/www/ssi/"
        )
class TrailingSlashURLTests(TestCase):
    """
    Deprecation behaviour of MEDIA_URL values that do not end in a slash.
    setUp/tearDown save and restore the original MEDIA_URL so the tests do
    not leak state.
    """
    settings_module = settings
    def setUp(self):
        self._original_media_url = self.settings_module.MEDIA_URL
    def tearDown(self):
        self.settings_module.MEDIA_URL = self._original_media_url
    def test_blank(self):
        """
        If blank, no DeprecationWarning error will be raised, even though it
        doesn't end in a slash.
        """
        self.settings_module.MEDIA_URL = ""
        self.assertEqual("", self.settings_module.MEDIA_URL)
    def test_end_slash(self):
        """
        MEDIA_URL works if you end in a slash.
        """
        self.settings_module.MEDIA_URL = "/foo/"
        self.assertEqual("/foo/", self.settings_module.MEDIA_URL)
        self.settings_module.MEDIA_URL = "http://media.foo.com/"
        self.assertEqual("http://media.foo.com/", self.settings_module.MEDIA_URL)
    def test_no_end_slash(self):
        """
        MEDIA_URL raises an DeprecationWarning error if it doesn't end in a
        slash.
        """
        import warnings
        def setattr_settings(settings_module, attr, value):
            setattr(settings_module, attr, value)
        # Fix: install the "error" filter inside catch_warnings() so it is
        # removed again on exit instead of permanently altering the global
        # warning filters for every test that runs afterwards.
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "error", "If set, MEDIA_URL must end with a slash", DeprecationWarning
            )
            self.assertRaises(
                DeprecationWarning,
                setattr_settings,
                self.settings_module,
                "MEDIA_URL",
                "/foo",
            )
            self.assertRaises(
                DeprecationWarning,
                setattr_settings,
                self.settings_module,
                "MEDIA_URL",
                "http://media.foo.com",
            )
    def test_double_slash(self):
        """
        If a MEDIA_URL ends in more than one slash, presume they know what
        they're doing.
        """
        self.settings_module.MEDIA_URL = "/stupid//"
        self.assertEqual("/stupid//", self.settings_module.MEDIA_URL)
        self.settings_module.MEDIA_URL = "http://media.foo.com/stupid//"
        self.assertEqual(
            "http://media.foo.com/stupid//", self.settings_module.MEDIA_URL
        )
class SecureProxySslHeaderTest(TestCase):
    """
    Tests for SECURE_PROXY_SSL_HEADER: HttpRequest.is_secure() should report
    True only when the configured proxy header is present with the configured
    value. setUp/tearDown save and restore the original setting.
    """
    settings_module = settings
    def setUp(self):
        self._original_setting = self.settings_module.SECURE_PROXY_SSL_HEADER
    def tearDown(self):
        self.settings_module.SECURE_PROXY_SSL_HEADER = self._original_setting
    def test_none(self):
        # Feature disabled: requests are not secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = None
        req = HttpRequest()
        self.assertEqual(req.is_secure(), False)
    def test_set_without_xheader(self):
        # Header configured but absent from the request: not secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = (
            "HTTP_X_FORWARDED_PROTOCOL",
            "https",
        )
        req = HttpRequest()
        self.assertEqual(req.is_secure(), False)
    def test_set_with_xheader_wrong(self):
        # Header present but with the wrong value: not secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = (
            "HTTP_X_FORWARDED_PROTOCOL",
            "https",
        )
        req = HttpRequest()
        req.META["HTTP_X_FORWARDED_PROTOCOL"] = "wrongvalue"
        self.assertEqual(req.is_secure(), False)
    def test_set_with_xheader_right(self):
        # Header present with the configured value: secure.
        self.settings_module.SECURE_PROXY_SSL_HEADER = (
            "HTTP_X_FORWARDED_PROTOCOL",
            "https",
        )
        req = HttpRequest()
        req.META["HTTP_X_FORWARDED_PROTOCOL"] = "https"
        self.assertEqual(req.is_secure(), True)
class EnvironmentVariableTest(TestCase):
    """
    Ensures proper settings file is used in setup_environ if
    DJANGO_SETTINGS_MODULE is set in the environment.
    """
    def setUp(self):
        # Remember the caller's DJANGO_SETTINGS_MODULE so it can be restored.
        self.original_value = os.environ.get("DJANGO_SETTINGS_MODULE")
    def tearDown(self):
        # Restore or remove the environment variable to avoid leaking state.
        if self.original_value:
            os.environ["DJANGO_SETTINGS_MODULE"] = self.original_value
        elif "DJANGO_SETTINGS_MODULE" in os.environ:
            del os.environ["DJANGO_SETTINGS_MODULE"]
    def test_env_var_used(self):
        """
        If the environment variable is set, do not ignore it. However, the
        kwarg original_settings_path takes precedence.
        This tests both plus the default (neither set).
        """
        from django.core.management import setup_environ
        # whatever was already there
        original_module = os.environ.get("DJANGO_SETTINGS_MODULE", "the default")
        # environment variable set by user
        user_override = "custom.settings"
        # optional argument to setup_environ
        orig_path = "original.path"
        # expect default
        setup_environ(global_settings)
        self.assertEqual(os.environ.get("DJANGO_SETTINGS_MODULE"), original_module)
        # override with environment variable
        os.environ["DJANGO_SETTINGS_MODULE"] = user_override
        setup_environ(global_settings)
        self.assertEqual(os.environ.get("DJANGO_SETTINGS_MODULE"), user_override)
        # pass in original_settings_path (should take precedence)
        os.environ["DJANGO_SETTINGS_MODULE"] = user_override
        setup_environ(global_settings, original_settings_path=orig_path)
        self.assertEqual(os.environ.get("DJANGO_SETTINGS_MODULE"), orig_path)
<|endoftext|> |
<|endoftext|>import operator
from django import template
from django.template.defaultfilters import stringfilter
from django.template.loader import get_template
# Shared tag/filter registry; every definition below registers against it.
register = template.Library()
@register.filter
@stringfilter
def trim(value, num):
    # Keep only the first ``num`` characters of the (stringified) value.
    truncated = value[:num]
    return truncated
@register.simple_tag
def no_params():
    """Expected no_params __doc__"""
    # NOTE: the docstring above and the .anything attribute below are
    # presumably asserted verbatim by the template-tag tests — do not reword.
    result = "no_params - Expected result"
    return result
no_params.anything = "Expected no_params __dict__"
@register.simple_tag
def one_param(arg):
    """Expected one_param __doc__"""
    # Interpolate the single argument into the canonical result string.
    rendered = "one_param - Expected result: %s" % arg
    return rendered
one_param.anything = "Expected one_param __dict__"
@register.simple_tag(takes_context=False)
def explicit_no_context(arg):
    """Expected explicit_no_context __doc__"""
    # takes_context=False is spelled out explicitly here to exercise that
    # code path in simple_tag registration.
    rendered = "explicit_no_context - Expected result: %s" % arg
    return rendered
explicit_no_context.anything = "Expected explicit_no_context __dict__"
@register.simple_tag(takes_context=True)
def no_params_with_context(context):
    """Expected no_params_with_context __doc__"""
    # With takes_context=True the rendering context is passed as the first
    # positional argument; read the 'value' key from it.
    ctx_value = context["value"]
    return "no_params_with_context - Expected result (context value: %s)" % ctx_value
no_params_with_context.anything = "Expected no_params_with_context __dict__"
@register.simple_tag(takes_context=True)
def params_and_context(context, arg):
    """Expected params_and_context __doc__"""
    value = context["value"]
    return "params_and_context - Expected result (context value: %s): %s" % (value, arg)


params_and_context.anything = "Expected params_and_context __dict__"
@register.simple_tag
def simple_two_params(one, two):
    """Expected simple_two_params __doc__"""
    pair = (one, two)
    return "simple_two_params - Expected result: %s, %s" % pair


simple_two_params.anything = "Expected simple_two_params __dict__"
@register.simple_tag
def simple_one_default(one, two="hi"):
    """Expected simple_one_default __doc__"""
    # Default value is introspected by the tag machinery; must stay "hi".
    return "simple_one_default - Expected result: %s, %s" % (one, two)


simple_one_default.anything = "Expected simple_one_default __dict__"
@register.simple_tag
def simple_unlimited_args(one, two="hi", *args):
    """Expected simple_unlimited_args __doc__"""
    # Render all positional arguments, in order, comma-separated.
    rendered = ", ".join(str(arg) for arg in (one, two) + args)
    return "simple_unlimited_args - Expected result: %s" % rendered


simple_unlimited_args.anything = "Expected simple_unlimited_args __dict__"
@register.simple_tag
def simple_only_unlimited_args(*args):
    """Expected simple_only_unlimited_args __doc__"""
    rendered = ", ".join(map(str, args))
    return "simple_only_unlimited_args - Expected result: %s" % rendered


simple_only_unlimited_args.anything = "Expected simple_only_unlimited_args __dict__"
@register.simple_tag
def simple_unlimited_args_kwargs(one, two="hi", *args, **kwargs):
    """Expected simple_unlimited_args_kwargs __doc__"""
    # Sort the dictionary by key to guarantee the order for testing.
    # (The redundant iter() wrapper around kwargs.items() -- a 2to3
    # conversion artifact -- is dropped; sorted() takes any iterable.)
    sorted_kwarg = sorted(kwargs.items(), key=operator.itemgetter(0))
    return "simple_unlimited_args_kwargs - Expected result: %s / %s" % (
        ", ".join([str(arg) for arg in [one, two] + list(args)]),
        ", ".join(["%s=%s" % (k, v) for (k, v) in sorted_kwarg]),
    )


simple_unlimited_args_kwargs.anything = "Expected simple_unlimited_args_kwargs __dict__"
@register.simple_tag(takes_context=True)
def simple_tag_without_context_parameter(arg):
    """Expected simple_tag_without_context_parameter __doc__"""
    # Deliberately broken fixture: takes_context=True but no leading
    # ``context`` parameter; registration-time error handling is tested.
    return "Expected result"


simple_tag_without_context_parameter.anything = (
    "Expected simple_tag_without_context_parameter __dict__"
)
@register.simple_tag(takes_context=True)
def current_app(context):
    # Render the context's current_app exactly as "%s" formatting would.
    return str(context.current_app)
@register.simple_tag(takes_context=True)
def use_l10n(context):
    # Render the context's use_l10n flag exactly as "%s" formatting would.
    return str(context.use_l10n)
@register.simple_tag(name="minustwo")
def minustwo_overridden_name(value):
    # Registered under the explicit name "minustwo", not the function name.
    return value - 2


# A lambda registered directly; only usable in templates as "minusone".
register.simple_tag(lambda x: x - 1, name="minusone")
@register.inclusion_tag("inclusion.html")
def inclusion_no_params():
    """Expected inclusion_no_params __doc__"""
    # Returned dict becomes the rendering context for inclusion.html.
    return {"result": "inclusion_no_params - Expected result"}


inclusion_no_params.anything = "Expected inclusion_no_params __dict__"
@register.inclusion_tag(get_template("inclusion.html"))
def inclusion_no_params_from_template():
    """Expected inclusion_no_params_from_template __doc__"""
    # Same as inclusion_no_params but registered with a Template instance.
    return {"result": "inclusion_no_params_from_template - Expected result"}


inclusion_no_params_from_template.anything = (
    "Expected inclusion_no_params_from_template __dict__"
)
@register.inclusion_tag("inclusion.html")
def inclusion_one_param(arg):
    """Expected inclusion_one_param __doc__"""
    result = "inclusion_one_param - Expected result: %s" % arg
    return {"result": result}


inclusion_one_param.anything = "Expected inclusion_one_param __dict__"
@register.inclusion_tag(get_template("inclusion.html"))
def inclusion_one_param_from_template(arg):
    """Expected inclusion_one_param_from_template __doc__"""
    # Template-instance variant of inclusion_one_param.
    return {"result": "inclusion_one_param_from_template - Expected result: %s" % arg}


inclusion_one_param_from_template.anything = (
    "Expected inclusion_one_param_from_template __dict__"
)
@register.inclusion_tag("inclusion.html", takes_context=False)
def inclusion_explicit_no_context(arg):
    """Expected inclusion_explicit_no_context __doc__"""
    # takes_context=False is the default; passed explicitly to exercise it.
    return {"result": "inclusion_explicit_no_context - Expected result: %s" % arg}


inclusion_explicit_no_context.anything = (
    "Expected inclusion_explicit_no_context __dict__"
)
@register.inclusion_tag(get_template("inclusion.html"), takes_context=False)
def inclusion_explicit_no_context_from_template(arg):
    """Expected inclusion_explicit_no_context_from_template __doc__"""
    # Template-instance variant of inclusion_explicit_no_context.
    return {
        "result": "inclusion_explicit_no_context_from_template - Expected result: %s"
        % arg
    }


inclusion_explicit_no_context_from_template.anything = (
    "Expected inclusion_explicit_no_context_from_template __dict__"
)
@register.inclusion_tag("inclusion.html", takes_context=True)
def inclusion_no_params_with_context(context):
    """Expected inclusion_no_params_with_context __doc__"""
    message = (
        "inclusion_no_params_with_context - Expected result (context value: %s)"
        % context["value"]
    )
    return {"result": message}


inclusion_no_params_with_context.anything = (
    "Expected inclusion_no_params_with_context __dict__"
)
@register.inclusion_tag(get_template("inclusion.html"), takes_context=True)
def inclusion_no_params_with_context_from_template(context):
    """Expected inclusion_no_params_with_context_from_template __doc__"""
    # Template-instance variant of inclusion_no_params_with_context.
    return {
        "result": "inclusion_no_params_with_context_from_template - Expected result (context value: %s)"
        % context["value"]
    }


inclusion_no_params_with_context_from_template.anything = (
    "Expected inclusion_no_params_with_context_from_template __dict__"
)
@register.inclusion_tag("inclusion.html", takes_context=True)
def inclusion_params_and_context(context, arg):
    """Expected inclusion_params_and_context __doc__"""
    message = "inclusion_params_and_context - Expected result (context value: %s): %s" % (
        context["value"],
        arg,
    )
    return {"result": message}


inclusion_params_and_context.anything = "Expected inclusion_params_and_context __dict__"
@register.inclusion_tag(get_template("inclusion.html"), takes_context=True)
def inclusion_params_and_context_from_template(context, arg):
    """Expected inclusion_params_and_context_from_template __doc__"""
    # Template-instance variant of inclusion_params_and_context.
    return {
        "result": "inclusion_params_and_context_from_template - Expected result (context value: %s): %s"
        % (context["value"], arg)
    }


inclusion_params_and_context_from_template.anything = (
    "Expected inclusion_params_and_context_from_template __dict__"
)
@register.inclusion_tag("inclusion.html")
def inclusion_two_params(one, two):
    """Expected inclusion_two_params __doc__"""
    pair = (one, two)
    return {"result": "inclusion_two_params - Expected result: %s, %s" % pair}


inclusion_two_params.anything = "Expected inclusion_two_params __dict__"
@register.inclusion_tag(get_template("inclusion.html"))
def inclusion_two_params_from_template(one, two):
    """Expected inclusion_two_params_from_template __doc__"""
    # Template-instance variant of inclusion_two_params.
    return {
        "result": "inclusion_two_params_from_template - Expected result: %s, %s"
        % (one, two)
    }


inclusion_two_params_from_template.anything = (
    "Expected inclusion_two_params_from_template __dict__"
)
@register.inclusion_tag("inclusion.html")
def inclusion_one_default(one, two="hi"):
    """Expected inclusion_one_default __doc__"""
    # Default value is introspected by the tag machinery; must stay "hi".
    return {"result": "inclusion_one_default - Expected result: %s, %s" % (one, two)}


inclusion_one_default.anything = "Expected inclusion_one_default __dict__"
@register.inclusion_tag(get_template("inclusion.html"))
def inclusion_one_default_from_template(one, two="hi"):
    """Expected inclusion_one_default_from_template __doc__"""
    # Template-instance variant of inclusion_one_default.
    return {
        "result": "inclusion_one_default_from_template - Expected result: %s, %s"
        % (one, two)
    }


inclusion_one_default_from_template.anything = (
    "Expected inclusion_one_default_from_template __dict__"
)
@register.inclusion_tag("inclusion.html")
def inclusion_unlimited_args(one, two="hi", *args):
    """Expected inclusion_unlimited_args __doc__"""
    rendered = ", ".join(str(arg) for arg in (one, two) + args)
    return {"result": "inclusion_unlimited_args - Expected result: %s" % rendered}


inclusion_unlimited_args.anything = "Expected inclusion_unlimited_args __dict__"
@register.inclusion_tag(get_template("inclusion.html"))
def inclusion_unlimited_args_from_template(one, two="hi", *args):
    """Expected inclusion_unlimited_args_from_template __doc__"""
    # Template-instance variant of inclusion_unlimited_args.
    return {
        "result": "inclusion_unlimited_args_from_template - Expected result: %s"
        % (", ".join([str(arg) for arg in [one, two] + list(args)]))
    }


inclusion_unlimited_args_from_template.anything = (
    "Expected inclusion_unlimited_args_from_template __dict__"
)
@register.inclusion_tag("inclusion.html")
def inclusion_only_unlimited_args(*args):
    """Expected inclusion_only_unlimited_args __doc__"""
    rendered = ", ".join(map(str, args))
    return {"result": "inclusion_only_unlimited_args - Expected result: %s" % rendered}


inclusion_only_unlimited_args.anything = (
    "Expected inclusion_only_unlimited_args __dict__"
)
@register.inclusion_tag(get_template("inclusion.html"))
def inclusion_only_unlimited_args_from_template(*args):
    """Expected inclusion_only_unlimited_args_from_template __doc__"""
    # Template-instance variant of inclusion_only_unlimited_args.
    return {
        "result": "inclusion_only_unlimited_args_from_template - Expected result: %s"
        % (", ".join([str(arg) for arg in args]))
    }


inclusion_only_unlimited_args_from_template.anything = (
    "Expected inclusion_only_unlimited_args_from_template __dict__"
)
@register.inclusion_tag("test_incl_tag_current_app.html", takes_context=True)
def inclusion_tag_current_app(context):
    """Expected inclusion_tag_current_app __doc__"""
    # Empty extra context: the rendered template itself reads current_app.
    return {}


inclusion_tag_current_app.anything = "Expected inclusion_tag_current_app __dict__"
@register.inclusion_tag("test_incl_tag_use_l10n.html", takes_context=True)
def inclusion_tag_use_l10n(context):
    """Expected inclusion_tag_use_l10n __doc__"""
    # Empty extra context: the rendered template itself reads use_l10n.
    return {}


inclusion_tag_use_l10n.anything = "Expected inclusion_tag_use_l10n __dict__"
@register.inclusion_tag("inclusion.html")
def inclusion_unlimited_args_kwargs(one, two="hi", *args, **kwargs):
    """Expected inclusion_unlimited_args_kwargs __doc__"""
    # Sort the dictionary by key to guarantee the order for testing.
    # (Dropped the redundant iter() around kwargs.items(), a 2to3 artifact.)
    sorted_kwarg = sorted(kwargs.items(), key=operator.itemgetter(0))
    return {
        "result": "inclusion_unlimited_args_kwargs - Expected result: %s / %s"
        % (
            ", ".join([str(arg) for arg in [one, two] + list(args)]),
            ", ".join(["%s=%s" % (k, v) for (k, v) in sorted_kwarg]),
        )
    }


inclusion_unlimited_args_kwargs.anything = (
    "Expected inclusion_unlimited_args_kwargs __dict__"
)
@register.inclusion_tag("inclusion.html", takes_context=True)
def inclusion_tag_without_context_parameter(arg):
    """Expected inclusion_tag_without_context_parameter __doc__"""
    # Deliberately broken fixture: takes_context=True but no leading
    # ``context`` parameter; registration-time error handling is tested.
    return {}


inclusion_tag_without_context_parameter.anything = (
    "Expected inclusion_tag_without_context_parameter __dict__"
)
@register.assignment_tag
def assignment_no_params():
    """Expected assignment_no_params __doc__"""
    # Assignment tags store their result in a context variable ("as var").
    return "assignment_no_params - Expected result"


assignment_no_params.anything = "Expected assignment_no_params __dict__"
@register.assignment_tag
def assignment_one_param(arg):
    """Expected assignment_one_param __doc__"""
    fmt = "assignment_one_param - Expected result: %s"
    return fmt % arg


assignment_one_param.anything = "Expected assignment_one_param __dict__"
@register.assignment_tag(takes_context=False)
def assignment_explicit_no_context(arg):
    """Expected assignment_explicit_no_context __doc__"""
    # takes_context=False is the default; passed explicitly to exercise it.
    return "assignment_explicit_no_context - Expected result: %s" % arg


assignment_explicit_no_context.anything = (
    "Expected assignment_explicit_no_context __dict__"
)
@register.assignment_tag(takes_context=True)
def assignment_no_params_with_context(context):
    """Expected assignment_no_params_with_context __doc__"""
    value = context["value"]
    return "assignment_no_params_with_context - Expected result (context value: %s)" % value


assignment_no_params_with_context.anything = (
    "Expected assignment_no_params_with_context __dict__"
)
@register.assignment_tag(takes_context=True)
def assignment_params_and_context(context, arg):
    """Expected assignment_params_and_context __doc__"""
    # Context is received first, explicit arguments after it.
    return "assignment_params_and_context - Expected result (context value: %s): %s" % (
        context["value"],
        arg,
    )


assignment_params_and_context.anything = (
    "Expected assignment_params_and_context __dict__"
)
@register.assignment_tag
def assignment_two_params(one, two):
    """Expected assignment_two_params __doc__"""
    pair = (one, two)
    return "assignment_two_params - Expected result: %s, %s" % pair


assignment_two_params.anything = "Expected assignment_two_params __dict__"
@register.assignment_tag
def assignment_one_default(one, two="hi"):
    """Expected assignment_one_default __doc__"""
    # Default value is introspected by the tag machinery; must stay "hi".
    return "assignment_one_default - Expected result: %s, %s" % (one, two)


assignment_one_default.anything = "Expected assignment_one_default __dict__"
@register.assignment_tag
def assignment_unlimited_args(one, two="hi", *args):
    """Expected assignment_unlimited_args __doc__"""
    rendered = ", ".join(str(arg) for arg in (one, two) + args)
    return "assignment_unlimited_args - Expected result: %s" % rendered


assignment_unlimited_args.anything = "Expected assignment_unlimited_args __dict__"
@register.assignment_tag
def assignment_only_unlimited_args(*args):
    """Expected assignment_only_unlimited_args __doc__"""
    rendered = ", ".join(map(str, args))
    return "assignment_only_unlimited_args - Expected result: %s" % rendered


assignment_only_unlimited_args.anything = (
    "Expected assignment_only_unlimited_args __dict__"
)
@register.assignment_tag
def assignment_unlimited_args_kwargs(one, two="hi", *args, **kwargs):
    """Expected assignment_unlimited_args_kwargs __doc__"""
    # Sort the dictionary by key to guarantee the order for testing.
    # (Dropped the redundant iter() around kwargs.items(), a 2to3 artifact.)
    sorted_kwarg = sorted(kwargs.items(), key=operator.itemgetter(0))
    return "assignment_unlimited_args_kwargs - Expected result: %s / %s" % (
        ", ".join([str(arg) for arg in [one, two] + list(args)]),
        ", ".join(["%s=%s" % (k, v) for (k, v) in sorted_kwarg]),
    )


assignment_unlimited_args_kwargs.anything = (
    "Expected assignment_unlimited_args_kwargs __dict__"
)
@register.assignment_tag(takes_context=True)
def assignment_tag_without_context_parameter(arg):
    """Expected assignment_tag_without_context_parameter __doc__"""
    # Deliberately broken fixture: takes_context=True but no leading
    # ``context`` parameter; registration-time error handling is tested.
    return "Expected result"


assignment_tag_without_context_parameter.anything = (
    "Expected assignment_tag_without_context_parameter __dict__"
)
<|endoftext|> |
<|endoftext|>from django.conf.urls import patterns, url, include
from .views import empty_view
# URL fixtures for the reverse()-resolution regression tests.
urlpatterns = patterns(
    "",
    url(r"^$", empty_view, name="named-url1"),
    url(r"^extra/(?P<extra>\w+)/$", empty_view, name="named-url2"),
    # Deliberately unnamed pattern whose regex alternation puts the two
    # groups on either side of "|"; used to exercise reverse() edge cases.
    url(r"^(?P<one>\d+)|(?P<two>\d+)/$", empty_view),
    (r"^included/", include("regressiontests.urlpatterns_reverse.included_named_urls")),
)
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
# This works exactly like 2to3, except that it uses Django's fixers rather
# than 2to3's built-in fixers.
import sys
from lib2to3.main import main
sys.exit(main("django.utils.2to3_fixes"))
<|endoftext|> |
<|endoftext|>"""
This encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
import datetime
from django.db import models
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils.encoding import smart_text, force_text
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.admin.util import (
get_model_from_relation,
reverse_field_path,
get_limit_choices_to_from_path,
prepare_lookup_value,
)
from django.contrib.admin.options import IncorrectLookupParameters
class ListFilter(object):
    """
    Base class for admin changelist filters ("list_filter" entries).

    Subclasses must set ``title`` and implement the four methods below.
    """

    title = None  # Human-readable title to appear in the right sidebar.
    template = "admin/filter.html"

    def __init__(self, request, params, model, model_admin):
        # This dictionary will eventually contain the request's query string
        # parameters actually used by this filter.
        self.used_parameters = {}
        if self.title is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify "
                "a 'title'." % self.__class__.__name__
            )

    def has_output(self):
        """
        Returns True if some choices would be output for this filter.
        """
        raise NotImplementedError

    def choices(self, cl):
        """
        Returns choices ready to be output in the template.
        """
        raise NotImplementedError

    def queryset(self, request, queryset):
        """
        Returns the filtered queryset.
        """
        raise NotImplementedError

    def expected_parameters(self):
        """
        Returns the list of parameter names that are expected from the
        request's query string and that will be used by this filter.
        """
        raise NotImplementedError
class SimpleListFilter(ListFilter):
    """
    A filter defined entirely in Python: subclasses provide
    ``parameter_name``, ``lookups()`` and ``queryset()``.
    """

    # The parameter that should be used in the query string for that filter.
    parameter_name = None

    def __init__(self, request, params, model, model_admin):
        super(SimpleListFilter, self).__init__(request, params, model, model_admin)
        if self.parameter_name is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify "
                "a 'parameter_name'." % self.__class__.__name__
            )
        lookup_choices = self.lookups(request, model_admin)
        if lookup_choices is None:
            lookup_choices = ()
        self.lookup_choices = list(lookup_choices)
        if self.parameter_name in params:
            # Claim our parameter from the query string so the changelist
            # does not treat it as an invalid field lookup.
            value = params.pop(self.parameter_name)
            self.used_parameters[self.parameter_name] = value

    def has_output(self):
        return len(self.lookup_choices) > 0

    def value(self):
        """
        Returns the value (in string format) provided in the request's
        query string for this filter, if any. If the value wasn't provided then
        returns None.
        """
        return self.used_parameters.get(self.parameter_name, None)

    def lookups(self, request, model_admin):
        """
        Must be overridden to return a list of tuples (value, verbose value)
        """
        raise NotImplementedError

    def expected_parameters(self):
        return [self.parameter_name]

    def choices(self, cl):
        # First choice resets the filter ("All").
        yield {
            "selected": self.value() is None,
            "query_string": cl.get_query_string({}, [self.parameter_name]),
            "display": _("All"),
        }
        for lookup, title in self.lookup_choices:
            yield {
                "selected": self.value() == force_text(lookup),
                "query_string": cl.get_query_string(
                    {
                        self.parameter_name: lookup,
                    },
                    [],
                ),
                "display": title,
            }
class FieldListFilter(ListFilter):
    """
    Base class for filters tied to a model field; the concrete subclass is
    picked through the ``register()``/``create()`` registry below.
    """

    _field_list_filters = []  # registry of (test, list_filter_class) pairs
    _take_priority_index = 0  # insertion point for high-priority entries

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field = field
        self.field_path = field_path
        self.title = getattr(field, "verbose_name", field_path)
        super(FieldListFilter, self).__init__(request, params, model, model_admin)
        for p in self.expected_parameters():
            if p in params:
                value = params.pop(p)
                self.used_parameters[p] = prepare_lookup_value(p, value)

    def has_output(self):
        return True

    def queryset(self, request, queryset):
        try:
            return queryset.filter(**self.used_parameters)
        except ValidationError as e:
            # Invalid lookup values (e.g. a malformed date) surface as a
            # changelist redirect rather than a server error.
            raise IncorrectLookupParameters(e)

    @classmethod
    def register(cls, test, list_filter_class, take_priority=False):
        if take_priority:
            # This is to allow overriding the default filters for certain types
            # of fields with some custom filters. The first found in the list
            # is used in priority.
            cls._field_list_filters.insert(
                cls._take_priority_index, (test, list_filter_class)
            )
            cls._take_priority_index += 1
        else:
            cls._field_list_filters.append((test, list_filter_class))

    @classmethod
    def create(cls, field, request, params, model, model_admin, field_path):
        # Instantiate the first registered filter whose test accepts the
        # field; falls through (returns None) when no test matches.
        for test, list_filter_class in cls._field_list_filters:
            if not test(field):
                continue
            return list_filter_class(
                field, request, params, model, model_admin, field_path=field_path
            )
class RelatedFieldListFilter(FieldListFilter):
    """
    Filters on a relation (FK/M2M or reverse) by the related object's key,
    with an extra "empty" choice when the relation is nullable.
    """

    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        if hasattr(field, "rel"):
            # Forward relation: filter on the field the relation targets.
            rel_name = field.rel.get_related_field().name
        else:
            # Reverse relation: fall back to the related model's pk.
            rel_name = other_model._meta.pk.name
        self.lookup_kwarg = "%s__%s__exact" % (field_path, rel_name)
        self.lookup_kwarg_isnull = "%s__isnull" % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull, None)
        self.lookup_choices = field.get_choices(include_blank=False)
        super(RelatedFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path
        )
        if hasattr(field, "verbose_name"):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title

    def has_output(self):
        # Only show the filter when there is more than one choice; a
        # nullable relation contributes one extra ("empty") choice.
        if (
            isinstance(self.field, models.related.RelatedObject)
            and self.field.field.null
            or hasattr(self.field, "rel")
            and self.field.null
        ):
            extra = 1
        else:
            extra = 0
        return len(self.lookup_choices) + extra > 1

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]

    def choices(self, cl):
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE

        # "All" clears both the exact and the isnull lookups.
        yield {
            "selected": self.lookup_val is None and not self.lookup_val_isnull,
            "query_string": cl.get_query_string(
                {}, [self.lookup_kwarg, self.lookup_kwarg_isnull]
            ),
            "display": _("All"),
        }
        for pk_val, val in self.lookup_choices:
            yield {
                "selected": self.lookup_val == smart_text(pk_val),
                "query_string": cl.get_query_string(
                    {
                        self.lookup_kwarg: pk_val,
                    },
                    [self.lookup_kwarg_isnull],
                ),
                "display": val,
            }
        if (
            isinstance(self.field, models.related.RelatedObject)
            and self.field.field.null
            or hasattr(self.field, "rel")
            and self.field.null
        ):
            yield {
                "selected": bool(self.lookup_val_isnull),
                "query_string": cl.get_query_string(
                    {
                        self.lookup_kwarg_isnull: "True",
                    },
                    [self.lookup_kwarg],
                ),
                "display": EMPTY_CHANGELIST_VALUE,
            }


# Matches forward relations (f.rel) and reverse RelatedObjects.
FieldListFilter.register(
    lambda f: (
        hasattr(f, "rel") and bool(f.rel) or isinstance(f, models.related.RelatedObject)
    ),
    RelatedFieldListFilter,
)
class BooleanFieldListFilter(FieldListFilter):
    """
    All/Yes/No choices for boolean fields, plus "Unknown" (isnull) for
    NullBooleanField.
    """

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = "%s__exact" % field_path
        self.lookup_kwarg2 = "%s__isnull" % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)
        super(BooleanFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path
        )

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg2]

    def choices(self, cl):
        # "1"/"0" mirror what the query string carries for True/False.
        for lookup, title in ((None, _("All")), ("1", _("Yes")), ("0", _("No"))):
            yield {
                "selected": self.lookup_val == lookup and not self.lookup_val2,
                "query_string": cl.get_query_string(
                    {
                        self.lookup_kwarg: lookup,
                    },
                    [self.lookup_kwarg2],
                ),
                "display": title,
            }
        if isinstance(self.field, models.NullBooleanField):
            yield {
                "selected": self.lookup_val2 == "True",
                "query_string": cl.get_query_string(
                    {
                        self.lookup_kwarg2: "True",
                    },
                    [self.lookup_kwarg],
                ),
                "display": _("Unknown"),
            }


FieldListFilter.register(
    lambda f: isinstance(f, (models.BooleanField, models.NullBooleanField)),
    BooleanFieldListFilter,
)
class ChoicesFieldListFilter(FieldListFilter):
    """
    One filter choice per entry of the field's ``choices`` option.
    """

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = "%s__exact" % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        super(ChoicesFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path
        )

    def expected_parameters(self):
        return [self.lookup_kwarg]

    def choices(self, cl):
        yield {
            "selected": self.lookup_val is None,
            "query_string": cl.get_query_string({}, [self.lookup_kwarg]),
            "display": _("All"),
        }
        # flatchoices flattens optgroup-style nested choices.
        for lookup, title in self.field.flatchoices:
            yield {
                "selected": smart_text(lookup) == self.lookup_val,
                "query_string": cl.get_query_string({self.lookup_kwarg: lookup}),
                "display": title,
            }


FieldListFilter.register(lambda f: bool(f.choices), ChoicesFieldListFilter)
class DateFieldListFilter(FieldListFilter):
    """
    Filters a DateField/DateTimeField by common ranges (today, past 7 days,
    this month, this year) relative to the current date.
    """

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field_generic = "%s__" % field_path
        # Collect every query-string parameter aimed at this field
        # (e.g. "pub_date__gte") so the selected link can be recognized.
        # (Replaces the non-idiomatic dict([(k, v) for ...]) construction
        # with a dict comprehension; behavior is identical.)
        self.date_params = {
            k: v for k, v in params.items() if k.startswith(self.field_generic)
        }

        now = timezone.now()
        # When time zone support is enabled, convert "now" to the user's time
        # zone so Django's definition of "Today" matches what the user expects.
        if timezone.is_aware(now):
            now = timezone.localtime(now)

        if isinstance(field, models.DateTimeField):
            today = now.replace(hour=0, minute=0, second=0, microsecond=0)
        else:  # field is a models.DateField
            today = now.date()
        tomorrow = today + datetime.timedelta(days=1)

        self.lookup_kwarg_since = "%s__gte" % field_path
        self.lookup_kwarg_until = "%s__lt" % field_path
        # Each link is (title, lookup params); ranges are half-open
        # [since, until), so "Today" means today <= value < tomorrow.
        self.links = (
            (_("Any date"), {}),
            (
                _("Today"),
                {
                    self.lookup_kwarg_since: str(today),
                    self.lookup_kwarg_until: str(tomorrow),
                },
            ),
            (
                _("Past 7 days"),
                {
                    self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)),
                    self.lookup_kwarg_until: str(tomorrow),
                },
            ),
            (
                _("This month"),
                {
                    self.lookup_kwarg_since: str(today.replace(day=1)),
                    self.lookup_kwarg_until: str(tomorrow),
                },
            ),
            (
                _("This year"),
                {
                    self.lookup_kwarg_since: str(today.replace(month=1, day=1)),
                    self.lookup_kwarg_until: str(tomorrow),
                },
            ),
        )
        super(DateFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path
        )

    def expected_parameters(self):
        return [self.lookup_kwarg_since, self.lookup_kwarg_until]

    def choices(self, cl):
        for title, param_dict in self.links:
            yield {
                "selected": self.date_params == param_dict,
                "query_string": cl.get_query_string(param_dict, [self.field_generic]),
                "display": title,
            }


FieldListFilter.register(lambda f: isinstance(f, models.DateField), DateFieldListFilter)
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldListFilter, that'd be much
# more appropriate, and the AllValuesFieldListFilter won't get used for it.
class AllValuesFieldListFilter(FieldListFilter):
    """
    Fallback filter: one choice per distinct value of the field.
    """

    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = field_path
        self.lookup_kwarg_isnull = "%s__isnull" % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull, None)
        parent_model, reverse_path = reverse_field_path(model, field_path)
        queryset = parent_model._default_manager.all()
        # optional feature: limit choices based on existing relationships
        # queryset = queryset.complex_filter(
        #     {'%s__isnull' % reverse_path: False})
        limit_choices_to = get_limit_choices_to_from_path(model, field_path)
        queryset = queryset.filter(limit_choices_to)

        self.lookup_choices = (
            queryset.distinct().order_by(field.name).values_list(field.name, flat=True)
        )
        super(AllValuesFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path
        )

    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]

    def choices(self, cl):
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE

        yield {
            "selected": (self.lookup_val is None and self.lookup_val_isnull is None),
            "query_string": cl.get_query_string(
                {}, [self.lookup_kwarg, self.lookup_kwarg_isnull]
            ),
            "display": _("All"),
        }
        include_none = False
        for val in self.lookup_choices:
            if val is None:
                # NULLs are collapsed into a single "empty" choice, last.
                include_none = True
                continue
            val = smart_text(val)
            yield {
                "selected": self.lookup_val == val,
                "query_string": cl.get_query_string(
                    {
                        self.lookup_kwarg: val,
                    },
                    [self.lookup_kwarg_isnull],
                ),
                "display": val,
            }
        if include_none:
            yield {
                "selected": bool(self.lookup_val_isnull),
                "query_string": cl.get_query_string(
                    {
                        self.lookup_kwarg_isnull: "True",
                    },
                    [self.lookup_kwarg],
                ),
                "display": EMPTY_CHANGELIST_VALUE,
            }


# Must stay last so that more specific filters win.
FieldListFilter.register(lambda f: True, AllValuesFieldListFilter)
<|endoftext|> |
<|endoftext|>"""
Creates permissions for all installed apps that need permissions.
"""
import getpass
import locale
import unicodedata
from django.contrib.auth import models as auth_app, get_user_model
from django.core import exceptions
from django.core.management.base import CommandError
from django.db import DEFAULT_DB_ALIAS, router
from django.db.models import get_models, signals
from django.utils import six
from django.utils.six.moves import input
def _get_permission_codename(action, opts):
return "%s_%s" % (action, opts.object_name.lower())
def _get_all_permissions(opts, ctype):
    """
    Returns (codename, name) for all permissions in the given opts.

    Combines the autogenerated add/change/delete permissions with those
    declared in Meta.permissions, checking for duplicates between the two.
    """
    autogenerated = _get_builtin_permissions(opts)
    declared = list(opts.permissions)
    _check_permission_clashing(declared, autogenerated, ctype)
    return autogenerated + declared
def _get_builtin_permissions(opts):
    """
    Returns (codename, name) for all autogenerated permissions.
    """
    return [
        (
            _get_permission_codename(action, opts),
            "Can %s %s" % (action, opts.verbose_name_raw),
        )
        for action in ("add", "change", "delete")
    ]
def _check_permission_clashing(custom, builtin, ctype):
"""
Check that permissions for a model do not clash. Raises CommandError if
there are duplicate permissions.
"""
pool = set()
builtin_codenames = set(p[0] for p in builtin)
for codename, _name in custom:
if codename in pool:
raise CommandError(
"The permission codename '%s' is duplicated for model '%s.%s'."
% (codename, ctype.app_label, ctype.model_class().__name__)
)
elif codename in builtin_codenames:
raise CommandError(
"The permission codename '%s' clashes with a builtin permission "
"for model '%s.%s'."
% (codename, ctype.app_label, ctype.model_class().__name__)
)
pool.add(codename)
def create_permissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
    """
    post_syncdb handler: create any missing Permission rows for every model
    of ``app`` in database ``db``.
    """
    if not router.allow_syncdb(db, auth_app.Permission):
        return

    from django.contrib.contenttypes.models import ContentType

    app_models = get_models(app)

    # This will hold the permissions we're looking for as
    # (content_type, (codename, name))
    searched_perms = []
    # The codenames and ctypes that should exist.
    ctypes = set()
    for klass in app_models:
        # Force looking up the content types in the current database
        # before creating foreign keys to them.
        ctype = ContentType.objects.db_manager(db).get_for_model(klass)
        ctypes.add(ctype)
        for perm in _get_all_permissions(klass._meta, ctype):
            searched_perms.append((ctype, perm))

    # Find all the Permissions that have a content_type for a model we're
    # looking for. We don't need to check for codenames since we already have
    # a list of the ones we're going to create.
    all_perms = set(
        auth_app.Permission.objects.using(db)
        .filter(
            content_type__in=ctypes,
        )
        .values_list("content_type", "codename")
    )

    # Only create permissions that don't already exist.
    perms = [
        auth_app.Permission(codename=codename, name=name, content_type=ctype)
        for ctype, (codename, name) in searched_perms
        if (ctype.pk, codename) not in all_perms
    ]
    auth_app.Permission.objects.using(db).bulk_create(perms)
    if verbosity >= 2:
        for perm in perms:
            # (The double parentheses around print's argument were a 2to3
            # conversion artifact and have been removed.)
            print("Adding permission '%s'" % perm)
def create_superuser(app, created_models, verbosity, db, **kwargs):
    """
    post_syncdb handler: interactively offer to create a superuser right
    after the user model's table has been created.
    """
    from django.core.management import call_command

    UserModel = get_user_model()

    if UserModel in created_models and kwargs.get("interactive", True):
        msg = (
            "\nYou just installed Django's auth system, which means you "
            "don't have any superusers defined.\nWould you like to create one "
            "now? (yes/no): "
        )
        # input() already returns the typed line as a string. The previous
        # eval(input(...)) -- a bad 2to3 conversion of raw_input() -- both
        # executed arbitrary user input and crashed on a plain "yes"/"no".
        confirm = input(msg)
        while 1:
            if confirm not in ("yes", "no"):
                confirm = input('Please enter either "yes" or "no": ')
                continue
            if confirm == "yes":
                call_command("createsuperuser", interactive=True, database=db)
            # Break for both "yes" and "no". Previously the break sat inside
            # the "yes" branch, so answering "no" spun forever without
            # re-prompting.
            break
def get_system_username():
    """
    Try to determine the current system user's username.

    :returns: The username as a unicode string, or an empty string if the
        username could not be determined.
    """
    try:
        result = getpass.getuser()
    except (ImportError, KeyError):
        # KeyError will be raised by os.getpwuid() (called by getuser())
        # if there is no corresponding entry in the /etc/passwd file
        # (a very restricted chroot environment, for example).
        return ""
    if not six.PY3:
        # On Python 2 getpass.getuser() returns bytes; decode them with the
        # system locale so the result is always unicode.
        default_locale = locale.getdefaultlocale()[1]
        if not default_locale:
            return ""
        try:
            result = result.decode(default_locale)
        except UnicodeDecodeError:
            # UnicodeDecodeError - preventive treatment for non-latin Windows.
            return ""
    return result
def get_default_username(check_db=True):
    """
    Try to determine the current system user's username to use as a default.

    :param check_db: If ``True``, requires that the username does not match an
        existing ``auth.User`` (otherwise returns an empty string).
    :returns: The username, or an empty string if no username can be
        determined.
    """
    # With a swapped-out User model we can't make any assumptions about the
    # default user name.
    if auth_app.User._meta.swapped:
        return ""
    username = get_system_username()
    try:
        # ASCII-fold the system name and strip spaces so it has a chance of
        # passing the username field's validators.
        username = unicodedata.normalize("NFKD", username)
        username = username.encode("ascii", "ignore").decode("ascii")
        username = username.replace(" ", "").lower()
    except UnicodeDecodeError:
        return ""
    # Reject anything the username validators would refuse.
    try:
        auth_app.User._meta.get_field("username").run_validators(username)
    except exceptions.ValidationError:
        return ""
    if not (check_db and username):
        return username
    # Don't suggest a username that is already taken.
    try:
        auth_app.User._default_manager.get(username=username)
    except auth_app.User.DoesNotExist:
        return username
    return ""
# Wire the handlers above into syncdb: create Permission rows for newly
# installed models, and (for the auth app itself) offer to create a
# superuser. dispatch_uid guards against double registration.
signals.post_syncdb.connect(
    create_permissions, dispatch_uid="django.contrib.auth.management.create_permissions"
)
signals.post_syncdb.connect(
    create_superuser,
    sender=auth_app,
    dispatch_uid="django.contrib.auth.management.create_superuser",
)
<|endoftext|> |
<|endoftext|>from django.contrib.syndication.views import Feed
from django.contrib.sites.models import get_current_site
from django.contrib import comments
from django.utils.translation import ugettext as _
class LatestCommentFeed(Feed):
    """Feed of latest comments on the current site."""

    def __call__(self, request, *args, **kwargs):
        # Remember the current site so the title/link/items callbacks below
        # can scope the feed to it.
        self.site = get_current_site(request)
        return super(LatestCommentFeed, self).__call__(request, *args, **kwargs)

    def title(self):
        return _("%(site_name)s comments") % {"site_name": self.site.name}

    def link(self):
        return "http://%s/" % (self.site.domain)

    def description(self):
        return _("Latest comments on %(site_name)s") % {"site_name": self.site.name}

    def items(self):
        # Only public, non-removed comments for this site, newest first.
        queryset = comments.get_model().objects.filter(
            site__pk=self.site.pk,
            is_public=True,
            is_removed=False,
        )
        return queryset.order_by("-submit_date")[:40]

    def item_pubdate(self, item):
        return item.submit_date
<|endoftext|> |
<|endoftext|>import os
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.test import TestCase
from django.test.utils import override_settings
@override_settings(
    LOGIN_URL="/accounts/login/",
    MIDDLEWARE_CLASSES=(
        "django.middleware.common.CommonMiddleware",
        "django.contrib.sessions.middleware.SessionMiddleware",
        "django.middleware.csrf.CsrfViewMiddleware",
        "django.contrib.auth.middleware.AuthenticationMiddleware",
        "django.contrib.messages.middleware.MessageMiddleware",
        # no 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
    ),
    TEMPLATE_DIRS=(os.path.join(os.path.dirname(__file__), "templates"),),
    SITE_ID=1,
)
class FlatpageViewTests(TestCase):
    """
    Flatpages served only through the urlconf's flatpage view: the fallback
    middleware is deliberately left out of MIDDLEWARE_CLASSES above, so
    URLs outside /flatpage_root/ must 404.
    """
    fixtures = ["sample_flatpages", "example_site"]
    urls = "django.contrib.flatpages.tests.urls"
    def test_view_flatpage(self):
        "A flatpage can be served through a view"
        response = self.client.get("/flatpage_root/flatpage/")
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<p>Isn't it flat!</p>")
    def test_view_non_existent_flatpage(self):
        "A non-existent flatpage raises 404 when served through a view"
        response = self.client.get("/flatpage_root/no_such_flatpage/")
        self.assertEqual(response.status_code, 404)
    def test_view_authenticated_flatpage(self):
        "A flatpage served through a view can require authentication"
        # Anonymous users get bounced to LOGIN_URL with ?next= set.
        response = self.client.get("/flatpage_root/sekrit/")
        self.assertRedirects(response, "/accounts/login/?next=/flatpage_root/sekrit/")
        User.objects.create_user("testuser", "[email protected]", "s3krit")
        self.client.login(username="testuser", password="s3krit")
        response = self.client.get("/flatpage_root/sekrit/")
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<p>Isn't it sekrit!</p>")
    def test_fallback_flatpage(self):
        "A fallback flatpage won't be served if the middleware is disabled"
        response = self.client.get("/flatpage/")
        self.assertEqual(response.status_code, 404)
    def test_fallback_non_existent_flatpage(self):
        "A non-existent flatpage won't be served if the fallback middleware is disabled"
        response = self.client.get("/no_such_flatpage/")
        self.assertEqual(response.status_code, 404)
    def test_view_flatpage_special_chars(self):
        "A flatpage with special chars in the URL can be served through a view"
        fp = FlatPage.objects.create(
            url="/some.very_special~chars-here/",
            title="A very special page",
            content="Isn't it special!",
            enable_comments=False,
            registration_required=False,
        )
        # The page must be attached to the current site to be resolvable.
        fp.sites.add(settings.SITE_ID)
        response = self.client.get("/flatpage_root/some.very_special~chars-here/")
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<p>Isn't it special!</p>")
@override_settings(
    APPEND_SLASH=True,
    LOGIN_URL="/accounts/login/",
    MIDDLEWARE_CLASSES=(
        "django.middleware.common.CommonMiddleware",
        "django.contrib.sessions.middleware.SessionMiddleware",
        "django.middleware.csrf.CsrfViewMiddleware",
        "django.contrib.auth.middleware.AuthenticationMiddleware",
        "django.contrib.messages.middleware.MessageMiddleware",
        # no 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
    ),
    TEMPLATE_DIRS=(os.path.join(os.path.dirname(__file__), "templates"),),
    SITE_ID=1,
)
class FlatpageViewAppendSlashTests(TestCase):
    """
    APPEND_SLASH interaction with the flatpage view (fallback middleware
    disabled): existing pages redirect to the slashed URL, missing pages
    404 without redirecting.
    """
    fixtures = ["sample_flatpages", "example_site"]
    urls = "django.contrib.flatpages.tests.urls"
    def test_redirect_view_flatpage(self):
        "A flatpage can be served through a view and should add a slash"
        response = self.client.get("/flatpage_root/flatpage")
        self.assertRedirects(response, "/flatpage_root/flatpage/", status_code=301)
    def test_redirect_view_non_existent_flatpage(self):
        "A non-existent flatpage raises 404 when served through a view and should not add a slash"
        response = self.client.get("/flatpage_root/no_such_flatpage")
        self.assertEqual(response.status_code, 404)
    def test_redirect_fallback_flatpage(self):
        "A fallback flatpage won't be served if the middleware is disabled and should not add a slash"
        response = self.client.get("/flatpage")
        self.assertEqual(response.status_code, 404)
    def test_redirect_fallback_non_existent_flatpage(self):
        "A non-existent flatpage won't be served if the fallback middleware is disabled and should not add a slash"
        response = self.client.get("/no_such_flatpage")
        self.assertEqual(response.status_code, 404)
    def test_redirect_view_flatpage_special_chars(self):
        "A flatpage with special chars in the URL can be served through a view and should add a slash"
        fp = FlatPage.objects.create(
            url="/some.very_special~chars-here/",
            title="A very special page",
            content="Isn't it special!",
            enable_comments=False,
            registration_required=False,
        )
        fp.sites.add(settings.SITE_ID)
        response = self.client.get("/flatpage_root/some.very_special~chars-here")
        self.assertRedirects(
            response, "/flatpage_root/some.very_special~chars-here/", status_code=301
        )
<|endoftext|> |
<|endoftext|>from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import (
DatabaseIntrospection,
FlexibleFieldLookupDict,
)
from django.utils import six
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
    """
    Subclass that updates the `base_data_types_reverse` dict with entries
    for the geometry column types, so SpatiaLite introspection maps them
    to GeometryField.
    """
    base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
    base_data_types_reverse.update(
        {
            "point": "GeometryField",
            "linestring": "GeometryField",
            "polygon": "GeometryField",
            "multipoint": "GeometryField",
            "multilinestring": "GeometryField",
            "multipolygon": "GeometryField",
            "geometrycollection": "GeometryField",
        }
    )
class SpatiaLiteIntrospection(DatabaseIntrospection):
    # Reuse SQLite introspection, but route column types through the
    # geometry-aware lookup dict.
    data_types_reverse = GeoFlexibleFieldLookupDict()
    def get_geometry_type(self, table_name, geo_col):
        """
        Return a ``(field_type, field_params)`` tuple for the given geometry
        column, using SpatiaLite's ``geometry_columns`` metadata table.
        Raises if the column has no metadata row.
        """
        cursor = self.connection.cursor()
        try:
            # Querying the `geometry_columns` table to get additional metadata.
            cursor.execute(
                'SELECT "coord_dimension", "srid", "type" '
                'FROM "geometry_columns" '
                'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
                (table_name, geo_col),
            )
            row = cursor.fetchone()
            if not row:
                raise Exception(
                    'Could not find a geometry column for "%s"."%s"'
                    % (table_name, geo_col)
                )
            # OGRGeomType does not require GDAL and makes it easy to convert
            # from OGC geom type name to Django field.
            field_type = OGRGeomType(row[2]).django
            # Getting any GeometryField keyword arguments that are not the default.
            dim = row[0]
            srid = row[1]
            field_params = {}
            if srid != 4326:
                # 4326 (WGS84) is GeometryField's default, so only record others.
                field_params["srid"] = srid
            if isinstance(dim, six.string_types) and "Z" in dim:
                # A string coord_dimension containing "Z" marks 3D geometries.
                field_params["dim"] = 3
        finally:
            # Close the cursor whether or not the lookup succeeded.
            cursor.close()
        return field_type, field_params
<|endoftext|> |
<|endoftext|>from ctypes import c_char_p, c_double, c_int, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.errcheck import check_bool, check_envelope
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output,
double_output,
geom_output,
int_output,
srs_output,
string_output,
void_output,
)
### Generation routines specific to this module ###
def env_func(f, argtypes):
    "For getting OGREnvelopes."
    f.argtypes = argtypes
    # The envelope is filled in through an out-parameter, so there is no
    # return value; check_envelope validates the filled-in structure.
    f.restype = None
    f.errcheck = check_envelope
    return f
def pnt_func(f):
    "For accessing point information."
    # Point accessors (GetX/GetY/GetZ) take (geometry ptr, index) and
    # return a double.
    return double_output(f, [c_void_p, c_int])
def topology_func(f):
    """
    Configure a binary topology routine: takes two geometry pointers and
    returns an integer treated as a boolean.
    """
    f.argtypes = [c_void_p, c_void_p]
    f.restype = c_int
    # Fix: ctypes consults the `errcheck` attribute; the original assigned
    # to the misspelled `errchck`, which ctypes silently ignored, so the
    # boolean result was never validated by check_bool.
    f.errcheck = check_bool
    return f
### OGR_G ctypes function prototypes ###
# Each assignment below wraps a raw OGR_G_* entry point from libgdal with
# argtypes/restype/errcheck configuration via the generation helpers.
# GeoJSON routines.
from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p])
to_json = string_output(
    lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True, decoding="ascii"
)
to_kml = string_output(
    lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True, decoding="ascii"
)
# GetX, GetY, GetZ all return doubles.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)
# Geometry creation routines.
from_wkb = geom_output(
    lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2
)
from_wkt = geom_output(
    lgdal.OGR_G_CreateFromWkt,
    [POINTER(c_char_p), c_void_p, POINTER(c_void_p)],
    offset=-1,
)
create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int])
clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int])
get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p])
geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p])
geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p])
geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p])
geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p])
geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p])
# Geometry modification routines.
add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p])
import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)])
# Destroys a geometry
destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False)
# Geometry export routines.
to_wkb = void_output(
    lgdal.OGR_G_ExportToWkb, None, errcheck=True
)  # special handling for WKB.
to_wkt = string_output(
    lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)], decoding="ascii"
)
to_gml = string_output(
    lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True, decoding="ascii"
)
get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p])
# Geometry spatial-reference related routines.
assign_srs = void_output(
    lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False
)
get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p])
# Geometry properties
get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p])
get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p])
get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p])
get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p])
set_coord_dim = void_output(
    lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False
)
get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p])
get_geom_name = const_string_output(
    lgdal.OGR_G_GetGeometryName, [c_void_p], decoding="ascii"
)
get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p])
get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p])
get_point = void_output(
    lgdal.OGR_G_GetPoint,
    [c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)],
    errcheck=False,
)
geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False)
# Topology routines. Each takes two geometries and returns a boolean.
ogr_contains = topology_func(lgdal.OGR_G_Contains)
ogr_crosses = topology_func(lgdal.OGR_G_Crosses)
ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint)
ogr_equals = topology_func(lgdal.OGR_G_Equals)
ogr_intersects = topology_func(lgdal.OGR_G_Intersects)
ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps)
ogr_touches = topology_func(lgdal.OGR_G_Touches)
ogr_within = topology_func(lgdal.OGR_G_Within)
# Transformation routines.
geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p])
geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p])
# For retrieving the envelope of the geometry.
get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
<|endoftext|> |
<|endoftext|>import binascii
import unittest
from django.contrib.gis import memoryview
from django.contrib.gis.geos import (
GEOSGeometry,
WKTReader,
WKTWriter,
WKBReader,
WKBWriter,
geos_version_info,
)
from django.utils import six
class GEOSIOTest(unittest.TestCase):
    """
    Tests for the GEOS I/O wrapper classes: WKT/WKB readers and writers,
    including byte-order, 3D-output and SRID flags on WKBWriter.
    """
    def test01_wktreader(self):
        # Creating a WKTReader instance
        wkt_r = WKTReader()
        wkt = "POINT (5 23)"
        # read() should return a GEOSGeometry
        ref = GEOSGeometry(wkt)
        g1 = wkt_r.read(wkt.encode())
        g2 = wkt_r.read(wkt)
        for geom in (g1, g2):
            self.assertEqual(ref, geom)
        # Should only accept six.string_types objects.
        self.assertRaises(TypeError, wkt_r.read, 1)
        self.assertRaises(TypeError, wkt_r.read, memoryview(b"foo"))
    def test02_wktwriter(self):
        # Creating a WKTWriter instance, testing its ptr property.
        wkt_w = WKTWriter()
        self.assertRaises(TypeError, wkt_w._set_ptr, WKTReader.ptr_type())
        ref = GEOSGeometry("POINT (5 23)")
        ref_wkt = "POINT (5.0000000000000000 23.0000000000000000)"
        self.assertEqual(ref_wkt, wkt_w.write(ref).decode())
    def test03_wkbreader(self):
        # Creating a WKBReader instance
        wkb_r = WKBReader()
        # Big-endian WKB hex for 'POINT (5 23)'.
        hex = b"000000000140140000000000004037000000000000"
        wkb = memoryview(binascii.a2b_hex(hex))
        ref = GEOSGeometry(hex)
        # read() should return a GEOSGeometry on either a hex string or
        # a WKB buffer.
        g1 = wkb_r.read(wkb)
        g2 = wkb_r.read(hex)
        for geom in (g1, g2):
            self.assertEqual(ref, geom)
        bad_input = (1, 5.23, None, False)
        for bad_wkb in bad_input:
            self.assertRaises(TypeError, wkb_r.read, bad_wkb)
    def test04_wkbwriter(self):
        wkb_w = WKBWriter()
        # Representations of 'POINT (5 23)' in hex -- one normal and
        # the other with the byte order changed.
        g = GEOSGeometry("POINT (5 23)")
        hex1 = b"010100000000000000000014400000000000003740"
        wkb1 = memoryview(binascii.a2b_hex(hex1))
        hex2 = b"000000000140140000000000004037000000000000"
        wkb2 = memoryview(binascii.a2b_hex(hex2))
        self.assertEqual(hex1, wkb_w.write_hex(g))
        self.assertEqual(wkb1, wkb_w.write(g))
        # Ensuring bad byteorders are not accepted.
        for bad_byteorder in (-1, 2, 523, "foo", None):
            # Equivalent of `wkb_w.byteorder = bad_byteorder`
            self.assertRaises(ValueError, wkb_w._set_byteorder, bad_byteorder)
        # Setting the byteorder to 0 (for Big Endian)
        wkb_w.byteorder = 0
        self.assertEqual(hex2, wkb_w.write_hex(g))
        self.assertEqual(wkb2, wkb_w.write(g))
        # Back to Little Endian
        wkb_w.byteorder = 1
        # Now, trying out the 3D and SRID flags.
        g = GEOSGeometry("POINT (5 23 17)")
        g.srid = 4326
        hex3d = b"0101000080000000000000144000000000000037400000000000003140"
        wkb3d = memoryview(binascii.a2b_hex(hex3d))
        hex3d_srid = (
            b"01010000A0E6100000000000000000144000000000000037400000000000003140"
        )
        wkb3d_srid = memoryview(binascii.a2b_hex(hex3d_srid))
        # Ensuring bad output dimensions are not accepted
        for bad_outdim in (-1, 0, 1, 4, 423, "foo", None):
            # Equivalent of `wkb_w.outdim = bad_outdim`
            self.assertRaises(ValueError, wkb_w._set_outdim, bad_outdim)
        # These tests will fail on 3.0.0 because of a bug that was fixed in 3.1:
        # http://trac.osgeo.org/geos/ticket/216
        if not geos_version_info()["version"].startswith("3.0."):
            # Now setting the output dimensions to be 3
            wkb_w.outdim = 3
            self.assertEqual(hex3d, wkb_w.write_hex(g))
            self.assertEqual(wkb3d, wkb_w.write(g))
            # Telling the WKBWriter to include the srid in the representation.
            wkb_w.srid = True
            self.assertEqual(hex3d_srid, wkb_w.write_hex(g))
            self.assertEqual(wkb3d_srid, wkb_w.write(g))
def suite():
    """Return a TestSuite containing all GEOS I/O tests."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(GEOSIOTest))
    return tests
def run(verbosity=2):
    """Run the GEOS I/O test suite with a text test runner."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
<|endoftext|> |
<|endoftext|>from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
# function that will pass a test.
def pass_test(*args):
    """No-op stand-in that makes any test trivially pass."""
    return None
def no_backend(test_func, backend):
    "Use this decorator to disable test on specified backend."
    # Compare the trailing component of the engine path (e.g. "postgis").
    engine = settings.DATABASES[DEFAULT_DB_ALIAS]["ENGINE"].rsplit(".")[-1]
    return pass_test if engine == backend else test_func
# Decorators to disable entire test functions for specific
# spatial backends.
def no_oracle(func):
    # Skip `func` when the default database backend is Oracle.
    return no_backend(func, "oracle")
def no_postgis(func):
    # Skip `func` when the default database backend is PostGIS.
    return no_backend(func, "postgis")
def no_mysql(func):
    # Skip `func` when the default database backend is MySQL.
    return no_backend(func, "mysql")
def no_spatialite(func):
    # Skip `func` when the default database backend is SpatiaLite.
    return no_backend(func, "spatialite")
# Shortcut booleans to omit only portions of tests.
_default_db = settings.DATABASES[DEFAULT_DB_ALIAS]["ENGINE"].rsplit(".")[-1]
oracle = _default_db == "oracle"
postgis = _default_db == "postgis"
mysql = _default_db == "mysql"
spatialite = _default_db == "spatialite"
# Import the backend-appropriate SpatialRefSys model, if the backend has one
# (MySQL, for instance, does not).
HAS_SPATIALREFSYS = True
if oracle and "gis" in settings.DATABASES[DEFAULT_DB_ALIAS]["ENGINE"]:
    from django.contrib.gis.db.backends.oracle.models import SpatialRefSys
elif postgis:
    from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
elif spatialite:
    from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
else:
    HAS_SPATIALREFSYS = False
    SpatialRefSys = None
<|endoftext|> |
<|endoftext|># -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
# Official two-digit Spanish province codes mapped to translatable
# province names, suitable for use as a form/model field `choices` value.
PROVINCE_CHOICES = (
    ("01", _("Araba")),
    ("02", _("Albacete")),
    ("03", _("Alacant")),
    ("04", _("Almeria")),
    ("05", _("Avila")),
    ("06", _("Badajoz")),
    ("07", _("Illes Balears")),
    ("08", _("Barcelona")),
    ("09", _("Burgos")),
    ("10", _("Caceres")),
    ("11", _("Cadiz")),
    ("12", _("Castello")),
    ("13", _("Ciudad Real")),
    ("14", _("Cordoba")),
    ("15", _("A Coruna")),
    ("16", _("Cuenca")),
    ("17", _("Girona")),
    ("18", _("Granada")),
    ("19", _("Guadalajara")),
    ("20", _("Guipuzkoa")),
    ("21", _("Huelva")),
    ("22", _("Huesca")),
    ("23", _("Jaen")),
    ("24", _("Leon")),
    ("25", _("Lleida")),
    ("26", _("La Rioja")),
    ("27", _("Lugo")),
    ("28", _("Madrid")),
    ("29", _("Malaga")),
    ("30", _("Murcia")),
    ("31", _("Navarre")),
    ("32", _("Ourense")),
    ("33", _("Asturias")),
    ("34", _("Palencia")),
    ("35", _("Las Palmas")),
    ("36", _("Pontevedra")),
    ("37", _("Salamanca")),
    ("38", _("Santa Cruz de Tenerife")),
    ("39", _("Cantabria")),
    ("40", _("Segovia")),
    ("41", _("Seville")),
    ("42", _("Soria")),
    ("43", _("Tarragona")),
    ("44", _("Teruel")),
    ("45", _("Toledo")),
    ("46", _("Valencia")),
    ("47", _("Valladolid")),
    ("48", _("Bizkaia")),
    ("49", _("Zamora")),
    ("50", _("Zaragoza")),
    ("51", _("Ceuta")),
    ("52", _("Melilla")),
)
<|endoftext|> |
<|endoftext|>from django.contrib.messages import constants
from django.contrib.messages.tests.base import BaseTest
from django.contrib.messages.storage.base import Message
from django.contrib.messages.storage.session import SessionStorage
from django.utils.safestring import SafeData, mark_safe
def set_session_data(storage, messages):
    """
    Store ``messages`` directly in the backend request's session, and drop
    the backend's cached loaded data so the new messages are re-read.
    """
    storage.request.session[storage.session_key] = messages
    try:
        del storage._loaded_data
    except AttributeError:
        pass
def stored_session_messages_count(storage):
    """Return how many messages are stored in the backend's session."""
    session = storage.request.session
    return len(session.get(storage.session_key, []))
class SessionTest(BaseTest):
    """
    Runs the shared message-storage test suite (BaseTest) against the
    session-based storage backend.
    """
    storage_class = SessionStorage
    def get_request(self):
        # Replace the real session with a plain dict; session storage only
        # needs mapping semantics here.
        self.session = {}
        request = super(SessionTest, self).get_request()
        request.session = self.session
        return request
    def stored_messages_count(self, storage, response):
        # The response is irrelevant for session storage; count what is in
        # the session directly.
        return stored_session_messages_count(storage)
    def test_get(self):
        storage = self.storage_class(self.get_request())
        # Set initial data.
        example_messages = ["test", "me"]
        set_session_data(storage, example_messages)
        # Test that the message actually contains what we expect.
        self.assertEqual(list(storage), example_messages)
    def test_safedata(self):
        """
        Tests that a message containing SafeData is keeping its safe status when
        retrieved from the message storage.
        """
        storage = self.get_storage()
        message = Message(constants.DEBUG, mark_safe("<b>Hello Django!</b>"))
        set_session_data(storage, [message])
        self.assertIsInstance(list(storage)[0].message, SafeData)
<|endoftext|> |
<|endoftext|>from optparse import make_option
from django.conf import settings
from django.core.management.commands.runserver import Command as RunserverCommand
from django.contrib.staticfiles.handlers import StaticFilesHandler
class Command(RunserverCommand):
    """
    runserver variant that also serves files collected under STATIC_URL
    through the StaticFilesHandler during development.
    """

    option_list = RunserverCommand.option_list + (
        make_option(
            "--nostatic",
            action="store_false",
            dest="use_static_handler",
            default=True,
            help="Tells Django to NOT automatically serve static files at STATIC_URL.",
        ),
        make_option(
            "--insecure",
            action="store_true",
            dest="insecure_serving",
            default=False,
            help="Allows serving static files even if DEBUG is False.",
        ),
    )
    help = "Starts a lightweight Web server for development and also serves static files."

    def get_handler(self, *args, **options):
        """
        Returns the static files serving handler wrapping the default handler,
        if static files should be served. Otherwise just returns the default
        handler.
        """
        handler = super(Command, self).get_handler(*args, **options)
        serve_static = options.get("use_static_handler", True)
        allow_insecure = options.get("insecure_serving", False)
        # Static serving is on by default, but only outside DEBUG when
        # --insecure was explicitly requested.
        if not (serve_static and (settings.DEBUG or allow_insecure)):
            return handler
        return StaticFilesHandler(handler)
<|endoftext|> |
<|endoftext|>"""SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.utils import DNS_NAME
from django.core.mail.message import sanitize_address
from django.utils.encoding import force_bytes
class EmailBackend(BaseEmailBackend):
    """
    A wrapper that manages the SMTP network connection.

    Connection parameters default to the EMAIL_* settings; one connection is
    opened, reused for every message in a send_messages() call, and closed
    again if this backend opened it.
    """

    def __init__(
        self,
        host=None,
        port=None,
        username=None,
        password=None,
        use_tls=None,
        fail_silently=False,
        **kwargs
    ):
        super(EmailBackend, self).__init__(fail_silently=fail_silently)
        self.host = host or settings.EMAIL_HOST
        self.port = port or settings.EMAIL_PORT
        # `is None` checks (rather than truthiness) let callers pass an
        # explicit empty username/password to disable authentication.
        if username is None:
            self.username = settings.EMAIL_HOST_USER
        else:
            self.username = username
        if password is None:
            self.password = settings.EMAIL_HOST_PASSWORD
        else:
            self.password = password
        if use_tls is None:
            self.use_tls = settings.EMAIL_USE_TLS
        else:
            self.use_tls = use_tls
        self.connection = None
        # Serializes open/send/close so the backend can be shared by threads.
        self._lock = threading.RLock()

    def open(self):
        """
        Ensures we have a connection to the email server. Returns whether or
        not a new connection was required (True or False).
        """
        if self.connection:
            # Nothing to do if the connection is already open.
            return False
        try:
            # If local_hostname is not specified, socket.getfqdn() gets used.
            # For performance, we use the cached FQDN for local_hostname.
            self.connection = smtplib.SMTP(
                self.host, self.port, local_hostname=DNS_NAME.get_fqdn()
            )
            if self.use_tls:
                # starttls() requires an EHLO before and after the upgrade.
                self.connection.ehlo()
                self.connection.starttls()
                self.connection.ehlo()
            if self.username and self.password:
                self.connection.login(self.username, self.password)
            return True
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt when fail_silently was set.
            if not self.fail_silently:
                raise
            return False

    def close(self):
        """Closes the connection to the email server."""
        if self.connection is None:
            return
        try:
            try:
                self.connection.quit()
            except (ssl.SSLError, smtplib.SMTPServerDisconnected):
                # This happens when calling quit() on a TLS connection
                # sometimes, or when the connection was already disconnected
                # by the server.
                self.connection.close()
        except Exception:
            # Narrowed from a bare `except:` so ^C/SystemExit still escape.
            if self.fail_silently:
                return
            raise
        finally:
            self.connection = None

    def send_messages(self, email_messages):
        """
        Sends one or more EmailMessage objects and returns the number of email
        messages sent.
        """
        if not email_messages:
            return
        with self._lock:
            new_conn_created = self.open()
            if not self.connection:
                # We failed silently on open().
                # Trying to send would be pointless.
                return
            num_sent = 0
            for message in email_messages:
                sent = self._send(message)
                if sent:
                    num_sent += 1
            if new_conn_created:
                # Only tear down a connection this call established.
                self.close()
        return num_sent

    def _send(self, email_message):
        """A helper method that does the actual sending."""
        if not email_message.recipients():
            return False
        from_email = sanitize_address(email_message.from_email, email_message.encoding)
        recipients = [
            sanitize_address(addr, email_message.encoding)
            for addr in email_message.recipients()
        ]
        message = email_message.message()
        charset = (
            message.get_charset().get_output_charset()
            if message.get_charset()
            else "utf-8"
        )
        try:
            self.connection.sendmail(
                from_email, recipients, force_bytes(message.as_string(), charset)
            )
        except Exception:
            # Narrowed from a bare `except:` (see open()/close()).
            if not self.fail_silently:
                raise
            return False
        return True
<|endoftext|> |
<|endoftext|>"""
Module for abstract serializer/unserializer base classes.
"""
from django.db import models
from django.utils.encoding import smart_text
from django.utils import six
class SerializerDoesNotExist(KeyError):
    """The requested serializer was not found (KeyError subclass so
    registry-style dict lookups keep working)."""
    pass
class SerializationError(Exception):
    """Something bad happened during serialization."""
    pass
class DeserializationError(Exception):
    """Something bad happened during deserialization."""
    pass
class Serializer(object):
    """
    Abstract serializer base class.

    Concrete serializers implement the start/end/handle_* hooks; serialize()
    drives the iteration over the queryset and its fields.
    """
    # Indicates if the implemented serializer is only available for
    # internal Django use.
    internal_use_only = False
    def serialize(self, queryset, **options):
        """
        Serialize a queryset.
        """
        self.options = options
        # "stream" is where output goes; "fields" optionally restricts which
        # model fields are emitted; "use_natural_keys" alters FK/M2M output.
        self.stream = options.pop("stream", six.StringIO())
        self.selected_fields = options.pop("fields", None)
        self.use_natural_keys = options.pop("use_natural_keys", False)
        self.start_serialization()
        # Lets concrete serializers know whether they are on the first
        # object (e.g. to emit separators between objects).
        self.first = True
        for obj in queryset:
            self.start_object(obj)
            # Use the concrete parent class' _meta instead of the object's _meta
            # This is to avoid local_fields problems for proxy models. Refs #17717.
            concrete_model = obj._meta.concrete_model
            for field in concrete_model._meta.local_fields:
                if field.serialize:
                    if field.rel is None:
                        if (
                            self.selected_fields is None
                            or field.attname in self.selected_fields
                        ):
                            self.handle_field(obj, field)
                    else:
                        # FK attnames end in "_id"; strip it ([:-3]) when
                        # matching against the user-supplied field names.
                        if (
                            self.selected_fields is None
                            or field.attname[:-3] in self.selected_fields
                        ):
                            self.handle_fk_field(obj, field)
            for field in concrete_model._meta.many_to_many:
                if field.serialize:
                    if (
                        self.selected_fields is None
                        or field.attname in self.selected_fields
                    ):
                        self.handle_m2m_field(obj, field)
            self.end_object(obj)
            # Clear the flag once the first object has been fully emitted.
            if self.first:
                self.first = False
        self.end_serialization()
        return self.getvalue()
    def start_serialization(self):
        """
        Called when serializing of the queryset starts.
        """
        raise NotImplementedError
    def end_serialization(self):
        """
        Called when serializing of the queryset ends.
        """
        pass
    def start_object(self, obj):
        """
        Called when serializing of an object starts.
        """
        raise NotImplementedError
    def end_object(self, obj):
        """
        Called when serializing of an object ends.
        """
        pass
    def handle_field(self, obj, field):
        """
        Called to handle each individual (non-relational) field on an object.
        """
        raise NotImplementedError
    def handle_fk_field(self, obj, field):
        """
        Called to handle a ForeignKey field.
        """
        raise NotImplementedError
    def handle_m2m_field(self, obj, field):
        """
        Called to handle a ManyToManyField.
        """
        raise NotImplementedError
    def getvalue(self):
        """
        Return the fully serialized queryset (or None if the output stream is
        not seekable).
        """
        if callable(getattr(self.stream, "getvalue", None)):
            return self.stream.getvalue()
class Deserializer(six.Iterator):
    """
    Abstract base deserializer class.

    Iterating over an instance yields deserialized items; subclasses
    implement __next__().
    """
    def __init__(self, stream_or_string, **options):
        """
        Init this serializer given a stream or a string
        """
        self.options = options
        # Accept either a ready-made stream or raw string data.
        if isinstance(stream_or_string, six.string_types):
            self.stream = six.StringIO(stream_or_string)
        else:
            self.stream = stream_or_string
        # hack to make sure that the models have all been loaded before
        # deserialization starts (otherwise subclass calls to get_model()
        # and friends might fail...)
        models.get_apps()
    def __iter__(self):
        return self
    def __next__(self):
        """Iteration interface -- return the next item in the stream"""
        raise NotImplementedError
class DeserializedObject(object):
    """
    A deserialized model.

    A container pairing a pre-saved model instance with the many-to-many
    data that was serialized alongside it.

    Call ``save()`` to save the object (with the many-to-many data) to the
    database; call ``save(save_m2m=False)`` to save just the object fields
    (and not touch the many-to-many stuff.)
    """

    def __init__(self, obj, m2m_data=None):
        self.object = obj
        self.m2m_data = m2m_data

    def __repr__(self):
        meta = self.object._meta
        return "<DeserializedObject: %s.%s(pk=%s)>" % (
            meta.app_label,
            meta.object_name,
            self.object.pk,
        )

    def save(self, save_m2m=True, using=None):
        # Save via the Model base class directly: this bypasses any
        # model-defined save() and forces a raw save, so what lands in the
        # database is literally what came from the file, not data
        # post-processed by pre_save/save methods.
        models.Model.save_base(self.object, using=using, raw=True)
        if save_m2m and self.m2m_data:
            for accessor_name, object_list in list(self.m2m_data.items()):
                setattr(self.object, accessor_name, object_list)
        # Prevent a second (possibly accidental) call to save() from saving
        # the m2m data twice.
        self.m2m_data = None
<|endoftext|> |
<|endoftext|>import re
from django.db.backends import BaseDatabaseIntrospection
field_size_re = re.compile(r"^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$")


def get_field_size(name):
    """Extract the size number from a "varchar(11)" type name"""
    match = field_size_re.search(name)
    if match is None:
        return None
    return int(match.group(1))
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
    """
    A light wrapper that "fakes" a dictionary interface, because some SQLite
    data types include variables in them -- e.g. "varchar(30)" -- and can't
    be matched as a simple dictionary lookup.
    """

    # Maps SQL types to Django Field types. Some of the SQL types have multiple
    # entries here because SQLite allows for anything and doesn't normalize the
    # field type; it uses whatever was given.
    base_data_types_reverse = {
        "bool": "BooleanField",
        "boolean": "BooleanField",
        "smallint": "SmallIntegerField",
        "smallint unsigned": "PositiveSmallIntegerField",
        "smallinteger": "SmallIntegerField",
        "int": "IntegerField",
        "integer": "IntegerField",
        "bigint": "BigIntegerField",
        "integer unsigned": "PositiveIntegerField",
        "decimal": "DecimalField",
        "real": "FloatField",
        "text": "TextField",
        "char": "CharField",
        "date": "DateField",
        "datetime": "DateTimeField",
        "time": "TimeField",
    }

    def __getitem__(self, key):
        # SQLite type names are matched case-insensitively.
        key = key.lower()
        try:
            return self.base_data_types_reverse[key]
        except KeyError:
            # Sized character types, e.g. "varchar(30)", carry their
            # max_length in the type name itself.
            size = get_field_size(key)
            if size is not None:
                return ("CharField", {"max_length": size})
            # Re-raise with the offending key: a bare `raise KeyError` loses
            # it, making "unknown column type" failures hard to diagnose.
            raise KeyError(key)
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """SQLite implementation of database introspection.

    SQLite stores little structured metadata; most information here is
    recovered either from PRAGMA statements or by regex-parsing the raw
    CREATE TABLE SQL stored in sqlite_master.
    """
    # SQLite doesn't normalize declared column types, so reverse mapping
    # goes through the flexible lookup object defined above.
    data_types_reverse = FlexibleFieldLookupDict()
    def get_table_list(self, cursor):
        "Returns a list of table names in the current database."
        # Skip the sqlite_sequence system table used for autoincrement key
        # generation.
        cursor.execute(
            """
            SELECT name FROM sqlite_master
            WHERE type='table' AND NOT name='sqlite_sequence'
            ORDER BY name"""
        )
        return [row[0] for row in cursor.fetchall()]
    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        # Shape each PRAGMA table_info row into the 7-tuple layout of
        # cursor.description: (name, type_code, display_size, internal_size,
        # precision, scale, null_ok).  Slots SQLite can't provide are None.
        return [
            (
                info["name"],
                info["type"],
                None,
                info["size"],
                None,
                None,
                info["null_ok"],
            )
            for info in self._table_info(cursor, table_name)
        ]
    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        # Dictionary of relations to return
        relations = {}
        # Schema for this table
        cursor.execute(
            "SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s",
            [table_name, "table"],
        )
        results = cursor.fetchone()[0].strip()
        # Keep only the column definitions between the outermost parentheses
        # of the CREATE TABLE statement.
        results = results[results.index("(") + 1 : results.rindex(")")]
        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_index, field_desc in enumerate(results.split(",")):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                # Table-level UNIQUE constraints aren't columns; skip them.
                continue
            m = re.search('references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue
            table, column = [s.strip('"') for s in m.groups()]
            # Fetch the referenced table's schema to locate the index of the
            # referenced column.
            cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
            result = cursor.fetchall()[0]
            other_table_results = result[0].strip()
            li, ri = other_table_results.index("("), other_table_results.rindex(")")
            other_table_results = other_table_results[li + 1 : ri]
            for other_index, other_desc in enumerate(other_table_results.split(",")):
                other_desc = other_desc.strip()
                if other_desc.startswith("UNIQUE"):
                    continue
                # First whitespace-delimited word is the column name,
                # possibly quoted.
                name = other_desc.split(" ", 1)[0].strip('"')
                if name == column:
                    relations[field_index] = (other_index, table)
                    break
        return relations
    def get_key_columns(self, cursor, table_name):
        """
        Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
        key columns in given table.
        """
        key_columns = []
        # Schema for this table
        cursor.execute(
            "SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s",
            [table_name, "table"],
        )
        results = cursor.fetchone()[0].strip()
        # Keep only the column definitions between the outermost parentheses.
        results = results[results.index("(") + 1 : results.rindex(")")]
        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_index, field_desc in enumerate(results.split(",")):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue
            # Capture the (quoted) local column, the referenced table, and
            # the referenced column from the REFERENCES clause.
            m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue
            # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
            key_columns.append(tuple([s.strip('"') for s in m.groups()]))
        return key_columns
    def get_indexes(self, cursor, table_name):
        """
        Returns a dictionary mapping indexed column name to an info dict of
        the form {'primary_key': bool, 'unique': bool}.  Multi-column
        indexes are skipped.
        """
        indexes = {}
        for info in self._table_info(cursor, table_name):
            # Any nonzero "pk" value from table_info marks the primary key.
            if info["pk"] != 0:
                indexes[info["name"]] = {"primary_key": True, "unique": False}
        cursor.execute(
            "PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name)
        )
        # seq, name, unique
        for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
            cursor.execute(
                "PRAGMA index_info(%s)" % self.connection.ops.quote_name(index)
            )
            info = cursor.fetchall()
            # Skip indexes across multiple fields
            if len(info) != 1:
                continue
            name = info[0][2]  # seqno, cid, name
            indexes[name] = {"primary_key": False, "unique": unique}
        return indexes
    def get_primary_key_column(self, cursor, table_name):
        """
        Get the column name of the primary key for the given table.
        """
        # Don't use PRAGMA because that causes issues with some transactions
        cursor.execute(
            "SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s",
            [table_name, "table"],
        )
        results = cursor.fetchone()[0].strip()
        results = results[results.index("(") + 1 : results.rindex(")")]
        for field_desc in results.split(","):
            field_desc = field_desc.strip()
            # NOTE: the pattern requires the column name to be double-quoted
            # in the stored CREATE TABLE SQL.
            m = re.search('"(.*)".*PRIMARY KEY$', field_desc)
            if m:
                return m.groups()[0]
        return None
    def _table_info(self, cursor, name):
        """Return PRAGMA table_info rows for *name* as a list of dicts."""
        cursor.execute("PRAGMA table_info(%s)" % self.connection.ops.quote_name(name))
        # cid, name, type, notnull, dflt_value, pk
        return [
            {
                "name": field[1],
                "type": field[2],
                "size": get_field_size(field[2]),
                "null_ok": not field[3],
                "pk": field[5],  # undocumented
            }
            for field in cursor.fetchall()
        ]
<|endoftext|> |
<|endoftext|>"""
"Safe weakrefs", originally from pyDispatcher.
Provides a way to safely weakref any function, including bound methods (which
aren't handled by the core weakref module).
"""
import traceback
import weakref
def safeRef(target, onDelete=None):
    """Return a *safe* weak reference to a callable target.

    target -- the object to be weakly referenced; a bound method gets a
        BoundMethodWeakref, anything else a plain weakref.ref.
    onDelete -- optional callable invoked (with the reference object as
        its argument) once the referent has been collected.
    """
    # Bound methods are transient objects, so they need the special
    # BoundMethodWeakref machinery instead of a direct weakref.
    if getattr(target, "__self__", None) is not None:
        assert hasattr(target, "__func__"), (
            """safeRef target %r has __self__, but no __func__, don't know how to create reference"""
            % (target,)
        )
        return get_bound_method_weakref(target=target, onDelete=onDelete)
    if callable(onDelete):
        return weakref.ref(target, onDelete)
    return weakref.ref(target)
class BoundMethodWeakref(object):
    """'Safe' and reusable weak references to instance methods.

    BoundMethodWeakref objects provide a mechanism for referencing a bound
    method without requiring that the method object itself (which is
    normally a transient object) is kept alive. Instead, the
    BoundMethodWeakref object keeps weak references to both the object and
    the function which together define the instance method.

    Attributes:

        key -- the identity key for the reference, calculated by the
            class's calculateKey method applied to the target instance
            method.

        deletionMethods -- sequence of callable objects taking a single
            argument, a reference to this object, which will be called
            when *either* the target object or target function is garbage
            collected (i.e. when this object becomes invalid). These are
            specified as the onDelete parameters of safeRef calls.

        weakSelf -- weak reference to the target object.

        weakFunc -- weak reference to the target function.

    Class Attributes:

        _allInstances -- class attribute pointing to all live
            BoundMethodWeakref objects indexed by the class's
            calculateKey(target) method applied to the target objects.
            This weak value dictionary is used to short-circuit creation
            so that multiple references to the same (object, function)
            pair produce the same BoundMethodWeakref instance.
    """

    _allInstances = weakref.WeakValueDictionary()

    def __new__(cls, target, onDelete=None, *arguments, **named):
        """Create a new instance or return the current one.

        This allows us to short-circuit creation of references to
        already-referenced instance methods: the key corresponding to the
        target is calculated, and if there is already an existing
        reference, that is returned, with its deletionMethods attribute
        updated. Otherwise the new instance is created and registered in
        the table of already-referenced methods.
        """
        key = cls.calculateKey(target)
        current = cls._allInstances.get(key)
        if current is not None:
            current.deletionMethods.append(onDelete)
            return current
        else:
            base = super(BoundMethodWeakref, cls).__new__(cls)
            cls._allInstances[key] = base
            base.__init__(target, onDelete, *arguments, **named)
            return base

    def __init__(self, target, onDelete=None):
        """Return a weak-reference-like instance for a bound method.

        target -- the instance-method target for the weak reference, must
            have __self__ and __func__ attributes and be reconstructable
            via target.__func__.__get__(target.__self__), which is true
            of built-in instance methods.
        onDelete -- optional callback which will be called when this weak
            reference ceases to be valid (i.e. either the object or the
            function is garbage collected). Should take a single
            argument, which will be passed a pointer to this object.
        """

        def remove(weak, self=self):
            """Fire the registered deletion callbacks exactly once."""
            # Snapshot and clear the callback list so a re-entrant call
            # cannot run them twice.
            methods = self.deletionMethods[:]
            del self.deletionMethods[:]
            try:
                del self.__class__._allInstances[self.key]
            except KeyError:
                pass
            for function in methods:
                try:
                    if callable(function):
                        function(self)
                except Exception as e:
                    # A failing callback must not keep the remaining
                    # callbacks from running; report and continue.
                    try:
                        traceback.print_exc()
                    except AttributeError:
                        print(
                            (
                                "Exception during saferef %s cleanup function %s: %s"
                                % (self, function, e)
                            )
                        )

        self.deletionMethods = [onDelete]
        self.key = self.calculateKey(target)
        # Either referent dying triggers remove(), which invokes the
        # registered deletion callbacks.
        self.weakSelf = weakref.ref(target.__self__, remove)
        self.weakFunc = weakref.ref(target.__func__, remove)
        # Keep printable names so __str__ works even after collection.
        self.selfName = str(target.__self__)
        self.funcName = str(target.__func__.__name__)

    @classmethod
    def calculateKey(cls, target):
        """Calculate the reference key for this reference.

        Currently this is a two-tuple of the id()'s of the target object
        and the target function respectively.
        """
        return (id(target.__self__), id(target.__func__))

    def __str__(self):
        """Give a friendly representation of the object"""
        return """%s( %s.%s )""" % (
            self.__class__.__name__,
            self.selfName,
            self.funcName,
        )

    __repr__ = __str__

    def __hash__(self):
        return hash(self.key)

    def __bool__(self):
        """Whether we are still a valid reference.

        BUGFIX: a second definition of __bool__ used to shadow this one
        with ``return type(self).__bool__(self)``, which recursed
        infinitely on any truth test. It has been removed; this single
        definition works on Python 3 (Python 2 would need __nonzero__).
        """
        return self() is not None

    def __eq__(self, other):
        """Compare with another reference"""
        if not isinstance(other, self.__class__):
            return self.__class__ == type(other)
        return self.key == other.key

    def __call__(self):
        """Return a strong reference to the bound method.

        If the target cannot be retrieved, then will return None,
        otherwise returns a bound instance method for our object and
        function.

        Note:
            You may call this method any number of times, as it does not
            invalidate the reference.
        """
        target = self.weakSelf()
        if target is not None:
            function = self.weakFunc()
            if function is not None:
                return function.__get__(target)
        return None
class BoundNonDescriptorMethodWeakref(BoundMethodWeakref):
    """A specialized BoundMethodWeakref for platforms where instance
    methods are not descriptors.

    Instead of rebinding the function through the descriptor protocol,
    __call__ looks the method up on the instance by the function's name.
    That assumes the attribute name and the function's own name coincide,
    which can be defeated by storing a function under a differently named
    attribute -- an uncommon case, so on platforms where methods aren't
    descriptors (such as Jython) this implementation works in most cases.
    """

    def __init__(self, target, onDelete=None):
        """Return a weak-reference-like instance for a bound method.

        target -- the instance-method target for the weak reference; must
            have __self__ and __func__ attributes and be retrievable as
            getattr(target.__self__, target.__name__).
        onDelete -- optional callback invoked with this reference object
            once the reference ceases to be valid.
        """
        assert (
            getattr(target.__self__, target.__name__) == target
        ), "method %s isn't available as the attribute %s of %s" % (
            target,
            target.__name__,
            target.__self__,
        )
        super(BoundNonDescriptorMethodWeakref, self).__init__(target, onDelete)

    def __call__(self):
        """Return a strong reference to the bound method, or None.

        May be called any number of times without invalidating the
        reference.
        """
        instance = self.weakSelf()
        if instance is None:
            return None
        function = self.weakFunc()
        if function is None:
            return None
        # getattr() rather than function.__get__() because the function is
        # not a descriptor here; partial() is avoided because it would hide
        # the function's signature from the inspect module.
        return getattr(instance, function.__name__)
def get_bound_method_weakref(target, onDelete):
    """Instantiate the appropriate BoundMethodWeakref subclass for *target*.

    Descriptor methods (the common case) use the default implementation;
    platforms where methods are plain functions fall back to the
    non-descriptor variant.
    """
    cls = (
        BoundMethodWeakref
        if hasattr(target, "__get__")
        else BoundNonDescriptorMethodWeakref
    )
    return cls(target=target, onDelete=onDelete)
<|endoftext|> |
<|endoftext|>"""Default tags used by the template system, available to all templates."""
import sys
import re
from datetime import datetime
from itertools import groupby, cycle as itertools_cycle
from django.conf import settings
from django.template.base import (
Node,
NodeList,
Template,
Context,
Library,
TemplateSyntaxError,
VariableDoesNotExist,
InvalidTemplateLibrary,
BLOCK_TAG_START,
BLOCK_TAG_END,
VARIABLE_TAG_START,
VARIABLE_TAG_END,
SINGLE_BRACE_START,
SINGLE_BRACE_END,
COMMENT_TAG_START,
COMMENT_TAG_END,
VARIABLE_ATTRIBUTE_SEPARATOR,
get_library,
token_kwargs,
kwarg_re,
)
from django.template.smartif import IfParser, Literal
from django.template.defaultfilters import date
from django.utils.encoding import smart_text
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils import six
from django.utils import timezone
register = Library()
class AutoEscapeControlNode(Node):
    """Implements the actions of the autoescape tag."""

    def __init__(self, setting, nodelist):
        self.setting = setting
        self.nodelist = nodelist

    def render(self, context):
        # Flip autoescaping for the duration of the block, then restore it.
        previous = context.autoescape
        context.autoescape = self.setting
        output = self.nodelist.render(context)
        context.autoescape = previous
        # An "autoescape on" block has already escaped its content.
        return mark_safe(output) if self.setting else output
class CommentNode(Node):
    """Renders to nothing; the commented-out block is simply dropped."""
    def render(self, context):
        return ""
class CsrfTokenNode(Node):
    """Renders the hidden CSRF form input from the "csrf_token" context value."""

    def render(self, context):
        csrf_token = context.get("csrf_token", None)
        if not csrf_token:
            # It's very probable that the token is missing because of
            # misconfiguration, so we raise a warning
            from django.conf import settings

            if settings.DEBUG:
                import warnings

                warnings.warn(
                    "A {% csrf_token %} was used in a template, but the context did not provide the value. This is usually caused by not using RequestContext."
                )
            return ""
        if csrf_token == "NOTPROVIDED":
            return format_html("")
        return format_html(
            "<input type='hidden' name='csrfmiddlewaretoken' value='{0}' />",
            csrf_token,
        )
class CycleNode(Node):
    """Emits the next value of a cycle each time it is rendered."""

    def __init__(self, cyclevars, variable_name=None, silent=False):
        self.cyclevars = cyclevars
        self.variable_name = variable_name
        self.silent = silent

    def render(self, context):
        if self not in context.render_context:
            # First rendering of this node in the template: the iterator
            # lives in render_context so state isn't shared across renders.
            context.render_context[self] = itertools_cycle(self.cyclevars)
        value = next(context.render_context[self]).resolve(context)
        if self.variable_name:
            context[self.variable_name] = value
        return "" if self.silent else value
class DebugNode(Node):
    """Dumps the current rendering context and sys.modules for debugging."""

    def render(self, context):
        from pprint import pformat

        pieces = [pformat(val) for val in context]
        pieces.append("\n\n")
        pieces.append(pformat(sys.modules))
        return "".join(pieces)
class FilterNode(Node):
    """Renders a block and pipes the result through a filter expression."""

    def __init__(self, filter_expr, nodelist):
        self.filter_expr = filter_expr
        self.nodelist = nodelist

    def render(self, context):
        rendered = self.nodelist.render(context)
        # Expose the rendered block as "var" so the compiled expression
        # ("var|...") can resolve it, then drop the extra context layer.
        context.update({"var": rendered})
        filtered = self.filter_expr.resolve(context)
        context.pop()
        return filtered
class FirstOfNode(Node):
    """Outputs the first truthy variable, or the empty string."""

    def __init__(self, vars):
        self.vars = vars

    def render(self, context):
        for candidate in self.vars:
            resolved = candidate.resolve(context, True)
            if resolved:
                return smart_text(resolved)
        return ""
class ForNode(Node):
    """Implements the {% for %} tag, including the {% empty %} clause and
    the "forloop" context variable (counter, revcounter, first, last,
    parentloop)."""
    child_nodelists = ("nodelist_loop", "nodelist_empty")
    def __init__(
        self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None
    ):
        self.loopvars, self.sequence = loopvars, sequence
        self.is_reversed = is_reversed
        self.nodelist_loop = nodelist_loop
        if nodelist_empty is None:
            # No {% empty %} clause: fall back to an empty nodelist so
            # render() can call it unconditionally.
            self.nodelist_empty = NodeList()
        else:
            self.nodelist_empty = nodelist_empty
    def __repr__(self):
        reversed_text = self.is_reversed and " reversed" or ""
        return "<For Node: for %s in %s, tail_len: %d%s>" % (
            ", ".join(self.loopvars),
            self.sequence,
            len(self.nodelist_loop),
            reversed_text,
        )
    def __iter__(self):
        # Iterate the loop body first, then the {% empty %} body.
        for node in self.nodelist_loop:
            yield node
        for node in self.nodelist_empty:
            yield node
    def render(self, context):
        # Remember the enclosing loop's forloop dict so nested loops can
        # expose it as forloop.parentloop.
        if "forloop" in context:
            parentloop = context["forloop"]
        else:
            parentloop = {}
        context.push()
        try:
            values = self.sequence.resolve(context, True)
        except VariableDoesNotExist:
            # Missing sequence fails silently, like other template lookups.
            values = []
        if values is None:
            values = []
        if not hasattr(values, "__len__"):
            # Materialize generators/iterators: the counters below need a
            # known length up front.
            values = list(values)
        len_values = len(values)
        if len_values < 1:
            context.pop()
            return self.nodelist_empty.render(context)
        nodelist = NodeList()
        if self.is_reversed:
            values = reversed(values)
        unpack = len(self.loopvars) > 1
        # Create a forloop value in the context. We'll update counters on each
        # iteration just below.
        loop_dict = context["forloop"] = {"parentloop": parentloop}
        for i, item in enumerate(values):
            # Shortcuts for current loop iteration number.
            loop_dict["counter0"] = i
            loop_dict["counter"] = i + 1
            # Reverse counter iteration numbers.
            loop_dict["revcounter"] = len_values - i
            loop_dict["revcounter0"] = len_values - i - 1
            # Boolean values designating first and last times through loop.
            loop_dict["first"] = i == 0
            loop_dict["last"] = i == len_values - 1
            pop_context = False
            if unpack:
                # If there are multiple loop variables, unpack the item into
                # them.
                try:
                    unpacked_vars = dict(list(zip(self.loopvars, item)))
                except TypeError:
                    # Item isn't iterable; leave the loop vars unset.
                    pass
                else:
                    pop_context = True
                    context.update(unpacked_vars)
            else:
                context[self.loopvars[0]] = item
            # In TEMPLATE_DEBUG mode provide source of the node which
            # actually raised the exception
            if settings.TEMPLATE_DEBUG:
                for node in self.nodelist_loop:
                    try:
                        nodelist.append(node.render(context))
                    except Exception as e:
                        if not hasattr(e, "django_template_source"):
                            e.django_template_source = node.source
                        raise
            else:
                for node in self.nodelist_loop:
                    nodelist.append(node.render(context))
            if pop_context:
                # The loop variables were pushed on to the context so pop them
                # off again. This is necessary because the tag lets the length
                # of loopvars differ to the length of each set of items and we
                # don't want to leave any vars from the previous loop on the
                # context.
                context.pop()
        context.pop()
        return nodelist.render(context)
class IfChangedNode(Node):
    """Implements {% ifchanged %}: renders its contents only when the
    watched value(s) differ from the previous iteration."""
    child_nodelists = ("nodelist_true", "nodelist_false")
    def __init__(self, nodelist_true, nodelist_false, *varlist):
        self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
        # Value seen on the previous render; compared against each time.
        self._last_seen = None
        # Optional explicit variables to watch instead of the rendered body.
        self._varlist = varlist
        # Unique marker used to reset state once per enclosing for-loop.
        self._id = str(id(self))
    def render(self, context):
        # Reset the comparison state at the start of each enclosing loop so
        # state doesn't leak between separate loops over the same node.
        if "forloop" in context and self._id not in context["forloop"]:
            self._last_seen = None
            context["forloop"][self._id] = 1
        try:
            if self._varlist:
                # Consider multiple parameters. This automatically behaves
                # like an OR evaluation of the multiple variables.
                compare_to = [var.resolve(context, True) for var in self._varlist]
            else:
                compare_to = self.nodelist_true.render(context)
        except VariableDoesNotExist:
            compare_to = None
        if compare_to != self._last_seen:
            self._last_seen = compare_to
            content = self.nodelist_true.render(context)
            return content
        elif self.nodelist_false:
            # Unchanged: render the {% else %} clause if present.
            return self.nodelist_false.render(context)
        return ""
class IfEqualNode(Node):
    """Renders one of two blocks depending on whether two values compare
    equal (or unequal, when negate is set)."""

    child_nodelists = ("nodelist_true", "nodelist_false")

    def __init__(self, var1, var2, nodelist_true, nodelist_false, negate):
        self.var1, self.var2 = var1, var2
        self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
        self.negate = negate

    def __repr__(self):
        return "<IfEqualNode>"

    def render(self, context):
        first = self.var1.resolve(context, True)
        second = self.var2.resolve(context, True)
        if (self.negate and first != second) or (not self.negate and first == second):
            return self.nodelist_true.render(context)
        return self.nodelist_false.render(context)
class IfNode(Node):
    """Renders the nodelist of the first branch whose condition holds."""

    def __init__(self, conditions_nodelists):
        # List of (condition, nodelist) pairs; condition is None for the
        # trailing {% else %} branch.
        self.conditions_nodelists = conditions_nodelists

    def __repr__(self):
        return "<IfNode>"

    def __iter__(self):
        for _, nodelist in self.conditions_nodelists:
            for node in nodelist:
                yield node

    @property
    def nodelist(self):
        return NodeList(
            node for _, nodelist in self.conditions_nodelists for node in nodelist
        )

    def render(self, context):
        for condition, nodelist in self.conditions_nodelists:
            if condition is None:
                # {% else %} clause: unconditionally taken once reached.
                return nodelist.render(context)
            try:
                matched = condition.eval(context)
            except VariableDoesNotExist:
                # Unresolvable condition counts as false.
                continue
            if matched:
                return nodelist.render(context)
        return ""
class RegroupNode(Node):
    """Implements {% regroup %}: groups a list of alike objects by a
    common attribute and stores the grouped result in a context variable.
    """

    def __init__(self, target, expression, var_name):
        self.target, self.expression = target, expression
        self.var_name = var_name

    def resolve_expression(self, obj, context):
        # This method is called for each object in self.target. See regroup()
        # for the reason why we temporarily put the object in the context.
        context[self.var_name] = obj
        return self.expression.resolve(context, True)

    def render(self, context):
        obj_list = self.target.resolve(context, True)
        # Use "is None" (identity), not "== None": equality could invoke an
        # arbitrary __eq__ on the resolved object.
        if obj_list is None:
            # target variable wasn't found in context; fail silently.
            context[self.var_name] = []
            return ""
        # List of dictionaries in the format:
        # {'grouper': 'key', 'list': [list of contents]}.
        # Note: groupby() merges only *adjacent* equal keys, so the input
        # is expected to be ordered by the grouping expression.
        context[self.var_name] = [
            {"grouper": key, "list": list(val)}
            for key, val in groupby(
                obj_list, lambda obj: self.resolve_expression(obj, context)
            )
        ]
        return ""
def include_is_allowed(filepath):
    """True if *filepath* starts with one of settings.ALLOWED_INCLUDE_ROOTS."""
    return any(
        filepath.startswith(root) for root in settings.ALLOWED_INCLUDE_ROOTS
    )
class SsiNode(Node):
    """Implements {% ssi %}: inlines the contents of a file from disk."""
    def __init__(self, filepath, parsed):
        self.filepath = filepath
        # When true, the included file is compiled and rendered as a
        # template instead of being inserted verbatim.
        self.parsed = parsed
    def render(self, context):
        filepath = self.filepath.resolve(context)
        # Refuse paths outside ALLOWED_INCLUDE_ROOTS so templates can't
        # disclose arbitrary files.
        if not include_is_allowed(filepath):
            if settings.DEBUG:
                return "[Didn't have permission to include file]"
            else:
                return ""  # Fail silently for invalid includes.
        try:
            with open(filepath, "r") as fp:
                output = fp.read()
        except IOError:
            # Unreadable file renders as nothing.
            output = ""
        if self.parsed:
            try:
                t = Template(output, name=filepath)
                return t.render(context)
            except TemplateSyntaxError as e:
                if settings.DEBUG:
                    return "[Included template had syntax error: %s]" % e
                else:
                    return ""  # Fail silently for invalid included templates.
        return output
class LoadNode(Node):
    """{% load %} only has parse-time effects, so it renders to nothing."""
    def render(self, context):
        return ""
class NowNode(Node):
    """Prints the current date/time, formatted with the given format string."""

    def __init__(self, format_string):
        self.format_string = format_string

    def render(self, context):
        # Honor the active time zone only when USE_TZ is enabled.
        if settings.USE_TZ:
            tzinfo = timezone.get_current_timezone()
        else:
            tzinfo = None
        return date(datetime.now(tz=tzinfo), self.format_string)
class SpacelessNode(Node):
    """Strips whitespace between HTML tags in the rendered block."""

    def __init__(self, nodelist):
        self.nodelist = nodelist

    def render(self, context):
        from django.utils.html import strip_spaces_between_tags

        rendered = self.nodelist.render(context).strip()
        return strip_spaces_between_tags(rendered)
class TemplateTagNode(Node):
    """Outputs one of the raw template-syntax strings ("{%", "}}", ...)."""

    # Tag argument -> literal string emitted.
    mapping = {
        "openblock": BLOCK_TAG_START,
        "closeblock": BLOCK_TAG_END,
        "openvariable": VARIABLE_TAG_START,
        "closevariable": VARIABLE_TAG_END,
        "openbrace": SINGLE_BRACE_START,
        "closebrace": SINGLE_BRACE_END,
        "opencomment": COMMENT_TAG_START,
        "closecomment": COMMENT_TAG_END,
    }

    def __init__(self, tagtype):
        self.tagtype = tagtype

    def render(self, context):
        try:
            return self.mapping[self.tagtype]
        except KeyError:
            return ""
class URLNode(Node):
    """Implements {% url %}: reverses a named URL, optionally storing it
    in a context variable via the "as var" form."""
    def __init__(self, view_name, args, kwargs, asvar):
        self.view_name = view_name
        self.args = args
        self.kwargs = kwargs
        self.asvar = asvar
    def render(self, context):
        from django.core.urlresolvers import reverse, NoReverseMatch
        args = [arg.resolve(context) for arg in self.args]
        # kwarg names must be identifiers, so coerce keys to ascii str.
        kwargs = dict(
            [
                (smart_text(k, "ascii"), v.resolve(context))
                for k, v in list(self.kwargs.items())
            ]
        )
        view_name = self.view_name.resolve(context)
        if not view_name:
            raise NoReverseMatch(
                "'url' requires a non-empty first argument. "
                "The syntax changed in Django 1.5, see the docs."
            )
        # Try to look up the URL twice: once given the view name, and again
        # relative to what we guess is the "main" app. If they both fail,
        # re-raise the NoReverseMatch unless we're using the
        # {% url ... as var %} construct in which case return nothing.
        url = ""
        try:
            url = reverse(
                view_name, args=args, kwargs=kwargs, current_app=context.current_app
            )
        except NoReverseMatch as e:
            if settings.SETTINGS_MODULE:
                # Second attempt: prefix the view name with the project
                # package guessed from SETTINGS_MODULE.
                project_name = settings.SETTINGS_MODULE.split(".")[0]
                try:
                    url = reverse(
                        project_name + "." + view_name,
                        args=args,
                        kwargs=kwargs,
                        current_app=context.current_app,
                    )
                except NoReverseMatch:
                    if self.asvar is None:
                        # Re-raise the original exception, not the one with
                        # the path relative to the project. This makes a
                        # better error message.
                        raise e
            else:
                if self.asvar is None:
                    raise e
        if self.asvar:
            # "as var" form: store the URL (possibly "") and output nothing.
            context[self.asvar] = url
            return ""
        else:
            return url
class VerbatimNode(Node):
    """Outputs pre-captured template text without any further parsing."""
    def __init__(self, content):
        # Raw text captured between {% verbatim %} and {% endverbatim %}.
        self.content = content
    def render(self, context):
        return self.content
class WidthRatioNode(Node):
    """Implements {% widthratio %}: renders value/max_value scaled into
    max_width, rounded to an integer string (e.g. for bar-chart widths).
    """

    def __init__(self, val_expr, max_expr, max_width):
        self.val_expr = val_expr
        self.max_expr = max_expr
        self.max_width = max_width

    def render(self, context):
        try:
            value = self.val_expr.resolve(context)
            max_value = self.max_expr.resolve(context)
            max_width = int(self.max_width.resolve(context))
        except VariableDoesNotExist:
            # Missing variables fail silently, like elsewhere in templates.
            return ""
        except (ValueError, TypeError):
            # int() failed: the width argument wasn't numeric.
            # (Message fixed: previously read "must be an number".)
            raise TemplateSyntaxError("widthratio final argument must be a number")
        try:
            value = float(value)
            max_value = float(max_value)
            ratio = (value / max_value) * max_width
        except ZeroDivisionError:
            return "0"
        except (ValueError, TypeError):
            # Non-numeric value/max_value fail silently.
            return ""
        return str(int(round(ratio)))
class WithNode(Node):
    """Implements {% with %}: renders a block with extra context variables."""

    def __init__(self, var, name, nodelist, extra_context=None):
        self.nodelist = nodelist
        # var and name are legacy attributes, being left in case they are used
        # by third-party subclasses of this Node.
        self.extra_context = extra_context or {}
        if name:
            self.extra_context[name] = var

    def __repr__(self):
        return "<WithNode>"

    def render(self, context):
        resolved = {
            key: expr.resolve(context)
            for key, expr in six.iteritems(self.extra_context)
        }
        context.update(resolved)
        rendered = self.nodelist.render(context)
        context.pop()
        return rendered
@register.tag
def autoescape(parser, token):
    """
    Force autoescape behavior for this block.
    """
    bits = token.contents.split()
    if len(bits) != 2:
        raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.")
    mode = bits[1]
    if mode not in ("on", "off"):
        raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'")
    nodelist = parser.parse(("endautoescape",))
    parser.delete_first_token()
    return AutoEscapeControlNode(mode == "on", nodelist)
@register.tag
def comment(parser, token):
    """
    Ignores everything between ``{% comment %}`` and ``{% endcomment %}``.
    """
    # Discard all tokens up to and including {% endcomment %}; nothing in
    # the block is compiled or rendered.
    parser.skip_past("endcomment")
    return CommentNode()
@register.tag
def cycle(parser, token):
    """
    Cycles among the given strings each time this tag is encountered.
    Within a loop, cycles among the given strings each time through
    the loop::
        {% for o in some_list %}
            <tr class="{% cycle 'row1' 'row2' %}">
                ...
            </tr>
        {% endfor %}
    Outside of a loop, give the values a unique name the first time you call
    it, then use that name each successive time through::
            <tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr>
            <tr class="{% cycle rowcolors %}">...</tr>
            <tr class="{% cycle rowcolors %}">...</tr>
    You can use any number of values, separated by spaces. Commas can also
    be used to separate values; if a comma is used, the cycle values are
    interpreted as literal strings.
    The optional flag "silent" can be used to prevent the cycle declaration
    from returning any value::
        {% for o in some_list %}
            {% cycle 'row1' 'row2' as rowcolors silent %}
            <tr class="{{ rowcolors }}">{% include "subtemplate.html " %}</tr>
        {% endfor %}
    """
    # Note: This returns the exact same node on each {% cycle name %} call;
    # that is, the node object returned from {% cycle a b c as name %} and the
    # one returned from {% cycle name %} are the exact same object. This
    # shouldn't cause problems (heh), but if it does, now you know.
    #
    # Ugly hack warning: This stuffs the named template dict into parser so
    # that names are only unique within each template (as opposed to using
    # a global variable, which would make cycle names have to be unique across
    # *all* templates.
    args = token.split_contents()
    if len(args) < 2:
        raise TemplateSyntaxError("'cycle' tag requires at least two arguments")
    if "," in args[1]:
        # Backwards compatibility: {% cycle a,b %} or {% cycle a,b as foo %}
        # case.
        args[1:2] = ['"%s"' % arg for arg in args[1].split(",")]
    if len(args) == 2:
        # {% cycle foo %} case.
        name = args[1]
        if not hasattr(parser, "_namedCycleNodes"):
            raise TemplateSyntaxError(
                "No named cycles in template. '%s' is not defined" % name
            )
        if not name in parser._namedCycleNodes:
            raise TemplateSyntaxError("Named cycle '%s' does not exist" % name)
        return parser._namedCycleNodes[name]
    as_form = False
    if len(args) > 4:
        # {% cycle ... as foo [silent] %} case.
        if args[-3] == "as":
            if args[-1] != "silent":
                raise TemplateSyntaxError(
                    "Only 'silent' flag is allowed after cycle's name, not '%s'."
                    % args[-1]
                )
            as_form = True
            silent = True
            # Drop the trailing "silent" token so the name/values slicing
            # below works uniformly.
            args = args[:-1]
        elif args[-2] == "as":
            as_form = True
            silent = False
    # NOTE(review): a four-token form like {% cycle val as name %} skips the
    # block above (len(args) == 4) and is treated as plain cycle values --
    # confirm whether that is intended.
    if as_form:
        name = args[-1]
        values = [parser.compile_filter(arg) for arg in args[1:-2]]
        node = CycleNode(values, name, silent=silent)
        # Register the named node on the parser for later {% cycle name %}.
        if not hasattr(parser, "_namedCycleNodes"):
            parser._namedCycleNodes = {}
        parser._namedCycleNodes[name] = node
    else:
        values = [parser.compile_filter(arg) for arg in args[1:]]
        node = CycleNode(values)
    return node
@register.tag
def csrf_token(parser, token):
    """Renders the hidden CSRF form input; takes no arguments."""
    # All the work happens at render time in CsrfTokenNode.
    return CsrfTokenNode()
@register.tag
def debug(parser, token):
    """
    Outputs a whole load of debugging information, including the current
    context and imported modules.
    Sample usage::
        <pre>
            {% debug %}
        </pre>
    """
    # All output is produced at render time by DebugNode.
    return DebugNode()
@register.tag("filter")
def do_filter(parser, token):
    """
    Filters the contents of the block through variable filters.

    Filters can also be piped through each other, and they can have
    arguments -- just like in variable syntax.  Sample usage::

        {% filter force_escape|lower %}
            This text will be HTML-escaped, and will appear in lowercase.
        {% endfilter %}

    Note that the ``escape`` and ``safe`` filters are not acceptable
    arguments; use the ``autoescape`` tag to manage autoescaping for
    blocks of template code instead.
    """
    _, rest = token.contents.split(None, 1)
    # Compile "var|<filters>" so FilterNode can feed the rendered block in
    # through the "var" context variable.
    filter_expr = parser.compile_filter("var|%s" % (rest))
    for func, unused in filter_expr.filters:
        filter_name = getattr(func, "_decorated_function", func).__name__
        if filter_name in ("escape", "safe"):
            raise TemplateSyntaxError(
                '"filter %s" is not permitted. Use the "autoescape" tag instead.'
                % func.__name__
            )
    nodelist = parser.parse(("endfilter",))
    parser.delete_first_token()
    return FilterNode(filter_expr, nodelist)
@register.tag
def firstof(parser, token):
    """
    Outputs the first variable passed that is not False, without escaping;
    outputs nothing if all the passed variables are False.

    Sample usage::

        {% firstof var1 var2 var3 %}

    This is equivalent to a chain of ``{% if %}/{% else %}`` blocks
    printing ``{{ varN|safe }}`` -- but much cleaner.  A literal string
    may be given as a fallback value in case all variables are False::

        {% firstof var1 var2 var3 "fallback value" %}

    To escape the output, wrap the tag in a ``{% filter force_escape %}``
    block.
    """
    bits = token.split_contents()[1:]
    if not bits:
        raise TemplateSyntaxError("'firstof' statement requires at least one argument")
    return FirstOfNode([parser.compile_filter(bit) for bit in bits])
@register.tag("for")
def do_for(parser, token):
    """
    Loops over each item in an array.

    For example, to display a list of athletes given ``athlete_list``::

        <ul>
        {% for athlete in athlete_list %}
            <li>{{ athlete.name }}</li>
        {% endfor %}
        </ul>

    You can loop over a list in reverse by using
    ``{% for obj in list reversed %}``.

    You can also unpack multiple values from a two-dimensional array::

        {% for key,value in dict.items %}
            {{ key }}: {{ value }}
        {% endfor %}

    The ``for`` tag can take an optional ``{% empty %}`` clause that will
    be displayed if the given array is empty or could not be found::

        <ul>
          {% for athlete in athlete_list %}
            <li>{{ athlete.name }}</li>
          {% empty %}
            <li>Sorry, no athletes in this list.</li>
          {% endfor %}
        </ul>

    The above is equivalent to -- but shorter, cleaner, and possibly faster
    than -- the following::

        <ul>
          {% if athlete_list %}
            {% for athlete in athlete_list %}
              <li>{{ athlete.name }}</li>
            {% endfor %}
          {% else %}
            <li>Sorry, no athletes in this list.</li>
          {% endif %}
        </ul>

    The for loop sets a number of variables available within the loop:

        ==========================  ================================================
        Variable                    Description
        ==========================  ================================================
        ``forloop.counter``         The current iteration of the loop (1-indexed)
        ``forloop.counter0``        The current iteration of the loop (0-indexed)
        ``forloop.revcounter``      The number of iterations from the end of the
                                    loop (1-indexed)
        ``forloop.revcounter0``     The number of iterations from the end of the
                                    loop (0-indexed)
        ``forloop.first``           True if this is the first time through the loop
        ``forloop.last``            True if this is the last time through the loop
        ``forloop.parentloop``      For nested loops, this is the loop "above" the
                                    current one
        ==========================  ================================================
    """
    bits = token.contents.split()
    if len(bits) < 4:
        raise TemplateSyntaxError(
            "'for' statements should have at least four" " words: %s" % token.contents
        )
    is_reversed = bits[-1] == "reversed"
    # Position of the mandatory "in" keyword; a trailing "reversed" occupies
    # the last slot and shifts "in" one place left.
    in_index = -3 if is_reversed else -2
    if bits[in_index] != "in":
        raise TemplateSyntaxError(
            "'for' statements should use the format"
            " 'for x in y': %s" % token.contents
        )
    # Loop variables may be separated by commas, optionally with spaces.
    loopvars = re.split(r" *, *", " ".join(bits[1:in_index]))
    for var in loopvars:
        if not var or " " in var:
            raise TemplateSyntaxError(
                "'for' tag received an invalid argument:" " %s" % token.contents
            )
    sequence = parser.compile_filter(bits[in_index + 1])
    nodelist_loop = parser.parse(("empty", "endfor"))
    token = parser.next_token()
    if token.contents == "empty":
        nodelist_empty = parser.parse(("endfor",))
        parser.delete_first_token()
    else:
        nodelist_empty = None
    return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)
def do_ifequal(parser, token, negate):
    """Shared compilation logic for ``ifequal`` and ``ifnotequal``."""
    pieces = list(token.split_contents())
    if len(pieces) != 3:
        raise TemplateSyntaxError("%r takes two arguments" % pieces[0])
    end_tag = "end" + pieces[0]
    true_nodes = parser.parse(("else", end_tag))
    next_tok = parser.next_token()
    if next_tok.contents == "else":
        false_nodes = parser.parse((end_tag,))
        parser.delete_first_token()
    else:
        false_nodes = NodeList()
    left = parser.compile_filter(pieces[1])
    right = parser.compile_filter(pieces[2])
    return IfEqualNode(left, right, true_nodes, false_nodes, negate)
@register.tag
def ifequal(parser, token):
    """
    Outputs the contents of the block if the two arguments equal each other.

    Examples::

        {% ifequal user.id comment.user_id %}
            ...
        {% endifequal %}

        {% ifnotequal user.id comment.user_id %}
            ...
        {% else %}
            ...
        {% endifnotequal %}
    """
    # Delegate to the shared ifequal/ifnotequal parser without negation.
    return do_ifequal(parser, token, negate=False)
@register.tag
def ifnotequal(parser, token):
    """
    Outputs the contents of the block if the two arguments are not equal.

    See ifequal.
    """
    # Delegate to the shared ifequal/ifnotequal parser with negation.
    return do_ifequal(parser, token, negate=True)
class TemplateLiteral(Literal):
    # Adapts a compiled template expression to the smartif ``Literal``
    # interface so variables and filters can appear inside {% if %}.
    def __init__(self, value, text):
        self.value = value
        self.text = text  # for better error messages

    def display(self):
        return self.text

    def eval(self, context):
        # ignore_failures=True: an unresolvable variable evaluates to None
        # instead of raising, matching {% if %} truthiness semantics.
        return self.value.resolve(context, ignore_failures=True)
class TemplateIfParser(IfParser):
    # Specializes smartif's IfParser for templates: syntax errors surface as
    # TemplateSyntaxError, and operands are compiled through the template
    # parser so filters work inside {% if %} conditions.
    error_class = TemplateSyntaxError

    def __init__(self, parser, *args, **kwargs):
        # Keep a handle on the template parser for compile_filter() below.
        self.template_parser = parser
        super(TemplateIfParser, self).__init__(*args, **kwargs)

    def create_var(self, value):
        return TemplateLiteral(self.template_parser.compile_filter(value), value)
@register.tag("if")
def do_if(parser, token):
    """
    The ``{% if %}`` tag evaluates a variable, and if that variable is "true"
    (i.e., exists, is not empty, and is not a false boolean value), the
    contents of the block are output::

        {% if athlete_list %}
            Number of athletes: {{ athlete_list|count }}
        {% elif athlete_in_locker_room_list %}
            Athletes should be out of the locker room soon!
        {% else %}
            No athletes.
        {% endif %}

    Any number of optional ``{% elif %}`` clauses may follow, plus one
    optional ``{% else %}`` clause rendered when all previous conditions
    fail. Conditions may combine ``or``, ``and`` and ``not``, use filters,
    and compare values with ``==`` (or ``=``), ``!=``, ``>``, ``>=``, ``<``,
    ``<=``, ``in`` and ``not in``. Arguments and operators _must_ have a
    space between them, so ``{% if 1>2 %}`` is not a valid if tag.
    Operator precedence follows Python.
    """
    conditions_nodelists = []
    # The opening {% if %} and every {% elif %} are handled identically:
    # each contributes one (condition, nodelist) pair.
    while True:
        bits = token.split_contents()[1:]
        condition = TemplateIfParser(parser, bits).parse()
        body = parser.parse(("elif", "else", "endif"))
        conditions_nodelists.append((condition, body))
        token = parser.next_token()
        if not token.contents.startswith("elif"):
            break
    # An {% else %} branch is recorded with a None condition.
    if token.contents == "else":
        conditions_nodelists.append((None, parser.parse(("endif",))))
        token = parser.next_token()
    assert token.contents == "endif"
    return IfNode(conditions_nodelists)
@register.tag
def ifchanged(parser, token):
    """
    Checks if a value has changed from the last iteration of a loop.

    The ``{% ifchanged %}`` block tag is used within a loop. It has two
    possible uses.

    1. With no arguments, it checks its own rendered contents against its
       previous state and only displays the content if it has changed::

        <h1>Archive for {{ year }}</h1>
        {% for date in days %}
            {% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %}
            <a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a>
        {% endfor %}

    2. Given one or more variables, it checks whether any variable changed::

        {% for date in days %}
            {% ifchanged date.date %} {{ date.date }} {% endifchanged %}
            {% ifchanged date.hour date.date %}
                {{ date.hour }}
            {% endifchanged %}
        {% endfor %}

    An optional ``{% else %}`` block is rendered when nothing has changed.
    """
    parts = token.contents.split()
    changed_nodes = parser.parse(("else", "endifchanged"))
    tok = parser.next_token()
    if tok.contents != "else":
        unchanged_nodes = NodeList()
    else:
        unchanged_nodes = parser.parse(("endifchanged",))
        parser.delete_first_token()
    tracked = [parser.compile_filter(part) for part in parts[1:]]
    return IfChangedNode(changed_nodes, unchanged_nodes, *tracked)
@register.tag
def ssi(parser, token):
    """
    Outputs the contents of a given file into the page.

    Like a simple "include" tag, the ``ssi`` tag includes the contents
    of another file -- which must be specified using an absolute path --
    in the current page::

        {% ssi "/home/html/ljworld.com/includes/right_generic.html" %}

    If the optional "parsed" parameter is given, the contents of the included
    file are evaluated as template code, with the current context::

        {% ssi "/home/html/ljworld.com/includes/right_generic.html" parsed %}
    """
    pieces = token.split_contents()
    if len(pieces) not in (2, 3):
        raise TemplateSyntaxError(
            "'ssi' tag takes one argument: the path to" " the file to be included"
        )
    parsed = len(pieces) == 3
    if parsed and pieces[2] != "parsed":
        raise TemplateSyntaxError(
            "Second (optional) argument to %s tag" " must be 'parsed'" % pieces[0]
        )
    return SsiNode(parser.compile_filter(pieces[1]), parsed)
@register.tag
def load(parser, token):
    """
    Loads a custom template tag set.

    For example, to load the template tags in
    ``django/templatetags/news/photos.py``::

        {% load news.photos %}

    Can also be used to load an individual tag/filter from
    a library::

        {% load byline from news %}
    """
    words = token.contents.split()
    if len(words) >= 4 and words[-2] == "from":
        # "{% load name1 name2 ... from library %}": copy only the named
        # tags/filters into a temporary library.
        taglib = words[-1]
        try:
            lib = get_library(taglib)
        except InvalidTemplateLibrary as e:
            raise TemplateSyntaxError(
                "'%s' is not a valid tag library: %s" % (taglib, e)
            )
        temp_lib = Library()
        for name in words[1:-2]:
            found = False
            if name in lib.tags:
                temp_lib.tags[name] = lib.tags[name]
                found = True
            # a name could be a tag *and* a filter, so check for both
            if name in lib.filters:
                temp_lib.filters[name] = lib.filters[name]
                found = True
            if not found:
                raise TemplateSyntaxError(
                    "'%s' is not a valid tag or filter in tag library '%s'"
                    % (name, taglib)
                )
        parser.add_library(temp_lib)
    else:
        for taglib in words[1:]:
            # add the library to the parser
            try:
                parser.add_library(get_library(taglib))
            except InvalidTemplateLibrary as e:
                raise TemplateSyntaxError(
                    "'%s' is not a valid tag library: %s" % (taglib, e)
                )
    return LoadNode()
@register.tag
def now(parser, token):
    """
    Displays the date, formatted according to the given string.

    Uses the same format as PHP's ``date()`` function; see http://php.net/date
    for all the possible values.

    Sample usage::

        It is {% now "jS F Y H:i" %}
    """
    pieces = token.split_contents()
    if len(pieces) != 2:
        raise TemplateSyntaxError("'now' statement takes one argument")
    # Strip the surrounding quote characters from the format argument.
    return NowNode(pieces[1][1:-1])
@register.tag
def regroup(parser, token):
    """
    Regroups a list of alike objects by a common attribute.

    This complex tag is best illustrated by use of an example: say that
    ``people`` is a list of ``Person`` objects that have ``first_name``,
    ``last_name``, and ``gender`` attributes, and you'd like to display a list
    that looks like:

        * Male:
            * George Bush
            * Bill Clinton
        * Female:
            * Margaret Thatcher
            * Colendeeza Rice
        * Unknown:
            * Pat Smith

    The following snippet of template code would accomplish this dubious task::

        {% regroup people by gender as grouped %}
        <ul>
        {% for group in grouped %}
            <li>{{ group.grouper }}
            <ul>
                {% for item in group.list %}
                    <li>{{ item }}</li>
                {% endfor %}
            </ul>
        {% endfor %}
        </ul>

    As you can see, ``{% regroup %}`` populates a variable with a list of
    objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the
    item that was grouped by; ``list`` contains the list of objects that share
    that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female``
    and ``Unknown``, and ``list`` is the list of people with those genders.

    Note that ``{% regroup %}`` does not work when the list to be grouped is not
    sorted by the key you are grouping by! This means that if your list of
    people was not sorted by gender, you'd need to make sure it is sorted
    before using it, i.e.::

        {% regroup people|dictsort:"gender" by gender as grouped %}
    """
    firstbits = token.contents.split(None, 3)
    if len(firstbits) != 4:
        raise TemplateSyntaxError("'regroup' tag takes five arguments")
    target = parser.compile_filter(firstbits[1])
    if firstbits[2] != "by":
        raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'")
    # The grouping expression may itself contain spaces (e.g. a filter with
    # arguments), so the tail is parsed right-to-left: reverse the string,
    # split off two words ("as" and the target variable name), then
    # un-reverse each piece.
    lastbits_reversed = firstbits[3][::-1].split(None, 2)
    if lastbits_reversed[1][::-1] != "as":
        raise TemplateSyntaxError(
            "next-to-last argument to 'regroup' tag must" " be 'as'"
        )
    var_name = lastbits_reversed[0][::-1]
    # RegroupNode will take each item in 'target', put it in the context under
    # 'var_name', evaluate 'var_name'.'expression' in the current context, and
    # group by the resulting value. After all items are processed, it will
    # save the final result in the context under 'var_name', thus clearing the
    # temporary values. This hack is necessary because the template engine
    # doesn't provide a context-aware equivalent of Python's getattr.
    expression = parser.compile_filter(
        var_name + VARIABLE_ATTRIBUTE_SEPARATOR + lastbits_reversed[2][::-1]
    )
    return RegroupNode(target, expression, var_name)
@register.tag
def spaceless(parser, token):
    """
    Removes whitespace between HTML tags, including tab and newline characters.

    Example usage::

        {% spaceless %}
            <p>
                <a href="foo/">Foo</a>
            </p>
        {% endspaceless %}

    This example would return this HTML::

        <p><a href="foo/">Foo</a></p>

    Only space between *tags* is normalized -- not space between tags and
    text. In this example, the space around ``Hello`` won't be stripped::

        {% spaceless %}
            <strong>
                Hello
            </strong>
        {% endspaceless %}
    """
    inner = parser.parse(("endspaceless",))
    parser.delete_first_token()
    return SpacelessNode(inner)
@register.tag
def templatetag(parser, token):
    """
    Outputs one of the bits used to compose template tags.

    Since the template system has no concept of "escaping", to display one of
    the bits used in template tags, you must use the ``{% templatetag %}`` tag.

    The argument tells which template bit to output:

        ==================  =======
        Argument            Outputs
        ==================  =======
        ``openblock``       ``{%``
        ``closeblock``      ``%}``
        ``openvariable``    ``{{``
        ``closevariable``   ``}}``
        ``openbrace``       ``{``
        ``closebrace``      ``}``
        ``opencomment``     ``{#``
        ``closecomment``    ``#}``
        ==================  =======
    """
    pieces = token.contents.split()
    if len(pieces) != 2:
        raise TemplateSyntaxError("'templatetag' statement takes one argument")
    chosen = pieces[1]
    if chosen not in TemplateTagNode.mapping:
        raise TemplateSyntaxError(
            "Invalid templatetag argument: '%s'."
            " Must be one of: %s" % (chosen, list(TemplateTagNode.mapping))
        )
    return TemplateTagNode(chosen)
@register.tag
def url(parser, token):
    r"""
    Returns an absolute URL matching given view with its parameters.

    This is a way to define links that aren't tied to a particular URL
    configuration::

        {% url "path.to.some_view" arg1 arg2 %}

        or

        {% url "path.to.some_view" name1=value1 name2=value2 %}

    The first argument is a path to a view. It can be an absolute Python path
    or just ``app_name.view_name`` without the project name if the view is
    located inside the project.

    Other arguments are space-separated values that will be filled in place of
    positional and keyword arguments in the URL. Don't mix positional and
    keyword arguments. All arguments for the URL should be present.

    For example if you have a view ``app_name.client`` taking client's id and
    the corresponding line in a URLconf looks like this::

        ('^client/(\d+)/$', 'app_name.client')

    and this app's URLconf is included into the project's URLconf under some
    path::

        ('^clients/', include('project_name.app_name.urls'))

    then in a template you can create a link for a certain client like this::

        {% url "app_name.client" client.id %}

    The URL will look like ``/clients/client/123/``.

    The first argument can also be a named URL instead of the Python path to
    the view callable. For example if the URLconf entry looks like this::

        url('^client/(\d+)/$', name='client-detail-view')

    then in the template you can use::

        {% url "client-detail-view" client.id %}

    The first argument may also be a template variable that evaluates to the
    view name or the URL name, e.g.::

        {% with url_name="client-detail-view" %}
            {% url url_name client.id %}
        {% endwith %}
    """
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError(
            "'%s' takes at least one argument" " (path to a view)" % bits[0]
        )
    try:
        viewname = parser.compile_filter(bits[1])
    except TemplateSyntaxError as exc:
        # Augment the error with a hint about the Django 1.5 syntax change
        # (the view name must now be quoted unless it's a variable).
        exc.args = (
            (
                exc.args[0] + ". "
                "The syntax of 'url' changed in Django 1.5, see the docs."
            ),
        )
        raise
    args = []
    kwargs = {}
    asvar = None
    bits = bits[2:]
    # Optional trailing "as varname" stores the URL instead of outputting it.
    if len(bits) >= 2 and bits[-2] == "as":
        asvar = bits[-1]
        bits = bits[:-2]
    # Remaining bits are positional or name=value arguments for the URL.
    # (The previous `if len(bits):` wrapper was redundant -- iterating an
    # empty list is already a no-op.)
    for bit in bits:
        match = kwarg_re.match(bit)
        if not match:
            raise TemplateSyntaxError("Malformed arguments to url tag")
        name, value = match.groups()
        if name:
            kwargs[name] = parser.compile_filter(value)
        else:
            args.append(parser.compile_filter(value))
    return URLNode(viewname, args, kwargs, asvar)
@register.tag
def verbatim(parser, token):
    """
    Stops the template engine from rendering the contents of this block tag.

    Usage::

        {% verbatim %}
            {% don't process this %}
        {% endverbatim %}
    """
    contents = parser.parse(("endverbatim",))
    parser.delete_first_token()
    # The block is rendered once at compile time with an empty context; the
    # resulting text is then emitted unchanged on every render.
    return VerbatimNode(contents.render(Context()))
@register.tag
def widthratio(parser, token):
    """
    For creating bar charts and such, this tag calculates the ratio of a given
    value to a maximum value, and then applies that ratio to a constant.

    For example::

        <img src='bar.gif' height='10' width='{% widthratio this_value max_value max_width %}' />

    If ``this_value`` is 175, ``max_value`` is 200, and ``max_width`` is 100,
    the image in the above example will be 88 pixels wide
    (because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88).
    """
    pieces = token.contents.split()
    if len(pieces) != 4:
        raise TemplateSyntaxError("widthratio takes three arguments")
    _tag_name, value_arg, max_arg, width_arg = pieces
    return WidthRatioNode(
        parser.compile_filter(value_arg),
        parser.compile_filter(max_arg),
        parser.compile_filter(width_arg),
    )
@register.tag("with")
def do_with(parser, token):
    """
    Adds one or more values to the context (inside of this block) for caching
    and easy access.

    For example::

        {% with total=person.some_sql_method %}
            {{ total }} object{{ total|pluralize }}
        {% endwith %}

    Multiple values can be added to the context::

        {% with foo=1 bar=2 %}
            ...
        {% endwith %}

    The legacy format of ``{% with person.some_sql_method as total %}`` is
    still accepted.
    """
    pieces = token.split_contents()
    leftover = pieces[1:]
    # token_kwargs consumes the assignments it understands from `leftover`,
    # leaving any malformed trailing tokens in place.
    extra_context = token_kwargs(leftover, parser, support_legacy=True)
    if not extra_context:
        raise TemplateSyntaxError(
            "%r expected at least one variable " "assignment" % pieces[0]
        )
    if leftover:
        raise TemplateSyntaxError(
            "%r received an invalid token: %r" % (pieces[0], leftover[0])
        )
    body = parser.parse(("endwith",))
    parser.delete_first_token()
    return WithNode(None, None, body, extra_context=extra_context)
<|endoftext|> |
<|endoftext|>"""
Fixes Python 2.4's failure to deepcopy unbound functions.
"""
import copy
import types
import warnings
warnings.warn(
    "django.utils.copycompat is deprecated; use the native copy module instead",
    DeprecationWarning,
)

# Monkeypatch copy's deepcopy registry to handle functions correctly.
# Registering FunctionType as "atomic" makes deepcopy return the function
# object unchanged instead of failing (Python 2.4 behavior being fixed).
if (
    hasattr(copy, "_deepcopy_dispatch")
    and types.FunctionType not in copy._deepcopy_dispatch
):
    copy._deepcopy_dispatch[types.FunctionType] = copy._deepcopy_atomic

# Pose as the copy module now.
# NOTE: `copy` and `types` are deleted *before* the star-import so the
# module names don't shadow anything `copy` exports; the statement order
# here is load-bearing.
del copy, types
from copy import *
<|endoftext|> |
<|endoftext|>import datetime
from django.utils.timezone import is_aware, utc
from django.utils.translation import ungettext, ugettext
def timesince(d, now=None, reversed=False):
    """
    Takes two datetime objects and returns the time between d and now
    as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
    then "0 minutes" is returned.

    Units used are years, months, weeks, days, hours, and minutes.
    Seconds and microseconds are ignored. Up to two adjacent units will be
    displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.

    Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
    """
    # Unit sizes in seconds, largest first. A month is approximated as 30
    # days and a year as 365 days.
    chunks = (
        (60 * 60 * 24 * 365, lambda n: ungettext("year", "years", n)),
        (60 * 60 * 24 * 30, lambda n: ungettext("month", "months", n)),
        (60 * 60 * 24 * 7, lambda n: ungettext("week", "weeks", n)),
        (60 * 60 * 24, lambda n: ungettext("day", "days", n)),
        (60 * 60, lambda n: ungettext("hour", "hours", n)),
        (60, lambda n: ungettext("minute", "minutes", n)),
    )
    # Convert datetime.date to datetime.datetime for comparison.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)

    if not now:
        # Match d's tz-awareness so the subtraction below doesn't raise.
        now = datetime.datetime.now(utc if is_aware(d) else None)

    delta = (d - now) if reversed else (now - d)
    # ignore microseconds
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        return "0 " + ugettext("minutes")
    # Find the largest unit with a non-zero count; `i`, `seconds`, `name`
    # and `count` deliberately leak out of the loop for use below.
    for i, (seconds, name) in enumerate(chunks):
        count = since // seconds
        if count != 0:
            break
    s = ugettext("%(number)d %(type)s") % {"number": count, "type": name(count)}
    if i + 1 < len(chunks):
        # Now get the second item: the next-smaller unit's share of the
        # remainder, shown only when non-zero (e.g. "1 year, 3 months").
        seconds2, name2 = chunks[i + 1]
        count2 = (since - (seconds * count)) // seconds2
        if count2 != 0:
            s += ugettext(", %(number)d %(type)s") % {
                "number": count2,
                "type": name2(count2),
            }
    return s
def timeuntil(d, now=None):
    """
    Like timesince, but returns a string measuring the time until
    the given time.
    """
    # Same computation with the delta direction flipped.
    return timesince(d, now, reversed=True)
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# This python file contains utility scripts to manage Django translations.
# It has to be run inside the django git root directory.
#
# The following commands are available:
#
# * update_catalogs: check for new strings in core and contrib catalogs, and
# output how much strings are new/changed.
#
# * lang_stats: output statistics for each catalog/language combination
#
# * fetch: fetch translations from transifex.com
#
# Each command support the --languages and --resources options to limit their
# operation to the specified language or resource. For example, to get stats
# for Spanish in contrib.admin, run:
#
# $ python scripts/manage_translations.py lang_stats --language=es --resources=admin
import os
from optparse import OptionParser
from subprocess import call, Popen, PIPE
from django.core.management import call_command
HAVE_JS = ["admin"]
def _get_locale_dirs(include_core=True):
    """
    Return a tuple (contrib name, absolute path) for all locale directories,
    optionally including the django core catalog.
    """
    contrib_dir = os.path.join(os.getcwd(), "django", "contrib")
    found = []
    for app in os.listdir(contrib_dir):
        locale_path = os.path.join(contrib_dir, app, "locale")
        if not os.path.isdir(locale_path):
            continue
        found.append((app, locale_path))
        # Apps with javascript catalogs get a second pseudo-resource.
        if app in HAVE_JS:
            found.append(("%s-js" % app, locale_path))
    if include_core:
        found.insert(0, ("core", os.path.join(os.getcwd(), "django", "conf", "locale")))
    return found
def _tx_resource_for_name(name):
"""Return the Transifex resource name"""
if name == "core":
return "django.core"
else:
return "django.contrib-%s" % name
def _check_diff(cat_name, base_path):
    """
    Output the approximate number of changed/added strings in the en catalog.
    """
    po_path = "%(path)s/en/LC_MESSAGES/django%(ext)s.po" % {
        "path": base_path,
        "ext": "js" if cat_name.endswith("-js") else "",
    }
    p = Popen(
        "git diff -U0 %s | egrep -v '^@@|^[-+]#|^..POT-Creation' | wc -l" % po_path,
        stdout=PIPE,
        stderr=PIPE,
        shell=True,
    )
    output, errors = p.communicate()
    # Popen pipes yield bytes on Python 3; int(bytes) raises TypeError, so
    # decode before parsing. Subtract 4 for the constant diff header lines.
    num_changes = int(output.decode().strip()) - 4
    print("%d changed/added messages in '%s' catalog." % (num_changes, cat_name))
def update_catalogs(resources=None, languages=None):
    """
    Update the en/LC_MESSAGES/django.po (main and contrib) files with
    new/updated translatable strings.
    """
    contrib_dirs = _get_locale_dirs(include_core=False)

    os.chdir(os.path.join(os.getcwd(), "django"))
    print("Updating main en catalog")
    call_command("makemessages", locale="en")
    _check_diff("core", os.path.join(os.getcwd(), "conf", "locale"))

    # Contrib catalogs
    for name, dir_ in contrib_dirs:
        # PEP 8 idiom: `name not in resources` rather than `not name in ...`.
        if resources and name not in resources:
            continue
        # makemessages must run from the app directory (one above `locale`).
        os.chdir(os.path.join(dir_, ".."))
        print("Updating en catalog in %s" % dir_)
        if name.endswith("-js"):
            call_command("makemessages", locale="en", domain="djangojs")
        else:
            call_command("makemessages", locale="en")
        _check_diff(name, dir_)
def lang_stats(resources=None, languages=None):
    """
    Output language statistics of committed translation files for each
    Django catalog.

    If resources is provided, it should be a list of translation resource to
    limit the output (e.g. ['core', 'gis']).
    """
    locale_dirs = _get_locale_dirs()
    for name, dir_ in locale_dirs:
        if resources and name not in resources:
            continue
        print("\nShowing translations stats for '%s':" % name)
        # Locale subdirectories; leading-underscore entries are not locales.
        langs = sorted(d for d in os.listdir(dir_) if not d.startswith("_"))
        for lang in langs:
            if languages and lang not in languages:
                continue
            # TODO: merge first with the latest en catalog
            p = Popen(
                "msgfmt -vc -o /dev/null %(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po"
                % {
                    "path": dir_,
                    "lang": lang,
                    "ext": "js" if name.endswith("-js") else "",
                },
                stdout=PIPE,
                stderr=PIPE,
                shell=True,
            )
            output, errors = p.communicate()
            if p.returncode == 0:
                # msgfmt reports its stats on stderr; decode the bytes so
                # Python 3 prints the text, not a bytes repr.
                print("%s: %s" % (lang, errors.decode().strip()))
def fetch(resources=None, languages=None):
    """
    Fetch translations from Transifex, wrap long lines, generate mo files.
    """
    locale_dirs = _get_locale_dirs()
    for name, dir_ in locale_dirs:
        if resources and name not in resources:
            continue
        # Transifex pull
        if languages is None:
            call(
                "tx pull -r %(res)s -a -f" % {"res": _tx_resource_for_name(name)},
                shell=True,
            )
            # Use a local list here: rebinding the `languages` parameter (as
            # the previous version did) would wrongly pin every subsequent
            # resource to the first resource's language set.
            target_langs = sorted(
                d for d in os.listdir(dir_) if not d.startswith("_")
            )
        else:
            for lang in languages:
                call(
                    "tx pull -r %(res)s -f -l %(lang)s"
                    % {"res": _tx_resource_for_name(name), "lang": lang},
                    shell=True,
                )
            target_langs = languages
        # msgcat to wrap lines and msgfmt for compilation of .mo file
        for lang in target_langs:
            po_path = "%(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po" % {
                "path": dir_,
                "lang": lang,
                "ext": "js" if name.endswith("-js") else "",
            }
            call("msgcat -o %s %s" % (po_path, po_path), shell=True)
            mo_path = "%s.mo" % po_path[:-3]
            call("msgfmt -o %s %s" % (mo_path, po_path), shell=True)
if __name__ == "__main__":
    RUNABLE_SCRIPTS = ("update_catalogs", "lang_stats", "fetch")

    parser = OptionParser(usage="usage: %prog [options] cmd")
    parser.add_option(
        "-r",
        "--resources",
        action="append",
        help="limit operation to the specified resources",
    )
    parser.add_option(
        "-l",
        "--languages",
        action="append",
        help="limit operation to the specified languages",
    )
    options, args = parser.parse_args()

    if not args:
        parser.print_usage()
        exit(1)
    if args[0] in RUNABLE_SCRIPTS:
        # Look the command function up by name instead of eval()-ing
        # user-supplied input. Behavior is identical for the whitelisted
        # names; eval of CLI arguments is a needless injection hazard.
        globals()[args[0]](options.resources, options.languages)
    else:
        print("Available commands are: %s" % ", ".join(RUNABLE_SCRIPTS))
<|endoftext|> |
<|endoftext|>"""
Tests for field subclassing.
"""
from django.db import models
from django.utils.encoding import force_text
from .fields import SmallField, SmallerField, JSONField
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class MyModel(models.Model):
    # Exercises a custom field subclass (SmallField) alongside a stock
    # CharField; the string is SmallField's verbose name.
    name = models.CharField(max_length=10)
    data = SmallField("small field")

    def __str__(self):
        return force_text(self.name)
class OtherModel(models.Model):
    # Model whose only field is the SmallerField custom subclass.
    data = SmallerField()
class DataModel(models.Model):
    # Single custom JSONField; presumably stores arbitrary Python values
    # serialized as JSON -- see fields.JSONField for the actual contract.
    data = JSONField()
<|endoftext|> |
<|endoftext|>"""
5. Many-to-many relationships
To define a many-to-many relationship, use ``ManyToManyField()``.
In this example, an ``Article`` can be published in multiple ``Publication``
objects, and a ``Publication`` has multiple ``Article`` objects.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Publication(models.Model):
    title = models.CharField(max_length=30)

    def __str__(self):
        return self.title

    class Meta:
        # Default queryset ordering: alphabetical by title.
        ordering = ("title",)
@python_2_unicode_compatible
class Article(models.Model):
    headline = models.CharField(max_length=100)
    # Forward side of the many-to-many: an article can be published in
    # multiple publications, and a publication holds multiple articles.
    publications = models.ManyToManyField(Publication)

    def __str__(self):
        return self.headline

    class Meta:
        # Default queryset ordering: alphabetical by headline.
        ordering = ("headline",)
<|endoftext|> |
<|endoftext|>"""
13. Adding hooks before/after saving and deleting
To execute arbitrary code around ``save()`` and ``delete()``, just subclass
the methods.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)

    def __init__(self, *args, **kwargs):
        super(Person, self).__init__(*args, **kwargs)
        # Per-instance audit trail recording the order in which the
        # save()/delete() hooks run, so callers can observe hook ordering.
        self.data = []

    def __str__(self):
        return "%s %s" % (self.first_name, self.last_name)

    def save(self, *args, **kwargs):
        # Arbitrary code can run both before and after the real save.
        self.data.append("Before save")
        # Call the "real" save() method
        super(Person, self).save(*args, **kwargs)
        self.data.append("After save")

    def delete(self):
        self.data.append("Before deletion")
        # Call the "real" delete() method
        super(Person, self).delete()
        self.data.append("After deletion")
<|endoftext|> |
<|endoftext|>import datetime
from django.core.exceptions import ValidationError
from django.test import TestCase
from django.utils import unittest
from .models import (
CustomPKModel,
UniqueTogetherModel,
UniqueFieldsModel,
UniqueForDateModel,
ModelToValidate,
Post,
FlexibleDatePost,
UniqueErrorsModel,
)
class GetUniqueCheckTests(unittest.TestCase):
    # Verifies Model._get_unique_checks(): it must return a pair of lists,
    # (unique field checks, date-based unique_for_* checks), each entry
    # naming the model class and the fields involved.

    def test_unique_fields_get_collected(self):
        # Each unique=True field yields its own single-field check, plus the
        # implicit primary key ("id").
        m = UniqueFieldsModel()
        self.assertEqual(
            (
                [
                    (UniqueFieldsModel, ("id",)),
                    (UniqueFieldsModel, ("unique_charfield",)),
                    (UniqueFieldsModel, ("unique_integerfield",)),
                ],
                [],
            ),
            m._get_unique_checks(),
        )

    def test_unique_together_gets_picked_up_and_converted_to_tuple(self):
        # unique_together entries are normalized to tuples of field names.
        m = UniqueTogetherModel()
        self.assertEqual(
            (
                [
                    (
                        UniqueTogetherModel,
                        (
                            "ifield",
                            "cfield",
                        ),
                    ),
                    (UniqueTogetherModel, ("ifield", "efield")),
                    (UniqueTogetherModel, ("id",)),
                ],
                [],
            ),
            m._get_unique_checks(),
        )

    def test_primary_key_is_considered_unique(self):
        # A custom primary key field takes the place of the default "id".
        m = CustomPKModel()
        self.assertEqual(
            ([(CustomPKModel, ("my_pk_field",))], []), m._get_unique_checks()
        )

    def test_unique_for_date_gets_picked_up(self):
        # Date checks are (model, lookup_type, field, date_field) tuples for
        # unique_for_date / unique_for_year / unique_for_month respectively.
        m = UniqueForDateModel()
        self.assertEqual(
            (
                [(UniqueForDateModel, ("id",))],
                [
                    (UniqueForDateModel, "date", "count", "start_date"),
                    (UniqueForDateModel, "year", "count", "end_date"),
                    (UniqueForDateModel, "month", "order", "end_date"),
                ],
            ),
            m._get_unique_checks(),
        )

    def test_unique_for_date_exclusion(self):
        # Excluding a date field drops the checks that depend on it.
        m = UniqueForDateModel()
        self.assertEqual(
            (
                [(UniqueForDateModel, ("id",))],
                [
                    (UniqueForDateModel, "year", "count", "end_date"),
                    (UniqueForDateModel, "month", "order", "end_date"),
                ],
            ),
            m._get_unique_checks(exclude="start_date"),
        )
class PerformUniqueChecksTest(TestCase):
    """Integration tests: full_clean() actually performs (or skips) the
    unique checks against the database, verified via query counting and
    the ValidationError messages raised."""

    def test_primary_key_unique_check_not_performed_when_adding_and_pk_not_specified(
        self,
    ):
        # Regression test for #12560
        # No explicit pk given, so there is nothing to check -> 0 queries.
        with self.assertNumQueries(0):
            mtv = ModelToValidate(number=10, name="Some Name")
            setattr(mtv, "_adding", True)
            mtv.full_clean()

    def test_primary_key_unique_check_performed_when_adding_and_pk_specified(self):
        # Regression test for #12560
        # Explicit pk on a new instance must be checked -> exactly 1 query.
        with self.assertNumQueries(1):
            mtv = ModelToValidate(number=10, name="Some Name", id=123)
            setattr(mtv, "_adding", True)
            mtv.full_clean()

    def test_primary_key_unique_check_not_performed_when_not_adding(self):
        # Regression test for #12132
        # Existing instances never re-check their own pk for uniqueness.
        with self.assertNumQueries(0):
            mtv = ModelToValidate(number=10, name="Some Name")
            mtv.full_clean()

    def test_unique_for_date(self):
        # Seed one row so the unique_for_* checks have something to clash with.
        p1 = Post.objects.create(
            title="Django 1.0 is released",
            slug="Django 1.0",
            subtitle="Finally",
            posted=datetime.date(2008, 9, 3),
        )
        # Same title on the same date -> unique_for_date violation.
        p = Post(title="Django 1.0 is released", posted=datetime.date(2008, 9, 3))
        with self.assertRaises(ValidationError) as cm:
            p.full_clean()
        self.assertEqual(
            cm.exception.message_dict,
            {"title": ["Title must be unique for Posted date."]},
        )
        # Should work without errors
        p = Post(title="Work on Django 1.1 begins", posted=datetime.date(2008, 9, 3))
        p.full_clean()
        # Should work without errors
        # NOTE(review): a datetime is passed where the seed used a date --
        # presumably exercising date coercion; confirm it is deliberate.
        p = Post(title="Django 1.0 is released", posted=datetime.datetime(2008, 9, 4))
        p.full_clean()
        # Same slug within the same year -> unique_for_year violation.
        p = Post(slug="Django 1.0", posted=datetime.datetime(2008, 1, 1))
        with self.assertRaises(ValidationError) as cm:
            p.full_clean()
        self.assertEqual(
            cm.exception.message_dict,
            {"slug": ["Slug must be unique for Posted year."]},
        )
        # Same subtitle within the same month -> unique_for_month violation.
        p = Post(subtitle="Finally", posted=datetime.datetime(2008, 9, 30))
        with self.assertRaises(ValidationError) as cm:
            p.full_clean()
        self.assertEqual(
            cm.exception.message_dict,
            {"subtitle": ["Subtitle must be unique for Posted month."]},
        )
        # A missing (non-nullable) date field fails its own validation first.
        p = Post(title="Django 1.0 is released")
        with self.assertRaises(ValidationError) as cm:
            p.full_clean()
        self.assertEqual(
            cm.exception.message_dict, {"posted": ["This field cannot be null."]}
        )

    def test_unique_for_date_with_nullable_date(self):
        # When the referenced date field is nullable and left as None, none
        # of the unique_for_* checks may fire.
        p1 = FlexibleDatePost.objects.create(
            title="Django 1.0 is released",
            slug="Django 1.0",
            subtitle="Finally",
            posted=datetime.date(2008, 9, 3),
        )
        p = FlexibleDatePost(title="Django 1.0 is released")
        try:
            p.full_clean()
        except ValidationError:
            self.fail(
                "unique_for_date checks shouldn't trigger when the associated DateField is None."
            )
        p = FlexibleDatePost(slug="Django 1.0")
        try:
            p.full_clean()
        except ValidationError:
            self.fail(
                "unique_for_year checks shouldn't trigger when the associated DateField is None."
            )
        p = FlexibleDatePost(subtitle="Finally")
        try:
            p.full_clean()
        except ValidationError:
            self.fail(
                "unique_for_month checks shouldn't trigger when the associated DateField is None."
            )

    def test_unique_errors(self):
        # Custom per-field unique error messages defined on the model are used.
        m1 = UniqueErrorsModel.objects.create(name="Some Name", no=10)
        m = UniqueErrorsModel(name="Some Name", no=11)
        with self.assertRaises(ValidationError) as cm:
            m.full_clean()
        self.assertEqual(
            cm.exception.message_dict, {"name": ["Custom unique name message."]}
        )
        m = UniqueErrorsModel(name="Some Other Name", no=10)
        with self.assertRaises(ValidationError) as cm:
            m.full_clean()
        self.assertEqual(
            cm.exception.message_dict, {"no": ["Custom unique number message."]}
        )
<|endoftext|> |
<|endoftext|># -*- coding: utf-8 -*-
import tempfile
import os
from django import forms
from django.contrib import admin
from django.contrib.admin.views.main import ChangeList
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.conf.urls import patterns, url
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse
from django.contrib.admin import BooleanFieldListFilter
from .models import (
Article,
Chapter,
Account,
Media,
Child,
Parent,
Picture,
Widget,
DooHickey,
Grommet,
Whatsit,
FancyDoodad,
Category,
Link,
PrePopulatedPost,
PrePopulatedSubPost,
CustomArticle,
Section,
ModelWithStringPrimaryKey,
Color,
Thing,
Actor,
Inquisition,
Sketch,
Person,
Persona,
Subscriber,
ExternalSubscriber,
OldSubscriber,
Vodcast,
EmptyModel,
Fabric,
Gallery,
Language,
Recommendation,
Recommender,
Collector,
Post,
Gadget,
Villain,
SuperVillain,
Plot,
PlotDetails,
CyclicOne,
CyclicTwo,
WorkHour,
Reservation,
FoodDelivery,
RowLevelChangePermissionModel,
Paper,
CoverLetter,
Story,
OtherStory,
Book,
Promo,
ChapterXtra1,
Pizza,
Topping,
Album,
Question,
Answer,
ComplexSortedPerson,
PrePopulatedPostLargeSlug,
AdminOrderedField,
AdminOrderedModelMethod,
AdminOrderedAdminMethod,
AdminOrderedCallable,
Report,
Color2,
UnorderedObject,
MainPrepopulated,
RelatedPrepopulated,
UndeletableObject,
UserMessenger,
Simple,
Choice,
ShortMessage,
Telegram,
)
def callable_year(dt_value):
    """Return ``dt_value.year`` when the object exposes one, else ``None``.

    Used as a list_display callable; ordering is delegated to the ``date``
    column via ``admin_order_field``.
    """
    # Three-argument getattr returns the default on AttributeError, which is
    # exactly what the try/except form of this accessor did.
    return getattr(dt_value, "year", None)


callable_year.admin_order_field = "date"
# Inline Article editor with prepopulation and collapsible/wide fieldsets.
class ArticleInline(admin.TabularInline):
    model = Article
    prepopulated_fields = {"title": ("content",)}
    fieldsets = (
        ("Some fields", {"classes": ("collapse",), "fields": ("title", "content")}),
        ("Some other fields", {"classes": ("wide",), "fields": ("date", "section")}),
    )


# Bare tabular inline, no customisation.
class ChapterInline(admin.TabularInline):
    model = Chapter


# Exercises list_filter lookups that traverse FK relations several hops deep.
class ChapterXtra1Admin(admin.ModelAdmin):
    list_filter = (
        "chap",
        "chap__title",
        "chap__book",
        "chap__book__name",
        "chap__book__promo",
        "chap__book__promo__name",
    )
class ArticleAdmin(admin.ModelAdmin):
    """Exercises callables in list_display plus save/delete hooks that send
    mail so the test suite can observe side effects in the outbox."""

    list_display = ("content", "date", callable_year, "model_year", "modeladmin_year")
    list_filter = ("date", "section")

    def changelist_view(self, request):
        "Test that extra_context works"
        return super(ArticleAdmin, self).changelist_view(
            request, extra_context={"extra_var": "Hello!"}
        )

    def modeladmin_year(self, obj):
        # Computed list_display column, sorted by the underlying date field.
        return obj.date.year

    modeladmin_year.admin_order_field = "date"
    modeladmin_year.short_description = None

    def delete_model(self, request, obj):
        # Observable side effect: one message lands in the mail outbox.
        EmailMessage(
            "Greetings from a deleted object",
            "I hereby inform you that some user deleted me",
            "[email protected]",
            ["[email protected]"],
        ).send()
        return super(ArticleAdmin, self).delete_model(request, obj)

    def save_model(self, request, obj, form, change=True):
        # NOTE(review): `change=True` default deviates from the stock
        # ModelAdmin signature (no default) -- confirm it is intentional.
        EmailMessage(
            "Greetings from a created object",
            "I hereby inform you that some user created me",
            "[email protected]",
            ["[email protected]"],
        ).send()
        return super(ArticleAdmin, self).save_model(request, obj, form, change)
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
    def has_change_permission(self, request, obj=None):
        """Only allow changing objects with even id number"""
        # Per-object permission: staff user AND a concrete object AND even pk.
        return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)


class CustomArticleAdmin(admin.ModelAdmin):
    """
    Tests various hooks for using custom templates and contexts.
    """

    # Every overridable admin template is pointed at a custom variant.
    change_list_template = "custom_admin/change_list.html"
    change_form_template = "custom_admin/change_form.html"
    add_form_template = "custom_admin/add_form.html"
    object_history_template = "custom_admin/object_history.html"
    delete_confirmation_template = "custom_admin/delete_confirmation.html"
    delete_selected_confirmation_template = (
        "custom_admin/delete_selected_confirmation.html"
    )

    def changelist_view(self, request):
        "Test that extra_context works"
        return super(CustomArticleAdmin, self).changelist_view(
            request, extra_context={"extra_var": "Hello!"}
        )
# Filters spanning an FK relation plus a plain date field.
class ThingAdmin(admin.ModelAdmin):
    list_filter = (
        "color__warm",
        "color__value",
        "pub_date",
    )


class InquisitionAdmin(admin.ModelAdmin):
    list_display = ("leader", "country", "expected")


# raw_id_fields renders the FK as a raw pk input instead of a select box.
class SketchAdmin(admin.ModelAdmin):
    raw_id_fields = ("inquisition",)


class FabricAdmin(admin.ModelAdmin):
    list_display = ("surface",)
    list_filter = ("surface",)
class BasePersonModelFormSet(BaseModelFormSet):
    """Changelist formset whose clean() vetoes one specific person, used to
    test list_editable validation errors."""

    def clean(self):
        # cleaned_data holds one dict per form; "id" maps to the instance.
        for person_dict in self.cleaned_data:
            person = person_dict.get("id")
            alive = person_dict.get("alive")
            if person and alive and person.name == "Grace Hopper":
                raise forms.ValidationError("Grace is not a Zombie")
class PersonAdmin(admin.ModelAdmin):
    list_display = ("name", "gender", "alive")
    list_editable = ("gender", "alive")
    list_filter = ("gender",)
    search_fields = ("^name",)  # "^" prefix = startswith search
    save_as = True

    def get_changelist_formset(self, request, **kwargs):
        # Plug in the vetoing formset defined above.
        return super(PersonAdmin, self).get_changelist_formset(
            request, formset=BasePersonModelFormSet, **kwargs
        )

    def queryset(self, request):
        # Order by a field that isn't in list display, to be able to test
        # whether ordering is preserved.
        return super(PersonAdmin, self).queryset(request).order_by("age")
class FooAccount(Account):
    """A service-specific account of type Foo."""

    servicename = "foo"


class BarAccount(Account):
    """A service-specific account of type Bar."""

    servicename = "bar"


class FooAccountAdmin(admin.StackedInline):
    model = FooAccount
    extra = 1


class BarAccountAdmin(admin.StackedInline):
    model = BarAccount
    extra = 1


# Two stacked inlines of sibling subclass models on one admin.
class PersonaAdmin(admin.ModelAdmin):
    inlines = (FooAccountAdmin, BarAccountAdmin)
class SubscriberAdmin(admin.ModelAdmin):
    actions = ["mail_admin"]

    def mail_admin(self, request, selected):
        # Admin action defined as a method; sends one observable message.
        EmailMessage(
            "Greetings from a ModelAdmin action",
            "This is the test email from a admin action",
            "[email protected]",
            ["[email protected]"],
        ).send()


# Admin action defined as a stand-alone function.
def external_mail(modeladmin, request, selected):
    EmailMessage(
        "Greetings from a function action",
        "This is the test email from a function action",
        "[email protected]",
        ["[email protected]"],
    ).send()


external_mail.short_description = "External mail (Another awesome action)"


# Action that redirects instead of re-rendering the changelist.
def redirect_to(modeladmin, request, selected):
    from django.http import HttpResponseRedirect

    return HttpResponseRedirect("/some-where-else/")


redirect_to.short_description = "Redirect to (Awesome action)"
# Mixes a function action and an imported action on one admin.
class ExternalSubscriberAdmin(admin.ModelAdmin):
    actions = [redirect_to, external_mail]


class Podcast(Media):
    release_date = models.DateField()

    class Meta:
        ordering = ("release_date",)  # overridden in PodcastAdmin


class PodcastAdmin(admin.ModelAdmin):
    list_display = ("name", "release_date")
    list_editable = ("release_date",)
    date_hierarchy = "release_date"
    ordering = ("name",)


class VodcastAdmin(admin.ModelAdmin):
    list_display = ("name", "released")
    list_editable = ("released",)
    ordering = ("name",)
class ChildInline(admin.StackedInline):
    model = Child


class ParentAdmin(admin.ModelAdmin):
    model = Parent
    inlines = [ChildInline]
    list_editable = ("name",)

    def save_related(self, request, form, formsets, change):
        # After the default save, propagate the parent's surname to any
        # children saved with a single-word name.
        super(ParentAdmin, self).save_related(request, form, formsets, change)
        # NOTE(review): the unpack assumes the parent name is exactly two
        # words -- fixture data presumably guarantees this; confirm.
        first_name, last_name = form.instance.name.split()
        for child in form.instance.child_set.all():
            if len(child.name.split()) < 2:
                child.name = child.name + " " + last_name
                child.save()
class EmptyModelAdmin(admin.ModelAdmin):
    def queryset(self, request):
        # Deliberately hides pk 1 so the changelist can be "empty".
        return super(EmptyModelAdmin, self).queryset(request).filter(pk__gt=1)


class OldSubscriberAdmin(admin.ModelAdmin):
    actions = None  # disables the action dropdown entirely


# File-upload fixtures live in a per-run temp dir under the test sandbox.
temp_storage = FileSystemStorage(
    tempfile.mkdtemp(dir=os.environ["DJANGO_TEST_TEMP_DIR"])
)
UPLOAD_TO = os.path.join(temp_storage.location, "test_upload")
class PictureInline(admin.TabularInline):
    model = Picture
    extra = 1


class GalleryAdmin(admin.ModelAdmin):
    inlines = [PictureInline]


class PictureAdmin(admin.ModelAdmin):
    pass


class LanguageAdmin(admin.ModelAdmin):
    list_display = ["iso", "shortlist", "english_name", "name"]
    list_editable = ["shortlist"]


class RecommendationAdmin(admin.ModelAdmin):
    # "=" prefix forces exact-match searches across related translations.
    search_fields = (
        "=titletranslation__text",
        "=recommender__titletranslation__text",
    )


class WidgetInline(admin.StackedInline):
    model = Widget


class DooHickeyInline(admin.StackedInline):
    model = DooHickey


class GrommetInline(admin.StackedInline):
    model = Grommet


class WhatsitInline(admin.StackedInline):
    model = Whatsit


class FancyDoodadInline(admin.StackedInline):
    model = FancyDoodad


class CategoryAdmin(admin.ModelAdmin):
    list_display = ("id", "collector", "order")
    list_editable = ("order",)


class CategoryInline(admin.StackedInline):
    model = Category


# One inline per related type, to exercise many inlines on a single admin.
class CollectorAdmin(admin.ModelAdmin):
    inlines = [
        WidgetInline,
        DooHickeyInline,
        GrommetInline,
        WhatsitInline,
        FancyDoodadInline,
        CategoryInline,
    ]
class LinkInline(admin.TabularInline):
    model = Link
    extra = 1
    readonly_fields = ("posted", "multiline")

    def multiline(self, instance):
        # Read-only callable whose rendered value contains newlines.
        return "InlineMultiline\ntest\nstring"


class SubPostInline(admin.TabularInline):
    model = PrePopulatedSubPost
    prepopulated_fields = {"subslug": ("subtitle",)}

    def get_readonly_fields(self, request, obj=None):
        # Once published, the slug becomes read-only and (below) stops being
        # prepopulated.
        if obj and obj.published:
            return ("subslug",)
        return self.readonly_fields

    def get_prepopulated_fields(self, request, obj=None):
        if obj and obj.published:
            return {}
        return self.prepopulated_fields


class PrePopulatedPostAdmin(admin.ModelAdmin):
    list_display = ["title", "slug"]
    prepopulated_fields = {"slug": ("title",)}
    inlines = [SubPostInline]

    def get_readonly_fields(self, request, obj=None):
        # Mirrors the inline's published-object behavior for the parent slug.
        if obj and obj.published:
            return ("slug",)
        return self.readonly_fields

    def get_prepopulated_fields(self, request, obj=None):
        if obj and obj.published:
            return {}
        return self.prepopulated_fields
class PostAdmin(admin.ModelAdmin):
    """Covers readonly_fields of every flavor: model field, admin methods,
    and an anonymous lambda."""

    list_display = ["title", "public"]
    readonly_fields = (
        "posted",
        "awesomeness_level",
        "coolness",
        "value",
        "multiline",
        lambda obj: "foo",
    )
    inlines = [LinkInline]

    def coolness(self, instance):
        if instance.pk:
            return "%d amount of cool." % instance.pk
        else:
            # NOTE(review): "Unkown" is misspelled -- confirm no test asserts
            # this exact string before correcting it.
            return "Unkown coolness."

    def value(self, instance):
        return 1000

    def multiline(self, instance):
        return "Multiline\ntest\nstring"

    value.short_description = "Value in $US"
class CustomChangeList(ChangeList):
    def get_query_set(self, request):
        # Deliberately yields an empty changelist queryset.
        return self.root_query_set.filter(pk=9999)  # Does not exist


class GadgetAdmin(admin.ModelAdmin):
    def get_changelist(self, request, **kwargs):
        return CustomChangeList


class PizzaAdmin(admin.ModelAdmin):
    readonly_fields = ("toppings",)


# NOTE(review): WorkHourAdmin is redefined identically further below;
# one of the two definitions is redundant.
class WorkHourAdmin(admin.ModelAdmin):
    list_display = ("datum", "employee")
    list_filter = ("employee",)


class FoodDeliveryAdmin(admin.ModelAdmin):
    list_display = ("reference", "driver", "restaurant")
    list_editable = ("driver", "restaurant")
class CoverLetterAdmin(admin.ModelAdmin):
    """
    A ModelAdmin with a custom queryset() method that uses defer(), to test
    verbose_name display in messages shown after adding/editing CoverLetter
    instances.
    Note that the CoverLetter model defines a __unicode__ method.
    For testing fix for ticket #14529.
    """

    def queryset(self, request):
        return super(CoverLetterAdmin, self).queryset(request).defer("date_written")


class PaperAdmin(admin.ModelAdmin):
    """
    A ModelAdmin with a custom queryset() method that uses only(), to test
    verbose_name display in messages shown after adding/editing Paper
    instances.
    For testing fix for ticket #14529.
    """

    def queryset(self, request):
        return super(PaperAdmin, self).queryset(request).only("title")


class ShortMessageAdmin(admin.ModelAdmin):
    """
    A ModelAdmin with a custom queryset() method that uses defer(), to test
    verbose_name display in messages shown after adding/editing ShortMessage
    instances.
    For testing fix for ticket #14529.
    """

    def queryset(self, request):
        return super(ShortMessageAdmin, self).queryset(request).defer("timestamp")


class TelegramAdmin(admin.ModelAdmin):
    """
    A ModelAdmin with a custom queryset() method that uses only(), to test
    verbose_name display in messages shown after adding/editing Telegram
    instances.
    Note that the Telegram model defines a __unicode__ method.
    For testing fix for ticket #14529.
    """

    def queryset(self, request):
        return super(TelegramAdmin, self).queryset(request).only("title")
class StoryForm(forms.ModelForm):
    class Meta:
        # NOTE(review): Meta declares only widgets; the model is supplied by
        # the admin machinery -- confirm against the Django version in use.
        widgets = {"title": forms.HiddenInput}


class StoryAdmin(admin.ModelAdmin):
    list_display = ("id", "title", "content")
    list_display_links = ("title",)  # 'id' not in list_display_links
    list_editable = ("content",)
    form = StoryForm
    ordering = ["-pk"]


class OtherStoryAdmin(admin.ModelAdmin):
    list_display = ("id", "title", "content")
    list_display_links = ("title", "id")  # 'id' in list_display_links
    list_editable = ("content",)
    ordering = ["-pk"]
class ComplexSortedPersonAdmin(admin.ModelAdmin):
    list_display = ("name", "age", "is_employee", "colored_name")
    ordering = ("name",)

    def colored_name(self, obj):
        # HTML column (allow_tags) that still sorts by the plain name.
        return '<span style="color: #%s;">%s</span>' % ("ff00ff", obj.name)

    colored_name.allow_tags = True
    colored_name.admin_order_field = "name"


class AlbumAdmin(admin.ModelAdmin):
    list_filter = ["title"]


# NOTE(review): identical redefinition of the WorkHourAdmin declared earlier;
# Python simply rebinds the name, so this copy is harmless but redundant.
class WorkHourAdmin(admin.ModelAdmin):
    list_display = ("datum", "employee")
    list_filter = ("employee",)
class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
    prepopulated_fields = {"slug": ("title",)}


# The next few admins test admin_order_field resolution for: a plain field,
# a model method, an admin method, and (below) a callable.
class AdminOrderedFieldAdmin(admin.ModelAdmin):
    ordering = ("order",)
    list_display = ("stuff", "order")


class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
    ordering = ("order",)
    list_display = ("stuff", "some_order")


class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
    def some_admin_order(self, obj):
        return obj.order

    some_admin_order.admin_order_field = "order"
    ordering = ("order",)
    list_display = ("stuff", "some_admin_order")
def admin_ordered_callable(obj):
    """list_display callable exposing the object's ``order`` value; sortable
    via the ``order`` column through ``admin_order_field``."""
    return getattr(obj, "order")


admin_ordered_callable.admin_order_field = "order"
class AdminOrderedCallableAdmin(admin.ModelAdmin):
    ordering = ("order",)
    list_display = ("stuff", admin_ordered_callable)


class ReportAdmin(admin.ModelAdmin):
    def extra(self, request):
        # Extra admin view wired in via get_urls() below.
        return HttpResponse()

    def get_urls(self):
        # Corner case: Don't call parent implementation
        return patterns(
            "",
            url(r"^extra/$", self.extra, name="cable_extra"),
        )


class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
    template = "custom_filter_template.html"


class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
    # (field, FilterClass) pair form of list_filter.
    list_filter = (("warm", CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
    fieldsets = (
        (
            None,
            {
                "fields": (
                    ("pubdate", "status"),
                    (
                        "name",
                        "slug1",
                        "slug2",
                    ),
                )
            },
        ),
    )
    model = RelatedPrepopulated
    extra = 1
    prepopulated_fields = {"slug1": ["name", "pubdate"], "slug2": ["status", "name"]}


# Same prepopulation rules rendered as a tabular (not stacked) inline.
class RelatedPrepopulatedInline2(admin.TabularInline):
    model = RelatedPrepopulated
    extra = 1
    prepopulated_fields = {"slug1": ["name", "pubdate"], "slug2": ["status", "name"]}


class MainPrepopulatedAdmin(admin.ModelAdmin):
    inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2]
    fieldsets = (
        (
            None,
            {
                "fields": (
                    ("pubdate", "status"),
                    (
                        "name",
                        "slug1",
                        "slug2",
                    ),
                )
            },
        ),
    )
    prepopulated_fields = {"slug1": ["name", "pubdate"], "slug2": ["status", "name"]}
class UnorderedObjectAdmin(admin.ModelAdmin):
    list_display = ["name"]
    list_editable = ["name"]
    list_per_page = 2  # tiny page size to force pagination in tests


class UndeletableObjectAdmin(admin.ModelAdmin):
    def change_view(self, *args, **kwargs):
        # Hide the delete button on the change form.
        kwargs["extra_context"] = {"show_delete": False}
        return super(UndeletableObjectAdmin, self).change_view(*args, **kwargs)
def callable_on_unknown(obj):
    """Access an attribute the test models do not define, so that using this
    as a list_display callable raises AttributeError at render time."""
    return getattr(obj, "unknown")
# Uses the AttributeError-raising callable above to test admin error paths.
class AttributeErrorRaisingAdmin(admin.ModelAdmin):
    list_display = [
        callable_on_unknown,
    ]
class MessageTestingAdmin(admin.ModelAdmin):
    """One action per message_user() level, plus one exercising extra_tags."""

    actions = [
        "message_debug",
        "message_info",
        "message_success",
        "message_warning",
        "message_error",
        "message_extra_tags",
    ]

    def message_debug(self, request, selected):
        self.message_user(request, "Test debug", level="debug")

    def message_info(self, request, selected):
        self.message_user(request, "Test info", level="info")

    def message_success(self, request, selected):
        self.message_user(request, "Test success", level="success")

    def message_warning(self, request, selected):
        self.message_user(request, "Test warning", level="warning")

    def message_error(self, request, selected):
        self.message_user(request, "Test error", level="error")

    def message_extra_tags(self, request, selected):
        self.message_user(request, "Test tags", extra_tags="extra_tag")


class ChoiceList(admin.ModelAdmin):
    list_display = ["choice"]
    readonly_fields = ["choice"]
    fields = ["choice"]
# Dedicated AdminSite instance registering every fixture model above.
site = admin.AdminSite(name="admin")
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
site.register(Section, save_as=True, inlines=[ArticleInline])
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(ShortMessage, ShortMessageAdmin)
site.register(Telegram, TelegramAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
site.register(UndeletableObject, UndeletableObjectAdmin)
# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
# related ForeignKey object registered in admin
# related ForeignKey object not registered in admin
# related OneToOne object registered in admin
# related OneToOne object not registered in admin
# when deleting Book so as exercise all four troublesome (w.r.t escaping
# and calling force_text to avoid problems on Python 2.3) paths through
# contrib.admin.util's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(Topping)
site.register(Album, AlbumAdmin)
site.register(Question)
site.register(Answer)
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
site.register(Simple, AttributeErrorRaisingAdmin)
site.register(UserMessenger, MessageTestingAdmin)
site.register(Choice, ChoiceList)

# Register core models we need in our tests
from django.contrib.auth.models import User, Group
from django.contrib.auth.admin import UserAdmin, GroupAdmin

site.register(User, UserAdmin)
site.register(Group, GroupAdmin)
<|endoftext|> |
<|endoftext|># -*- coding:utf-8 -*-
from datetime import datetime
from django.test import TestCase
# Canonical body returned by the conditional test views.
FULL_RESPONSE = "Test conditional get response"
LAST_MODIFIED = datetime(2007, 10, 21, 23, 21, 47)
LAST_MODIFIED_STR = "Sun, 21 Oct 2007 23:21:47 GMT"
LAST_MODIFIED_NEWER_STR = "Mon, 18 Oct 2010 16:56:23 GMT"
LAST_MODIFIED_INVALID_STR = "Mon, 32 Oct 2010 16:56:23 GMT"  # day 32 is unparsable
EXPIRED_LAST_MODIFIED_STR = "Sat, 20 Oct 2007 23:21:47 GMT"
ETAG = "b4246ffc4f62314ca13147c9d4f76974"
EXPIRED_ETAG = "7fae4cd4b0f81e7d2914700043aa8ed6"
class ConditionalGet(TestCase):
    """End-to-end tests of conditional GET handling (If-Modified-Since,
    If-None-Match, If-Match) against the views in the companion URLconf."""

    urls = "regressiontests.conditional_processing.urls"

    def assertFullResponse(self, response, check_last_modified=True, check_etag=True):
        # A 200 carrying the full body plus the requested validator headers.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, FULL_RESPONSE.encode())
        if check_last_modified:
            self.assertEqual(response["Last-Modified"], LAST_MODIFIED_STR)
        if check_etag:
            self.assertEqual(response["ETag"], '"%s"' % ETAG)

    def assertNotModified(self, response):
        # 304 responses must carry an empty body.
        self.assertEqual(response.status_code, 304)
        self.assertEqual(response.content, b"")

    def testWithoutConditions(self):
        response = self.client.get("/condition/")
        self.assertFullResponse(response)

    def testIfModifiedSince(self):
        self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
        response = self.client.get("/condition/")
        self.assertNotModified(response)
        # A client date newer than the resource's also yields 304.
        self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_NEWER_STR
        response = self.client.get("/condition/")
        self.assertNotModified(response)
        # An unparsable date is ignored -> full response.
        self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_INVALID_STR
        response = self.client.get("/condition/")
        self.assertFullResponse(response)
        # A date older than the resource's -> full response.
        self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
        response = self.client.get("/condition/")
        self.assertFullResponse(response)

    def testIfNoneMatch(self):
        self.client.defaults["HTTP_IF_NONE_MATCH"] = '"%s"' % ETAG
        response = self.client.get("/condition/")
        self.assertNotModified(response)
        self.client.defaults["HTTP_IF_NONE_MATCH"] = '"%s"' % EXPIRED_ETAG
        response = self.client.get("/condition/")
        self.assertFullResponse(response)
        # Several etags in If-None-Match is a bit exotic but why not?
        self.client.defaults["HTTP_IF_NONE_MATCH"] = '"%s", "%s"' % (ETAG, EXPIRED_ETAG)
        response = self.client.get("/condition/")
        self.assertNotModified(response)

    def testIfMatch(self):
        # If-Match applies to state-changing methods; a stale etag -> 412.
        self.client.defaults["HTTP_IF_MATCH"] = '"%s"' % ETAG
        response = self.client.put("/condition/etag/")
        self.assertEqual(response.status_code, 200)
        self.client.defaults["HTTP_IF_MATCH"] = '"%s"' % EXPIRED_ETAG
        response = self.client.put("/condition/etag/")
        self.assertEqual(response.status_code, 412)

    def testBothHeaders(self):
        # Only when BOTH validators match is a 304 returned.
        self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
        self.client.defaults["HTTP_IF_NONE_MATCH"] = '"%s"' % ETAG
        response = self.client.get("/condition/")
        self.assertNotModified(response)
        self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
        self.client.defaults["HTTP_IF_NONE_MATCH"] = '"%s"' % ETAG
        response = self.client.get("/condition/")
        self.assertFullResponse(response)
        self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
        self.client.defaults["HTTP_IF_NONE_MATCH"] = '"%s"' % EXPIRED_ETAG
        response = self.client.get("/condition/")
        self.assertFullResponse(response)

    def testSingleCondition1(self):
        # Views providing only one validator ignore the other header.
        self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
        response = self.client.get("/condition/last_modified/")
        self.assertNotModified(response)
        response = self.client.get("/condition/etag/")
        self.assertFullResponse(response, check_last_modified=False)

    def testSingleCondition2(self):
        self.client.defaults["HTTP_IF_NONE_MATCH"] = '"%s"' % ETAG
        response = self.client.get("/condition/etag/")
        self.assertNotModified(response)
        response = self.client.get("/condition/last_modified/")
        self.assertFullResponse(response, check_etag=False)

    def testSingleCondition3(self):
        self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
        response = self.client.get("/condition/last_modified/")
        self.assertFullResponse(response, check_etag=False)

    def testSingleCondition4(self):
        self.client.defaults["HTTP_IF_NONE_MATCH"] = '"%s"' % EXPIRED_ETAG
        response = self.client.get("/condition/etag/")
        self.assertFullResponse(response, check_last_modified=False)

    def testSingleCondition5(self):
        # Variants of the views with decorators stacked in the other order.
        self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
        response = self.client.get("/condition/last_modified2/")
        self.assertNotModified(response)
        response = self.client.get("/condition/etag2/")
        self.assertFullResponse(response, check_last_modified=False)

    def testSingleCondition6(self):
        self.client.defaults["HTTP_IF_NONE_MATCH"] = '"%s"' % ETAG
        response = self.client.get("/condition/etag2/")
        self.assertNotModified(response)
        response = self.client.get("/condition/last_modified2/")
        self.assertFullResponse(response, check_etag=False)

    def testInvalidETag(self):
        # A syntactically invalid etag must not match anything.
        self.client.defaults["HTTP_IF_NONE_MATCH"] = r'"\"'
        response = self.client.get("/condition/etag/")
        self.assertFullResponse(response, check_last_modified=False)
<|endoftext|> |
<|endoftext|>from .error_messages import (
FormsErrorMessagesTestCase,
ModelChoiceFieldErrorMessagesTestCase,
)
from .extra import FormsExtraTestCase, FormsExtraL10NTestCase
from .fields import FieldsTests
from .forms import FormsTestCase
from .formsets import (
FormsFormsetTestCase,
FormsetAsFooTests,
TestIsBoundBehavior,
TestEmptyFormSet,
)
from .input_formats import (
LocalizedTimeTests,
CustomTimeInputFormatsTests,
SimpleTimeFormatTests,
LocalizedDateTests,
CustomDateInputFormatsTests,
SimpleDateFormatTests,
LocalizedDateTimeTests,
CustomDateTimeInputFormatsTests,
SimpleDateTimeFormatTests,
)
from .media import FormsMediaTestCase, StaticFormsMediaTestCase
from .models import (
TestTicket12510,
ModelFormCallableModelDefault,
FormsModelTestCase,
RelatedModelFormTests,
)
from .regressions import FormsRegressionsTestCase
from .util import FormsUtilTestCase
from .validators import TestFieldWithValidators
from .widgets import (
FormsWidgetTestCase,
FormsI18NWidgetsTestCase,
WidgetTests,
LiveWidgetTests,
ClearableFileInputTests,
)
<|endoftext|> |
<|endoftext|>"""
Various edge-cases for model managers.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Filtering managers used to test manager inheritance and default selection.
class OnlyFred(models.Manager):
    def get_query_set(self):
        return super(OnlyFred, self).get_query_set().filter(name="fred")


class OnlyBarney(models.Manager):
    def get_query_set(self):
        return super(OnlyBarney, self).get_query_set().filter(name="barney")


class Value42(models.Manager):
    def get_query_set(self):
        return super(Value42, self).get_query_set().filter(value=42)
class AbstractBase1(models.Model):
    name = models.CharField(max_length=50)

    class Meta:
        abstract = True

    # Custom managers
    # Declaration order matters: the first manager becomes the default
    # for concrete subclasses that do not declare their own.
    manager1 = OnlyFred()
    manager2 = OnlyBarney()
    objects = models.Manager()


class AbstractBase2(models.Model):
    value = models.IntegerField()

    class Meta:
        abstract = True

    # Custom manager
    restricted = Value42()


# No custom manager on this class to make sure the default case doesn't break.
class AbstractBase3(models.Model):
    comment = models.CharField(max_length=50)

    class Meta:
        abstract = True
@python_2_unicode_compatible
class Parent(models.Model):
    name = models.CharField(max_length=50)

    # Sole declared manager; becomes this model's default manager.
    manager = OnlyFred()

    def __str__(self):
        return self.name
# Managers from base classes are inherited and, if no manager is specified
# *and* the parent has a manager specified, the first one (in the MRO) will
# become the default.
@python_2_unicode_compatible
class Child1(AbstractBase1):
    data = models.CharField(max_length=25)

    def __str__(self):
        return self.data


# Inherits managers from both abstract bases.
@python_2_unicode_compatible
class Child2(AbstractBase1, AbstractBase2):
    data = models.CharField(max_length=25)

    def __str__(self):
        return self.data


# Second base contributes no managers of its own.
@python_2_unicode_compatible
class Child3(AbstractBase1, AbstractBase3):
    data = models.CharField(max_length=25)

    def __str__(self):
        return self.data


@python_2_unicode_compatible
class Child4(AbstractBase1):
    data = models.CharField(max_length=25)

    # Should be the default manager, although the parent managers are
    # inherited.
    default = models.Manager()

    def __str__(self):
        return self.data


@python_2_unicode_compatible
class Child5(AbstractBase3):
    name = models.CharField(max_length=25)

    # Own filtering default plus an unrestricted fallback.
    default = OnlyFred()
    objects = models.Manager()

    def __str__(self):
        return self.name


# Will inherit managers from AbstractBase1, but not Child4.
class Child6(Child4):
    value = models.IntegerField()


# Will not inherit default manager from parent.
class Child7(Parent):
    pass
<|endoftext|> |
<|endoftext|>"""
Regression tests for proper working of ForeignKey(null=True). Tests these bugs:
    * #7512: including a nullable foreign key reference in Meta ordering has
      unexpected results
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# The first two models represent a very simple null FK ordering case.
class Author(models.Model):
    name = models.CharField(max_length=150)


@python_2_unicode_compatible
class Article(models.Model):
    title = models.CharField(max_length=150)
    # Nullable FK referenced by Meta.ordering -- the case under test.
    author = models.ForeignKey(Author, null=True)

    def __str__(self):
        return "Article titled: %s" % (self.title,)

    class Meta:
        ordering = [
            "author__name",
        ]
# These following 4 models represent a far more complex ordering case.
class SystemInfo(models.Model):
system_name = models.CharField(max_length=32)
class Forum(models.Model):
system_info = models.ForeignKey(SystemInfo)
forum_name = models.CharField(max_length=32)
@python_2_unicode_compatible
class Post(models.Model):
    # Nullable FK in the middle of the ordering chain used by Comment.
    forum = models.ForeignKey(Forum, null=True)
    title = models.CharField(max_length=32)
    def __str__(self):
        return self.title
@python_2_unicode_compatible
class Comment(models.Model):
    # Nullable FK starting a three-hop ordering path with two nullable links.
    post = models.ForeignKey(Post, null=True)
    comment_text = models.CharField(max_length=250)
    class Meta:
        # Orders across post (nullable) -> forum (nullable) -> system_info.
        ordering = ["post__forum__system_info__system_name", "comment_text"]
    def __str__(self):
        return self.comment_text
<|endoftext|> |
<|endoftext|>from django.core.xheaders import populate_xheaders
from django.http import HttpResponse
from django.utils.decorators import decorator_from_middleware
from django.views.generic import View
from django.middleware.doc import XViewMiddleware
from .models import Article
# Decorator form of XViewMiddleware, for applying it to a single view.
xview_dec = decorator_from_middleware(XViewMiddleware)
def xview(request):
    # Minimal function-based view: returns an empty 200 response so tests
    # can inspect headers added by middleware/decorators.
    return HttpResponse()
def xview_xheaders(request, object_id):
    # Returns an empty response with object X-headers for Article attached.
    # NOTE(review): the captured ``object_id`` is ignored and a literal pk
    # of 1 is passed — presumably deliberate for the test fixtures; verify.
    response = HttpResponse()
    populate_xheaders(request, response, Article, 1)
    return response
class XViewClass(View):
    # Class-based counterpart of ``xview`` for testing CBV header handling.
    def get(self, request):
        return HttpResponse()
<|endoftext|> |
<|endoftext|># Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 1.0 credentials.
"""
__author__ = "[email protected] (Joe Gregorio)"
import pickle
import threading
from apiclient.oauth import Storage as BaseStorage
class Storage(BaseStorage):
    """Store and retrieve a single credential to and from a file.

    The credential is pickled into ``filename``; an instance-level lock
    serializes concurrent ``get``/``put`` calls within this process.
    """

    def __init__(self, filename):
        self._filename = filename
        self._lock = threading.Lock()

    def get(self):
        """Retrieve Credential from file.

        Returns:
          apiclient.oauth.Credentials, or None if the file is missing,
          unreadable, or does not contain a valid pickled credential.
        """
        # ``with`` guarantees the lock is released on any exit path.
        with self._lock:
            try:
                # Pickle data is binary: open in 'rb'.  (The original opened
                # in text mode, which breaks pickle.loads on Python 3.)
                with open(self._filename, "rb") as f:
                    credentials = pickle.loads(f.read())
                # Re-attach the store callback so refreshed credentials get
                # written back through put().
                credentials.set_store(self.put)
            except Exception:
                # Narrowed from a bare ``except:``, which also swallowed
                # SystemExit/KeyboardInterrupt.  Any load failure simply
                # means "no stored credential yet".
                credentials = None
            return credentials

    def put(self, credentials):
        """Write a pickled Credentials to file.

        Args:
          credentials: Credentials, the credentials to store.
        """
        # ``with`` blocks close the file and release the lock even if
        # pickling raises (the original leaked both on error).
        with self._lock:
            with open(self._filename, "wb") as f:
                f.write(pickle.dumps(credentials))
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
# ---------------------------------------------------------------------------
"""
Provides a front-end to the Python standard ``optparse`` module. The
``CommandLineParser`` class makes two changes to the standard behavior.
- The output for the '-h' option is slightly different.
- A bad option causes the parser to generate the entire usage output,
not just an error message.
It also provides a couple extra utility modules.
"""
__docformat__ = "restructuredtext en"
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import sys
import textwrap

from optparse import OptionParser
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ["CommandLineParser"]
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class CommandLineParser(OptionParser):
    """Custom version of command line option parser."""

    def __init__(self, *args, **kw):
        """Create a new instance."""
        OptionParser.__init__(self, *args, **kw)
        # I like my help option message better than the default...
        self.remove_option("-h")
        self.add_option(
            "-h", "--help", action="help", help="Show this message and exit."
        )
        # Optional text appended after the standard help output.
        self.epilogue = None

    def print_help(self, out=sys.stderr):
        """
        Print the help message, followed by the epilogue (if set), to the
        specified output file. You can define an epilogue by setting the
        ``epilogue`` field.

        :Parameters:
            out : file
                where to write the usage message
        """
        OptionParser.print_help(self, out)
        if self.epilogue:
            # textwrap is now imported at module level instead of inside
            # this method, so the import cost is paid once.
            print("\n%s" % textwrap.fill(self.epilogue, 80), file=out)
        out.flush()

    def die_with_usage(self, msg=None, exit_code=2):
        """
        Display a usage message and exit.

        :Parameters:
            msg : str
                If not set to ``None`` (the default), this message will be
                displayed before the usage message

            exit_code : int
                The process exit code. Defaults to 2.
        """
        # Fixed: identity comparison against the None singleton (PEP 8);
        # the original used ``msg != None``, which invokes __eq__ overloads.
        if msg is not None:
            print(msg, file=sys.stderr)
        self.print_help(sys.stderr)
        sys.exit(exit_code)

    def error(self, msg):
        """
        Overrides parent ``OptionParser`` class's ``error()`` method and
        forces the full usage message on error.
        """
        sys.stderr.write("%s: error: %s\n" % (self.get_prog_name(), msg))
        self.die_with_usage(msg)
<|endoftext|> |
<|endoftext|># Nose program for testing grizzled.proxy class.
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
from grizzled.proxy import Forwarder
import tempfile
from grizzled.file import unlink_quietly
from .test_helpers import exception_expected
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class ForwardToFile(Forwarder):
    """Forwarder that proxies a wrapped file object, suppressing the
    attribute names passed as *exceptions*."""

    def __init__(self, file, *exceptions):
        # Collapse the varargs into the single tuple Forwarder expects.
        super().__init__(file, exceptions)
class TestProxyPackage(object):
def test_forward_all(self):
path = self._create_file()
try:
with open(path) as f:
contents = "".join(f.readlines())
with open(path) as f:
fwd = ForwardToFile(f)
contents2 = "".join(fwd.readlines())
assert contents2 == contents
finally:
unlink_quietly(path)
def test_forward_all_but_name(self):
path = self._create_file()
try:
with exception_expected(AttributeError):
with open(path) as f:
fwd = ForwardToFile(f, "name", "foo")
fwd.name
finally:
unlink_quietly(path)
def test_forward_all_but_name_mode(self):
path = self._create_file()
try:
with open(path) as f:
fwd = ForwardToFile(f, "name", "mode")
fwd.closed # should not fail
with exception_expected(AttributeError):
fwd.name
with exception_expected(AttributeError):
fwd.mode
finally:
unlink_quietly(path)
def _create_file(self):
temp = tempfile.NamedTemporaryFile(prefix="fwdtest", delete=False)
temp.write(", ".join([str(x) for x in range(1, 81)]))
temp.write(", ".join([str(x) for x in range(1, 21)]))
temp.close
return temp.name
<|endoftext|> |
<|endoftext|>from jinja2 import Environment
from jinja2.loaders import DictLoader
env = Environment(
loader=DictLoader(
{
"child.html": """\
{% extends master_layout or 'master.html' %}
{% include helpers = 'helpers.html' %}
{% macro get_the_answer() %}42{% endmacro %}
{% title = 'Hello World' %}
{% block body %}
{{ get_the_answer() }}
{{ helpers.conspirate() }}
{% endblock %}
""",
"master.html": """\
<!doctype html>
<title>{{ title }}</title>
{% block body %}{% endblock %}
""",
"helpers.html": """\
{% macro conspirate() %}23{% endmacro %}
""",
}
)
)
tmpl = env.get_template("child.html")
print(tmpl.render())
<|endoftext|> |
<|endoftext|># -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
Jinja extensions allow to add custom tags similar to the way django custom
tags work. By default two example extensions exist: an i18n and a cache
extension.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from collections import deque
from jinja2 import nodes
from jinja2.defaults import *
from jinja2.environment import Environment
from jinja2.runtime import Undefined, concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup, next
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
# (These are also the default callable names scanned by extract_from_ast
# and babel_extract below.)
GETTEXT_FUNCTIONS = ("_", "gettext", "ngettext")
class ExtensionRegistry(type):
    """Metaclass that stamps every extension class with a unique
    ``identifier``: its dotted import path (module plus class name)."""

    def __new__(cls, name, bases, d):
        new_class = type.__new__(cls, name, bases, d)
        new_class.identifier = new_class.__module__ + "." + new_class.__name__
        return new_class
class Extension(object, metaclass=ExtensionRegistry):
    """Extensions can be used to add extra functionality to the Jinja template
    system at the parser level. Custom extensions are bound to an environment
    but may not store environment specific data on `self`. The reason for
    this is that an extension can be bound to another environment (for
    overlays) by creating a copy and reassigning the `environment` attribute.
    As extensions are created by the environment they cannot accept any
    arguments for configuration. One may want to work around that by using
    a factory function, but that is not possible as extensions are identified
    by their import name. The correct way to configure the extension is
    storing the configuration values on the environment. Because this way the
    environment ends up acting as central configuration storage the
    attributes may clash which is why extensions have to ensure that the names
    they choose for configuration are not too generic. ``prefix`` for example
    is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
    name as includes the name of the extension (fragment cache).
    """

    #: if this extension parses this is the list of tags it's listening to.
    tags = set()

    #: the priority of that extension. This is especially useful for
    #: extensions that preprocess values. A lower value means higher
    #: priority.
    #:
    #: .. versionadded:: 2.4
    priority = 100

    def __init__(self, environment):
        # Extensions are instantiated by the environment itself; see the
        # class docstring for why no extra constructor arguments exist.
        self.environment = environment

    def bind(self, environment):
        """Create a copy of this extension bound to another environment."""
        # object.__new__ deliberately bypasses __init__ (and any subclass
        # __init__ side effects); the shallow __dict__ copy carries over
        # all existing state before the environment is swapped.
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.environment = environment
        return rv

    def preprocess(self, source, name, filename=None):
        """This method is called before the actual lexing and can be used to
        preprocess the source. The `filename` is optional. The return value
        must be the preprocessed source.
        """
        # Default: identity transform.
        return source

    def filter_stream(self, stream):
        """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
        to filter tokens returned. This method has to return an iterable of
        :class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
        :class:`~jinja2.lexer.TokenStream`.

        In the `ext` folder of the Jinja2 source distribution there is a file
        called `inlinegettext.py` which implements a filter that utilizes this
        method.
        """
        # Default: pass the stream through unchanged.
        return stream

    def parse(self, parser):
        """If any of the :attr:`tags` matched this method is called with the
        parser as first argument. The token the parser stream is pointing at
        is the name token that matched. This method has to return one or a
        list of multiple nodes.
        """
        raise NotImplementedError()

    def attr(self, name, lineno=None):
        """Return an attribute node for the current extension. This is useful
        to pass constants on extensions to generated template code.

        ::

            self.attr('_my_attribute', lineno=lineno)
        """
        # ``self.identifier`` is set by the ExtensionRegistry metaclass.
        return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)

    def call_method(
        self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None
    ):
        """Call a method of the extension. This is a shortcut for
        :meth:`attr` + :class:`jinja2.nodes.Call`.
        """
        if args is None:
            args = []
        if kwargs is None:
            # Note: kwargs is a *list* (of nodes.Keyword items), matching
            # the signature of nodes.Call.
            kwargs = []
        return nodes.Call(
            self.attr(name, lineno=lineno),
            args,
            kwargs,
            dyn_args,
            dyn_kwargs,
            lineno=lineno,
        )
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
    # Resolve ``gettext`` from the template context at call time, so an
    # environment's installed translations are honored per render.
    gettext_func = __context.resolve("gettext")
    return __context.call(gettext_func, *args, **kwargs)
def _make_new_gettext(func):
    """Wrap *func* as a newstyle gettext callable that expands %-style
    *variables* and wraps the result in Markup under autoescaping."""

    @contextfunction
    def gettext(__context, __string, **variables):
        translated = __context.call(func, __string)
        if __context.eval_ctx.autoescape:
            translated = Markup(translated)
        return translated % variables

    return gettext
def _make_new_ngettext(func):
    """Wrap *func* as a newstyle ngettext callable that expands %-style
    *variables* (``num`` is always available) and honors autoescaping."""

    @contextfunction
    def ngettext(__context, __singular, __plural, __num, **variables):
        # ``num`` is always interpolatable even when not passed explicitly.
        variables.setdefault("num", __num)
        translated = __context.call(func, __singular, __plural, __num)
        if __context.eval_ctx.autoescape:
            translated = Markup(translated)
        return translated % variables

    return ngettext
class InternationalizationExtension(Extension):
    """This extension adds gettext support to Jinja2."""

    tags = set(["trans"])

    # TODO: the i18n extension is currently reevaluating values in a few
    # situations. Take this example:
    # {% trans count=something() %}{{ count }} foo{% pluralize
    # %}{{ count }} fooss{% endtrans %}
    # something is called twice here. One time for the gettext value and
    # the other time for the n-parameter of the ngettext function.

    def __init__(self, environment):
        # Register the ``_`` alias and extend the environment with the
        # public (un)install/extract API used by applications.
        Extension.__init__(self, environment)
        environment.globals["_"] = _gettext_alias
        environment.extend(
            install_gettext_translations=self._install,
            install_null_translations=self._install_null,
            install_gettext_callables=self._install_callables,
            uninstall_gettext_translations=self._uninstall,
            extract_translations=self._extract,
            newstyle_gettext=False,
        )

    def _install(self, translations, newstyle=None):
        """Install a translations object (gettext/Babel style)."""
        # Prefer the unicode variants (Python 2 naming); fall back to the
        # plain names when they don't exist.
        gettext = getattr(translations, "ugettext", None)
        if gettext is None:
            gettext = translations.gettext
        ngettext = getattr(translations, "ungettext", None)
        if ngettext is None:
            ngettext = translations.ngettext
        self._install_callables(gettext, ngettext, newstyle)

    def _install_null(self, newstyle=None):
        # Identity translations: singular passes through; plural picks the
        # plural form for n != 1.
        self._install_callables(
            lambda x: x, lambda s, p, n: (n != 1 and (p,) or (s,))[0], newstyle
        )

    def _install_callables(self, gettext, ngettext, newstyle=None):
        """Install explicit gettext/ngettext callables on the environment."""
        if newstyle is not None:
            self.environment.newstyle_gettext = newstyle
        if self.environment.newstyle_gettext:
            # Newstyle wrappers handle %-expansion and autoescaping.
            gettext = _make_new_gettext(gettext)
            ngettext = _make_new_ngettext(ngettext)
        self.environment.globals.update(gettext=gettext, ngettext=ngettext)

    def _uninstall(self, translations):
        # Remove the installed callables (missing keys are ignored).
        for key in "gettext", "ngettext":
            self.environment.globals.pop(key, None)

    def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
        # Accept either template source or an already-parsed AST.
        if isinstance(source, str):
            source = self.environment.parse(source)
        return extract_from_ast(source, gettext_functions)

    def parse(self, parser):
        """Parse a translatable tag."""
        lineno = next(parser.stream).lineno
        num_called_num = False

        # find all the variables referenced. Additionally a variable can be
        # defined in the body of the trans block too, but this is checked at
        # a later state.
        plural_expr = None
        variables = {}
        while parser.stream.current.type != "block_end":
            if variables:
                parser.stream.expect("comma")

            # skip colon for python compatibility
            if parser.stream.skip_if("colon"):
                break

            name = parser.stream.expect("name")
            if name.value in variables:
                parser.fail(
                    "translatable variable %r defined twice." % name.value,
                    name.lineno,
                    exc=TemplateAssertionError,
                )

            # expressions
            if parser.stream.current.type == "assign":
                next(parser.stream)
                variables[name.value] = var = parser.parse_expression()
            else:
                variables[name.value] = var = nodes.Name(name.value, "load")

            # The first variable doubles as the plural expression unless a
            # pluralize tag overrides it later.
            if plural_expr is None:
                plural_expr = var
                num_called_num = name.value == "num"

        parser.stream.expect("block_end")

        plural = plural_names = None
        have_plural = False
        referenced = set()

        # now parse until endtrans or pluralize
        singular_names, singular = self._parse_block(parser, True)
        if singular_names:
            referenced.update(singular_names)
            if plural_expr is None:
                plural_expr = nodes.Name(singular_names[0], "load")
                num_called_num = singular_names[0] == "num"

        # if we have a pluralize block, we parse that too
        if parser.stream.current.test("name:pluralize"):
            have_plural = True
            next(parser.stream)
            if parser.stream.current.type != "block_end":
                name = parser.stream.expect("name")
                if name.value not in variables:
                    parser.fail(
                        "unknown variable %r for pluralization" % name.value,
                        name.lineno,
                        exc=TemplateAssertionError,
                    )
                plural_expr = variables[name.value]
                num_called_num = name.value == "num"
            parser.stream.expect("block_end")
            plural_names, plural = self._parse_block(parser, False)
            next(parser.stream)
            referenced.update(plural_names)
        else:
            next(parser.stream)

        # register free names as simple name expressions
        for var in referenced:
            if var not in variables:
                variables[var] = nodes.Name(var, "load")

        if not have_plural:
            plural_expr = None
        elif plural_expr is None:
            parser.fail("pluralize without variables", lineno)

        node = self._make_node(
            singular,
            plural,
            variables,
            plural_expr,
            bool(referenced),
            num_called_num and have_plural,
        )
        node.set_lineno(lineno)
        return node

    def _parse_block(self, parser, allow_pluralize):
        """Parse until the next block tag with a given name."""
        # ``referenced`` collects {{ var }} names; ``buf`` accumulates the
        # message text with %-placeholders substituted for variables.
        referenced = []
        buf = []
        while 1:
            if parser.stream.current.type == "data":
                # Literal ``%`` must be doubled so later %-formatting is safe.
                buf.append(parser.stream.current.value.replace("%", "%%"))
                next(parser.stream)
            elif parser.stream.current.type == "variable_begin":
                next(parser.stream)
                name = parser.stream.expect("name").value
                referenced.append(name)
                buf.append("%%(%s)s" % name)
                parser.stream.expect("variable_end")
            elif parser.stream.current.type == "block_begin":
                next(parser.stream)
                if parser.stream.current.test("name:endtrans"):
                    break
                elif parser.stream.current.test("name:pluralize"):
                    if allow_pluralize:
                        break
                    parser.fail(
                        "a translatable section can have only one " "pluralize section"
                    )
                parser.fail(
                    "control structures in translatable sections are " "not allowed"
                )
            elif parser.stream.eos:
                parser.fail("unclosed translation block")
            else:
                assert False, "internal parser error"

        return referenced, concat(buf)

    def _make_node(
        self, singular, plural, variables, plural_expr, vars_referenced, num_called_num
    ):
        """Generates a useful node from the data provided."""
        # no variables referenced? no need to escape for old style
        # gettext invocations only if there are vars.
        if not vars_referenced and not self.environment.newstyle_gettext:
            singular = singular.replace("%%", "%")
            if plural:
                plural = plural.replace("%%", "%")

        # singular only:
        if plural_expr is None:
            gettext = nodes.Name("gettext", "load")
            node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)

        # singular and plural
        else:
            ngettext = nodes.Name("ngettext", "load")
            node = nodes.Call(
                ngettext,
                [nodes.Const(singular), nodes.Const(plural), plural_expr],
                [],
                None,
                None,
            )

        # in case newstyle gettext is used, the method is powerful
        # enough to handle the variable expansion and autoescape
        # handling itself
        if self.environment.newstyle_gettext:
            for key, value in variables.items():
                # the function adds that later anyways in case num was
                # called num, so just skip it.
                if num_called_num and key == "num":
                    continue
                node.kwargs.append(nodes.Keyword(key, value))

        # otherwise do that here
        else:
            # mark the return value as safe if we are in an
            # environment with autoescaping turned on
            node = nodes.MarkSafeIfAutoescape(node)
            if variables:
                node = nodes.Mod(
                    node,
                    nodes.Dict(
                        [
                            nodes.Pair(nodes.Const(key), value)
                            for key, value in list(variables.items())
                        ]
                    ),
                )
        return nodes.Output([node])
class ExprStmtExtension(Extension):
    """Adds a `do` tag to Jinja2 that works like the print statement just
    that it doesn't print the return value.
    """

    tags = set(["do"])

    def parse(self, parser):
        # Consume the 'do' token; its line number anchors the new node.
        stmt = nodes.ExprStmt(lineno=next(parser.stream).lineno)
        stmt.node = parser.parse_tuple()
        return stmt
class LoopControlExtension(Extension):
    """Adds break and continue to the template engine."""

    tags = set(["break", "continue"])

    def parse(self, parser):
        # The matched tag name decides which loop-control node to emit.
        token = next(parser.stream)
        node_cls = nodes.Break if token.value == "break" else nodes.Continue
        return node_cls(lineno=token.lineno)
class WithExtension(Extension):
    """Adds support for a django-like with block."""

    tags = set(["with"])

    def parse(self, parser):
        # The whole construct becomes a Scope so the assignments do not
        # leak out of the block.
        scope = nodes.Scope(lineno=next(parser.stream).lineno)
        bindings = []
        while parser.stream.current.type != "block_end":
            lineno = parser.stream.current.lineno
            if bindings:
                parser.stream.expect("comma")
            target = parser.parse_assign_target()
            parser.stream.expect("assign")
            value = parser.parse_expression()
            bindings.append(nodes.Assign(target, value, lineno=lineno))
        body = parser.parse_statements(("name:endwith",), drop_needle=True)
        scope.body = bindings + list(body)
        return scope
class AutoEscapeExtension(Extension):
    """Changes auto escape rules for a scope."""

    tags = set(["autoescape"])

    def parse(self, parser):
        lineno = next(parser.stream).lineno
        # Evaluate the autoescape expression and apply it to the enclosed
        # statements only, via a scoped eval-context modifier.
        modifier = nodes.ScopedEvalContextModifier(lineno=lineno)
        modifier.options = [nodes.Keyword("autoescape", parser.parse_expression())]
        modifier.body = parser.parse_statements(("name:endautoescape",), drop_needle=True)
        return nodes.Scope([modifier])
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True):
    """Extract localizable strings from the given template node. Per
    default this function returns matches in babel style that means non string
    parameters as well as keyword arguments are returned as `None`. This
    allows Babel to figure out what you really meant if you are using
    gettext functions that allow keyword arguments for placeholder expansion.
    If you don't want that behavior set the `babel_style` parameter to `False`
    which causes only strings to be returned and parameters are always stored
    in tuples. As a consequence invalid gettext calls (calls without a single
    string parameter or string parameters after non-string parameters) are
    skipped.

    This example explains the behavior:

    >>> from jinja2 import Environment
    >>> env = Environment()
    >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
    >>> list(extract_from_ast(node))
    [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
    >>> list(extract_from_ast(node, babel_style=False))
    [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]

    For every string found this function yields a ``(lineno, function,
    message)`` tuple, where:

    * ``lineno`` is the number of the line on which the string was found,
    * ``function`` is the name of the ``gettext`` function used (if the
      string was extracted from embedded Python code), and
    * ``message`` is the string itself (a ``unicode`` object, or a tuple
      of ``unicode`` objects for functions with multiple string arguments).

    This extraction function operates on the AST and is because of that unable
    to extract any comments. For comment support you have to use the babel
    extraction interface or extract comments yourself.
    """
    for node in node.find_all(nodes.Call):
        # Only direct calls of a plain name in gettext_functions count;
        # attribute calls (e.g. obj.gettext(...)) are skipped.
        if (
            not isinstance(node.node, nodes.Name)
            or node.node.name not in gettext_functions
        ):
            continue

        # One entry per positional argument: the constant string, or None
        # for anything that is not a string constant.
        strings = []
        for arg in node.args:
            if isinstance(arg, nodes.Const) and isinstance(arg.value, str):
                strings.append(arg.value)
            else:
                strings.append(None)

        # Keyword and dynamic arguments are never extractable strings.
        for arg in node.kwargs:
            strings.append(None)
        if node.dyn_args is not None:
            strings.append(None)
        if node.dyn_kwargs is not None:
            strings.append(None)

        if not babel_style:
            # Strict mode: drop non-strings entirely; skip calls with no
            # string parameters at all.
            strings = tuple(x for x in strings if x is not None)
            if not strings:
                continue
        else:
            # Babel mode: a single argument is yielded bare, multiple as a
            # tuple (with None placeholders preserved).
            if len(strings) == 1:
                strings = strings[0]
            else:
                strings = tuple(strings)
        yield node.lineno, node.node.name, strings
class _CommentFinder(object):
    """Helper class to find comments in a token stream. Can only
    find comments for gettext calls forwards. Once the comment
    from line 4 is found, a comment for line 1 will not return a
    usable value.
    """

    def __init__(self, tokens, comment_tags):
        # tokens: list of (lineno, type, value) triples from the lexer.
        self.tokens = tokens
        # comment_tags: prefixes (e.g. 'i18n:') that mark translator notes.
        self.comment_tags = comment_tags
        # offset: index of the first not-yet-consumed token; advances
        # monotonically, which is why lookups only work forwards.
        self.offset = 0
        self.last_lineno = 0

    def find_backwards(self, offset):
        # Scan the window [self.offset, offset) from the end, looking for
        # the comment closest to the gettext call.
        try:
            for _, token_type, token_value in reversed(
                self.tokens[self.offset : offset]
            ):
                if token_type in ("comment", "linecomment"):
                    try:
                        prefix, comment = token_value.split(None, 1)
                    except ValueError:
                        # Comment with no text after the prefix; ignore.
                        continue
                    if prefix in self.comment_tags:
                        return [comment.rstrip()]
            return []
        finally:
            # Consume the window regardless of outcome so later calls
            # cannot rediscover the same comment.
            self.offset = offset

    def find_comments(self, lineno):
        # No tags configured, or we've already moved past this line: give up.
        if not self.comment_tags or self.last_lineno > lineno:
            return []
        for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
            if token_lineno > lineno:
                return self.find_backwards(self.offset + idx)
        return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
    """Babel extraction method for Jinja templates.

    .. versionchanged:: 2.3
       Basic support for translation comments was added. If `comment_tags`
       is now set to a list of keywords for extraction, the extractor will
       try to find the best preceeding comment that begins with one of the
       keywords. For best results, make sure to not have more than one
       gettext call in one line of code and the matching comment in the
       same line or the line before.

    .. versionchanged:: 2.5.1
       The `newstyle_gettext` flag can be set to `True` to enable newstyle
       gettext calls.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results.
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
             (comments will be empty currently)
    """
    # Build the extension set from the comma-separated option, always
    # including the i18n extension itself.
    extensions = set()
    for extension in options.get("extensions", "").split(","):
        extension = extension.strip()
        if not extension:
            continue
        extensions.add(import_string(extension))
    if InternationalizationExtension not in extensions:
        extensions.add(InternationalizationExtension)

    def getbool(options, key, default=False):
        # BUG FIX: the original lacked the ``return``, so this always
        # returned None and boolean options (trim_blocks, newstyle_gettext)
        # were silently ignored.
        return options.get(key, str(default)).lower() in ("1", "on", "yes", "true")

    # Environment configured purely from the Babel options, with caching
    # and auto-reload disabled for a one-shot parse.
    environment = Environment(
        options.get("block_start_string", BLOCK_START_STRING),
        options.get("block_end_string", BLOCK_END_STRING),
        options.get("variable_start_string", VARIABLE_START_STRING),
        options.get("variable_end_string", VARIABLE_END_STRING),
        options.get("comment_start_string", COMMENT_START_STRING),
        options.get("comment_end_string", COMMENT_END_STRING),
        options.get("line_statement_prefix") or LINE_STATEMENT_PREFIX,
        options.get("line_comment_prefix") or LINE_COMMENT_PREFIX,
        getbool(options, "trim_blocks", TRIM_BLOCKS),
        NEWLINE_SEQUENCE,
        frozenset(extensions),
        cache_size=0,
        auto_reload=False,
    )

    if getbool(options, "newstyle_gettext"):
        environment.newstyle_gettext = True

    source = fileobj.read().decode(options.get("encoding", "utf-8"))
    try:
        node = environment.parse(source)
        tokens = list(environment.lex(environment.preprocess(source)))
    except TemplateSyntaxError:
        # skip templates with syntax errors
        return

    finder = _CommentFinder(tokens, comment_tags)
    for lineno, func, message in extract_from_ast(node, keywords):
        yield lineno, func, message, finder.find_comments(lineno)
#: nicer import names
# Short aliases so the extensions can be referenced by a compact dotted
# path (e.g. 'jinja2.ext.i18n'), the form resolved by ``import_string``
# in ``babel_extract`` above.
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
<|endoftext|> |
<|endoftext|># -*- coding: utf-8 -*-
"""
Jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
`Django`_ inspired non-XML syntax but supports inline expressions and
an optional `sandboxed`_ environment.
Nutshell
--------
Here a small example of a Jinja template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
Philosophy
----------
Application logic is for the controller but don't try to make the life
for the template designer too hard by giving him too few functionality.
For more informations visit the new `Jinja2 webpage`_ and `documentation`_.
.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)
.. _Django: http://www.djangoproject.com/
.. _Jinja2 webpage: http://jinja.pocoo.org/
.. _documentation: http://jinja.pocoo.org/2/documentation/
"""
import sys
from setuptools import setup, Extension, Feature
# Optional C extension providing better tracebacks; off by default and
# enabled with the --with-debugsupport flag (setuptools Feature).
debugsupport = Feature(
    "optional C debug support",
    standard=False,
    ext_modules=[
        Extension("jinja2._debugsupport", ["jinja2/_debugsupport.c"]),
    ],
)

# tell distribute to use 2to3 with our own fixers.
extra = {}
if sys.version_info >= (3, 0):
    extra.update(use_2to3=True, use_2to3_fixers=["custom_fixers"])

# ignore the old '--with-speedups' flag
try:
    speedups_pos = sys.argv.index("--with-speedups")
except ValueError:
    pass
else:
    # Rewrite the deprecated flag in-place in sys.argv and warn loudly.
    sys.argv[speedups_pos] = "--with-debugsupport"
    sys.stderr.write("*" * 74 + "\n")
    sys.stderr.write("WARNING:\n")
    sys.stderr.write(
        " the --with-speedups flag is deprecated, assuming " "--with-debugsupport\n"
    )
    sys.stderr.write(" For the actual speedups install the MarkupSafe " "package.\n")
    sys.stderr.write("*" * 74 + "\n")

setup(
    name="Jinja2",
    version="2.6",
    url="http://jinja.pocoo.org/",
    license="BSD",
    author="Armin Ronacher",
    author_email="[email protected]",
    description="A small but fast and easy to use stand-alone template "
    "engine written in pure python.",
    # The module docstring at the top of this file is the long description.
    long_description=__doc__,
    # jinja is egg safe. But we hate eggs
    zip_safe=False,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing :: Markup :: HTML",
    ],
    packages=[
        "jinja2",
        "jinja2.testsuite",
        "jinja2.testsuite.res",
        "jinja2._markupsafe",
    ],
    extras_require={"i18n": ["Babel>=0.8"]},
    test_suite="jinja2.testsuite.suite",
    include_package_data=True,
    # Registers the babel_extract function as a Babel extractor plugin.
    entry_points="""
    [babel.extractors]
    jinja2 = jinja2.ext:babel_extract[i18n]
    """,
    features={"debugsupport": debugsupport},
    **extra
)
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "[email protected] (Rafe Kaplan)"
<|endoftext|> |
<|endoftext|>import os.path, sys, fnmatch
from distutils.command.build_py import build_py as _build_py
from distutils.util import convert_path
from glob import glob
class build_py(_build_py):
"""Enhanced 'build_py' command that includes data files with packages
The data files are specified via a 'package_data' argument to 'setup()'.
See 'setuptools.dist.Distribution' for more details.
Also, this version of the 'build_py' command allows you to specify both
'py_modules' and 'packages' in the same setup operation.
"""
    def finalize_options(self):
        # Pull package_data/exclude_package_data off the Distribution so
        # the data-file machinery below can see them.
        _build_py.finalize_options(self)
        self.package_data = self.distribution.package_data
        self.exclude_package_data = self.distribution.exclude_package_data or {}
        if "data_files" in self.__dict__:
            # Drop any cached value; __getattr__ recomputes it lazily.
            del self.__dict__["data_files"]
    def run(self):
        """Build modules, packages, and copy data files to build directory"""
        if not self.py_modules and not self.packages:
            # Nothing to build at all.
            return

        if self.py_modules:
            self.build_modules()

        if self.packages:
            self.build_packages()
            self.build_package_data()

        # Only compile actual .py files, using our base class' idea of what our
        # output files are.
        self.byte_compile(_build_py.get_outputs(self, include_bytecode=0))
    def __getattr__(self, attr):
        # 'data_files' is expensive to compute (it runs egg_info), so it is
        # built on first access and cached on the instance; everything else
        # is delegated to the base command class.
        if attr == "data_files":  # lazily compute data files
            self.data_files = files = self._get_data_files()
            return files
        return _build_py.__getattr__(self, attr)
    def _get_data_files(self):
        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
        # Populates self.manifest_files (see analyze_manifest) before use.
        self.analyze_manifest()
        data = []
        for package in self.packages or ():
            # Locate package source directory
            src_dir = self.get_package_dir(package)

            # Compute package build directory
            build_dir = os.path.join(*([self.build_lib] + package.split(".")))

            # Length of path to strip from found files
            plen = len(src_dir) + 1

            # Strip directory from globbed filenames
            filenames = [file[plen:] for file in self.find_data_files(package, src_dir)]
            data.append((package, src_dir, build_dir, filenames))
        return data
def find_data_files(self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
globs = self.package_data.get("", []) + self.package_data.get(package, [])
files = self.manifest_files.get(package, [])[:]
for pattern in globs:
# Each pattern has to be converted to a platform-specific path
files.extend(glob(os.path.join(src_dir, convert_path(pattern))))
return self.exclude_data_files(package, src_dir, files)
def build_package_data(self):
"""Copy data files into build directory"""
lastdir = None
for package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
self.copy_file(os.path.join(src_dir, filename), target)
def analyze_manifest(self):
self.manifest_files = mf = {}
if not self.distribution.include_package_data:
return
src_dirs = {}
for package in self.packages or ():
# Locate package source directory
src_dirs[assert_relative(self.get_package_dir(package))] = package
self.run_command("egg_info")
ei_cmd = self.get_finalized_command("egg_info")
for path in ei_cmd.filelist.files:
d, f = os.path.split(assert_relative(path))
prev = None
oldf = f
while d and d != prev and d not in src_dirs:
prev = d
d, df = os.path.split(d)
f = os.path.join(df, f)
if d in src_dirs:
if path.endswith(".py") and f == oldf:
continue # it's a module, not data
mf.setdefault(src_dirs[d], []).append(path)
def get_data_files(self):
pass # kludge 2.4 for lazy computation
if sys.version < "2.4": # Python 2.4 already has this code
def get_outputs(self, include_bytecode=1):
"""Return complete list of files copied to the build directory
This includes both '.py' files and data files, as well as '.pyc'
and '.pyo' files if 'include_bytecode' is true. (This method is
needed for the 'install_lib' command to do its job properly, and to
generate a correct installation manifest.)
"""
return _build_py.get_outputs(self, include_bytecode) + [
os.path.join(build_dir, filename)
for package, src_dir, build_dir, filenames in self.data_files
for filename in filenames
]
def check_package(self, package, package_dir):
"""Check namespace packages' __init__ for declare_namespace"""
try:
return self.packages_checked[package]
except KeyError:
pass
init_py = _build_py.check_package(self, package, package_dir)
self.packages_checked[package] = init_py
if not init_py or not self.distribution.namespace_packages:
return init_py
for pkg in self.distribution.namespace_packages:
if pkg == package or pkg.startswith(package + "."):
break
else:
return init_py
f = open(init_py, "rU")
if "declare_namespace" not in f.read():
from distutils import log
log.warn(
"WARNING: %s is a namespace package, but its __init__.py does\n"
"not declare_namespace(); setuptools 0.7 will REQUIRE this!\n"
'(See the setuptools manual under "Namespace Packages" for '
"details.)\n",
package,
)
f.close()
return init_py
def initialize_options(self):
self.packages_checked = {}
_build_py.initialize_options(self)
def exclude_data_files(self, package, src_dir, files):
"""Filter filenames for package's data files in 'src_dir'"""
globs = self.exclude_package_data.get("", []) + self.exclude_package_data.get(
package, []
)
bad = []
for pattern in globs:
bad.extend(
fnmatch.filter(files, os.path.join(src_dir, convert_path(pattern)))
)
bad = dict.fromkeys(bad)
seen = {}
return [
f
for f in files
if f not in bad and f not in seen and seen.setdefault(f, 1) # ditch dupes
]
def assert_relative(path):
    """Return *path* unchanged when it is relative.
    Raises DistutilsSetupError for absolute paths: setup() arguments must be
    /-separated paths relative to the setup.py directory.
    """
    if os.path.isabs(path):
        from distutils.errors import DistutilsSetupError
        raise DistutilsSetupError(
            """Error: setup script specifies an absolute path:
    %s
    setup() arguments must *always* be /-separated paths relative to the
    setup.py directory, *never* absolute paths.
    """
            % path
        )
    return path
<|endoftext|> |
<|endoftext|>"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
def _import_c_scanstring():
try:
from simplejson._speedups import scanstring
return scanstring
except ImportError:
return None
c_scanstring = _import_c_scanstring()
__all__ = ["JSONDecoder"]
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = "7FF80000000000007FF0000000000000".decode("hex")
# The struct module in Python 2.4 would get frexp() out of range here
# when an endian is specified in the format string. Fixed in Python 2.5+
if sys.byteorder != "big":
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack("dd", _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
    """Subclass of ValueError with the following additional properties:
    msg: The unformatted error message
    doc: The JSON document being parsed
    pos: The start index of doc where parsing failed
    end: The end index of doc where parsing failed (may be None)
    lineno: The line corresponding to pos
    colno: The column corresponding to pos
    endlineno: The line corresponding to end (may be None)
    endcolno: The column corresponding to end (may be None)
    """
    def __init__(self, msg, doc, pos, end=None):
        # The formatted, human-readable message comes from module-level
        # errmsg(); the raw pieces are kept as attributes for callers.
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        # Precompute line/column coordinates for error reporting.
        self.lineno, self.colno = linecol(doc, pos)
        if end is not None:
            self.endlineno, self.endcolno = linecol(doc, end)
        else:
            self.endlineno, self.endcolno = None, None
def linecol(doc, pos):
    """Return the 1-based (line, column) of index *pos* within *doc*."""
    lineno = doc.count("\n", 0, pos) + 1
    # On the first line the column is simply the index; otherwise measure
    # from the most recent newline.
    colno = pos if lineno == 1 else pos - doc.rindex("\n", 0, pos)
    return lineno, colno
def errmsg(msg, doc, pos, end=None):
    """Build a human-readable decode error message.
    Note that this function is called from _speedups as well.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        fmt = "%s: line %d column %d (char %d)"
        return fmt % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    fmt = "%s: line %d column %d - line %d column %d (char %d - %d)"
    return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
"-Infinity": NegInf,
"Infinity": PosInf,
"NaN": NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': '"',
"\\": "\\",
"/": "/",
"b": "\b",
"f": "\f",
"n": "\n",
"r": "\r",
"t": "\t",
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(
    s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match
):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.
    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    # Index of the opening quote, used in error messages.
    begin = end - 1
    while 1:
        # STRINGCHUNK matches a run of plain characters followed by one
        # terminator: a quote, a backslash, or a control character.
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError("Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not isinstance(content, str):
                content = str(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != "\\":
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                # msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: keep the literal control character.
                _append(terminator)
            continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError("Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != "u":
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1 : end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xD800 <= uni <= 0xDBFF and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5 : end + 7] == "\\u":
                    raise JSONDecodeError(msg, s, end)
                esc2 = s[end + 7 : end + 11]
                if len(esc2) != 4:
                    raise JSONDecodeError(msg, s, end)
                uni2 = int(esc2, 16)
                # Combine the high and low surrogates into one code point.
                uni = 0x10000 + (((uni - 0xD800) << 10) | (uni2 - 0xDC00))
                next_end += 6
            char = chr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return "".join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r"[ \t\n\r]*", FLAGS)
WHITESPACE_STR = " \t\n\r"
def JSONObject(
    xxx_todo_changeme,
    encoding,
    strict,
    scan_once,
    object_hook,
    object_pairs_hook,
    memo=None,
    _w=WHITESPACE.match,
    _ws=WHITESPACE_STR,
):
    """Parse a JSON object body starting just after the opening ``{``.
    ``xxx_todo_changeme`` is the ``(s, end)`` pair left over from the 2to3
    rewrite of a tuple-unpacking parameter.  Returns ``(obj, end)`` where
    ``obj`` is the decoded dict (or the result of ``object_hook`` /
    ``object_pairs_hook``) and ``end`` is the index just past the closing
    ``}``.
    """
    # Backwards compatibility
    (s, end) = xxx_todo_changeme
    if memo is None:
        memo = {}
    # The memo lets repeated key strings share one object.
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end : end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end : end + 1]
        # Trivial empty object
        if nextchar == "}":
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end)
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end : end + 1] != ":":
            end = _w(s, end).end()
            if s[end : end + 1] != ":":
                raise JSONDecodeError("Expecting : delimiter", s, end)
        end += 1
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        pairs.append((key, value))
        # Skip whitespace, then expect either ',' (more pairs) or '}' (done).
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ""
        end += 1
        if nextchar == "}":
            break
        elif nextchar != ",":
            raise JSONDecodeError("Expecting , delimiter", s, end - 1)
        # Position at the opening quote of the next property name.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ""
        end += 1
        if nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end - 1)
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray(xxx_todo_changeme1, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array body starting just after the opening ``[``.
    ``xxx_todo_changeme1`` is the ``(s, end)`` pair left over from the 2to3
    rewrite of a tuple-unpacking parameter.  Returns ``(values, end)`` with
    ``end`` just past the closing ``]``.
    """
    (s, end) = xxx_todo_changeme1
    values = []
    nextchar = s[end : end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end : end + 1]
    # Look-ahead for trivial empty array
    if nextchar == "]":
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        _append(value)
        # Skip whitespace, then expect either ',' (more items) or ']' (done).
        nextchar = s[end : end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end : end + 1]
        end += 1
        if nextchar == "]":
            break
        elif nextchar != ",":
            raise JSONDecodeError("Expecting , delimiter", s, end)
        # Skip whitespace before the next value.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder
    Performs the following translations in decoding by default:
    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+
    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """
    def __init__(
        self,
        encoding=None,
        object_hook=None,
        parse_float=None,
        parse_int=None,
        parse_constant=None,
        strict=True,
        object_pairs_hook=None,
    ):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default).  It has no effect when decoding :class:`unicode` objects.
        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.
        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).
        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`.  This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.
        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded.  By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).
        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded.  By default, this is equivalent to
        ``int(num_str)``.  This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).
        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``.  This
        can be used to raise an exception if invalid JSON numbers are
        encountered.
        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        # Parser callbacks consumed by make_scanner().
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # Shared memo lets repeated object keys reuse a single string object.
        self.memo = {}
        self.scan_once = make_scanner(self)
    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        # Anything but trailing whitespace after the document is an error.
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj
    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.
        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            # The scanner signals "nothing parseable here" via StopIteration.
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
        return obj, end
<|endoftext|> |
<|endoftext|># -*- coding: utf-8 -*-
"""
webapp2_extras.securecookie
===========================
A serializer for signed cookies.
:copyright: 2011 by tipfy.org.
    :license: Apache Software License, see LICENSE for details.
"""
import hashlib
import hmac
import logging
import time
from webapp2_extras import json
from webapp2_extras import security
class SecureCookieSerializer(object):
    """Serializes and deserializes secure cookie values.
    Extracted from `Tornado`_ and modified.
    """

    def __init__(self, secret_key):
        """Initializes the serializer/deserializer.
        :param secret_key:
            A random string to be used as the HMAC secret for the cookie
            signature.
        """
        self.secret_key = secret_key

    def serialize(self, name, value):
        """Serializes a signed cookie value.
        :param name:
            Cookie name.
        :param value:
            Cookie value to be serialized.
        :returns:
            A serialized value ready to be stored in a cookie, in the form
            ``value|timestamp|signature``.
        """
        timestamp = str(self._get_timestamp())
        value = self._encode(value)
        signature = self._get_signature(name, value, timestamp)
        return "|".join([value, timestamp, signature])

    def deserialize(self, name, value, max_age=None):
        """Deserializes a signed cookie value.
        :param name:
            Cookie name.
        :param value:
            A cookie value to be deserialized.
        :param max_age:
            Maximum age in seconds for a valid cookie. If the cookie is older
            than this, returns None.
        :returns:
            The deserialized secure cookie, or None if it is not valid.
        """
        if not value:
            return None
        parts = value.split("|")
        if len(parts) != 3:
            # Expected exactly value|timestamp|signature.
            return None
        signature = self._get_signature(name, parts[0], parts[1])
        # compare_hashes is a constant-time comparison (timing-attack safe).
        if not security.compare_hashes(parts[2], signature):
            logging.warning("Invalid cookie signature %r", value)
            return None
        if max_age is not None:
            if int(parts[1]) < self._get_timestamp() - max_age:
                logging.warning("Expired cookie %r", value)
                return None
        try:
            return self._decode(parts[0])
        except Exception:
            # Any decode failure invalidates the cookie; was
            # ``except Exception as e`` with ``e`` unused.
            logging.warning("Cookie value failed to be decoded: %r", parts[0])
            return None

    def _encode(self, value):
        # JSON + base64 encode the payload.
        return json.b64encode(value)

    def _decode(self, value):
        # Inverse of _encode().
        return json.b64decode(value)

    def _get_timestamp(self):
        # Whole seconds since the epoch.
        return int(time.time())

    def _get_signature(self, *parts):
        """Generates an HMAC-SHA1 hex signature over the '|'-joined parts."""
        key = self.secret_key
        if isinstance(key, str):
            # hmac.new() requires a bytes key under Python 3; accept either.
            key = key.encode("utf-8")
        signature = hmac.new(key, digestmod=hashlib.sha1)
        signature.update("|".join(parts).encode("utf-8"))
        return signature.hexdigest()
<|endoftext|> |
<|endoftext|>import cgi, warnings
from webob.headers import _trans_key
def html_escape(s):
    """HTML-escape a string or object
    This converts any non-string objects passed into it to strings
    (using ``str()``).  All values returned are plain strings, with
    ``&#num;`` entities used for all non-ASCII characters.
    None is treated specially, and returns the empty string.
    """
    if s is None:
        return ""
    if hasattr(s, "__html__"):
        # The object knows how to render itself as markup.
        return s.__html__()
    if not isinstance(s, str):
        # The original 2to3 output had identical str() calls in both the
        # __unicode__ and non-__unicode__ branches; one conversion suffices.
        s = str(s)
    # cgi.escape was deprecated and removed (Python 3.8+/3.13); html.escape
    # is its replacement.  Note it additionally escapes "'" as &#x27;.
    s = html.escape(s, True)
    # Represent non-ASCII characters as numeric character references, and
    # decode back so we return str — the previous code returned bytes here.
    return s.encode("ascii", "xmlcharrefreplace").decode("ascii")
def header_docstring(header, rfc_section):
    """Return the docstring for a header property, linking to RFC 2616."""
    if header.isupper():
        # CGI-style keys (e.g. HTTP_ACCEPT) are translated to header names.
        header = _trans_key(header)
    major = rfc_section.partition(".")[0]
    link = (
        "http://www.w3.org/Protocols/rfc2616/rfc2616-sec%s.html#sec%s"
        % (major, rfc_section)
    )
    return (
        "Gets and sets the ``%s`` header (`HTTP spec section %s <%s>`_)."
        % (header, rfc_section, link)
    )
def warn_deprecation(text, version, stacklevel):
    """Warn that a feature is deprecated as of *version*."""
    # version specifies when to start raising exceptions instead of warnings
    known = {
        "1.2": DeprecationWarning,
        "1.3": PendingDeprecationWarning,
    }
    cls = known.get(version)
    if cls is None:
        cls = DeprecationWarning
        warnings.warn(
            "Unknown warn_deprecation version arg: %r" % version,
            RuntimeWarning,
            stacklevel=1,
        )
    warnings.warn(text, cls, stacklevel=stacklevel + 1)
status_reasons = {
# Status Codes
# Informational
100: "Continue",
101: "Switching Protocols",
102: "Processing",
# Successful
200: "OK",
201: "Created",
202: "Accepted",
203: "Non-Authoritative Information",
204: "No Content",
205: "Reset Content",
206: "Partial Content",
207: "Multi Status",
226: "IM Used",
# Redirection
300: "Multiple Choices",
301: "Moved Permanently",
302: "Found",
303: "See Other",
304: "Not Modified",
305: "Use Proxy",
307: "Temporary Redirect",
# Client Error
400: "Bad Request",
401: "Unauthorized",
402: "Payment Required",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
406: "Not Acceptable",
407: "Proxy Authentication Required",
408: "Request Timeout",
409: "Conflict",
410: "Gone",
411: "Length Required",
412: "Precondition Failed",
413: "Request Entity Too Large",
414: "Request URI Too Long",
415: "Unsupported Media Type",
416: "Requested Range Not Satisfiable",
417: "Expectation Failed",
422: "Unprocessable Entity",
423: "Locked",
424: "Failed Dependency",
426: "Upgrade Required",
# Server Error
500: "Internal Server Error",
501: "Not Implemented",
502: "Bad Gateway",
503: "Service Unavailable",
504: "Gateway Timeout",
505: "HTTP Version Not Supported",
507: "Insufficient Storage",
510: "Not Extended",
}
<|endoftext|> |
<|endoftext|>"""
Contains some data structures.
"""
from webob.util.dictmixin import DictMixin
class EnvironHeaders(DictMixin):
    """An object that represents the headers as present in a
    WSGI environment.
    This object is a wrapper (with no internal state) for a WSGI
    request object, representing the CGI-style HTTP_* keys as a
    dictionary.  Because a CGI environment can only hold one value for
    each key, this dictionary is single-valued (unlike outgoing
    headers).
    """
    # Header names whose environ keys lack the HTTP_ prefix.
    _SPECIAL = {
        "HTTP_CONTENT_LENGTH": "CONTENT_LENGTH",
        "HTTP_CONTENT_TYPE": "CONTENT_TYPE",
    }
    def __init__(self, environ):
        self.environ = environ
    def _trans_name(self, name):
        """Translate a header name to its CGI environ key."""
        cgi_key = "HTTP_" + name.replace("-", "_").upper()
        return self._SPECIAL.get(cgi_key, cgi_key)
    def _trans_key(self, key):
        """Translate a CGI environ key back to a header name, or None."""
        if key == "CONTENT_TYPE":
            return "Content-Type"
        if key == "CONTENT_LENGTH":
            return "Content-Length"
        if key.startswith("HTTP_"):
            return key[5:].replace("_", "-").title()
        return None
    def __getitem__(self, item):
        return self.environ[self._trans_name(item)]
    def __setitem__(self, item, value):
        self.environ[self._trans_name(item)] = value
    def __delitem__(self, item):
        del self.environ[self._trans_name(item)]
    def __iter__(self):
        for key in self.environ:
            name = self._trans_key(key)
            if name is not None:
                yield name
    def keys(self):
        return list(self)
    def __contains__(self, item):
        return self._trans_name(item) in self.environ
<|endoftext|> |
<|endoftext|>class Token(object):
def __init__(self, start_mark, end_mark):
self.start_mark = start_mark
self.end_mark = end_mark
def __repr__(self):
attributes = [key for key in self.__dict__ if not key.endswith("_mark")]
attributes.sort()
arguments = ", ".join(
["%s=%r" % (key, getattr(self, key)) for key in attributes]
)
return "%s(%s)" % (self.__class__.__name__, arguments)
# class BOMToken(Token):
#     id = '<byte order mark>'
class DirectiveToken(Token):
    # A %NAME directive line; carries the directive name and value.
    id = "<directive>"
    def __init__(self, name, value, start_mark, end_mark):
        self.name = name
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
class DocumentStartToken(Token):
    id = "<document start>"
class DocumentEndToken(Token):
    id = "<document end>"
class StreamStartToken(Token):
    id = "<stream start>"
    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        # Character encoding detected for the stream, if any.
        self.encoding = encoding
class StreamEndToken(Token):
    id = "<stream end>"
# Block-style structure tokens.
class BlockSequenceStartToken(Token):
    id = "<block sequence start>"
class BlockMappingStartToken(Token):
    id = "<block mapping start>"
class BlockEndToken(Token):
    id = "<block end>"
# Flow-style structure tokens; ids echo the source punctuation.
class FlowSequenceStartToken(Token):
    id = "["
class FlowMappingStartToken(Token):
    id = "{"
class FlowSequenceEndToken(Token):
    id = "]"
class FlowMappingEndToken(Token):
    id = "}"
class KeyToken(Token):
    id = "?"
class ValueToken(Token):
    id = ":"
class BlockEntryToken(Token):
    id = "-"
class FlowEntryToken(Token):
    id = ","
class AliasToken(Token):
    # Reference to a previously anchored node (*anchor).
    id = "<alias>"
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
class AnchorToken(Token):
    # Anchor definition (&anchor).
    id = "<anchor>"
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
class TagToken(Token):
    # Explicit node tag.
    id = "<tag>"
    def __init__(self, value, start_mark, end_mark):
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
class ScalarToken(Token):
    # Scalar value; ``plain`` marks unquoted scalars and ``style`` records
    # the quoting/block style indicator (None for plain).
    id = "<scalar>"
    def __init__(self, value, plain, start_mark, end_mark, style=None):
        self.value = value
        self.plain = plain
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.style = style
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
import http.client
import json
import os
import socket
import sys
import unittest
import urllib.request, urllib.error, urllib.parse
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../lib"))
import file_io
FILE_LOC = "/var/apps/test_app/app/queue.yaml"
def create_test_yaml():
    """Write a two-queue test queue.yaml for app 'test_app'.
    Creates the /var/apps/test_app/app/ directory tree if needed.
    """
    file_loc = FILE_LOC
    config = """
queue:
- name: default
  rate: 5/s
- name: foo
  rate: 10/m
"""
    # Create each directory level in its own try block: the original wrapped
    # both mkdir calls in a single try, so an already-existing parent raised
    # OSError and silently skipped creating the missing child directory.
    for dir_path in ("/var/apps/test_app", "/var/apps/test_app/app/"):
        try:
            os.mkdir(dir_path)
        except OSError:
            pass
    # Return value of file_io.write was bound to an unused FILE variable.
    file_io.write(file_loc, config)
# AppScale must already be running with RabbitMQ
class TestTaskQueueServer(unittest.TestCase):
    """Integration test: posts a startworker request to a running
    TaskQueue server (AppScale with RabbitMQ must already be running).
    """
    def test_slave(self):
        create_test_yaml()
        values = {"app_id": "test_app"}
        host = socket.gethostbyname(socket.gethostname())
        req = urllib.request.Request("http://" + host + ":64839/startworker")
        req.add_header("Content-Type", "application/json")
        # urlopen's data argument must be bytes under Python 3; passing the
        # str from json.dumps() raises TypeError.
        payload = json.dumps(values).encode("utf-8")
        response = urllib.request.urlopen(req, payload)
        print(response.read())
        self.assertEqual(response.getcode(), 200)
if __name__ == "__main__":
unittest.main()
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
import os
import queue
import sys
import threading
import tornado.httpclient
import unittest
from flexmock import flexmock
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
import helper
import hermes_constants
from custom_hermes_exceptions import MissingRequestArgs
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../lib"))
import appscale_info
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../AppServer"))
from google.appengine.api.appcontroller_client import AppControllerClient
class FakeAppControllerClient:
    """Stand-in for AppControllerClient with a fixed registration state."""
    def __init__(self, registered):
        self.registered = registered
    def deployment_id_exists(self):
        return self.registered
    def get_deployment_id(self):
        return "fake_id"
class FakeAsyncClient:
    """Stand-in for tornado's AsyncHTTPClient; fetch is a no-op."""
    def fetch(self):
        pass
class FakeClient:
    """Stand-in for tornado's HTTPClient; fetch is a no-op."""
    def fetch(self):
        pass
class FakeLock:
    """Stand-in for a lock; acquire/release are no-ops."""
    def __init__(self, wrapped_class):
        pass
    def acquire(self):
        pass
    def release(self):
        pass
class FakeRequest:
    """Stand-in HTTP request carrying the canned module-level url/body."""
    def __init__(self):
        self.url = fake_url
        self.body = fake_data
class FakeResponse:
    """Stand-in HTTP response wrapping a request and a status code."""
    def __init__(self, request, code):
        self.request = request
        self.code = code
fake_url = "http://some.url"
fake_data = "some data"
fake_node_info = [
{"host": fake_url, "role": "db_master", "index": None},
{"host": fake_url, "role": "zk", "index": 0},
]
class TestHelper(unittest.TestCase):
    """A set of test cases for Hermes helper functions."""
    def test_create_request(self):
        # Test with no args.
        self.assertRaises(MissingRequestArgs, helper.create_request)
        # Test GET.
        self.assertIsNotNone(helper.create_request, ["some url", "some method"])
        # Test POST.
        self.assertIsNotNone(
            helper.create_request, ["some url", "some method", "some data"]
        )
    def test_urlfetch(self):
        # Stub out tornado's synchronous HTTP client with a canned response.
        fake_request = FakeRequest()
        fake_response = FakeResponse(fake_request, 200)
        fake_client = flexmock(tornado.httpclient.HTTPClient())
        fake_client.should_receive("fetch").and_return(fake_response)
        self.assertIsNotNone(helper.urlfetch, fake_request)
    def test_urlfetch_async(self):
        # Same as above for the asynchronous HTTP client.
        fake_request = FakeRequest()
        fake_response = FakeResponse(fake_request, 200)
        fake_client = flexmock(tornado.httpclient.AsyncHTTPClient())
        fake_client.should_receive("fetch").and_return(fake_response)
        self.assertIsNotNone(helper.urlfetch, fake_request)
    def test_get_br_service_url(self):
        # URL should be host + backup/recovery service port and path.
        fake_url = "http://host:{0}{1}".format(
            hermes_constants.BR_SERVICE_PORT, hermes_constants.BR_SERVICE_PATH
        )
        self.assertEqual(fake_url, helper.get_br_service_url("host"))
    def test_get_deployment_id(self):
        # Test with a registered AppScale deployment.
        fake_acc = FakeAppControllerClient(True)
        flexmock(appscale_info).should_receive("get_appcontroller_client").and_return(
            fake_acc
        )
        flexmock(AppControllerClient).should_receive("deployment_id_exists").and_return(
            True
        )
        flexmock(AppControllerClient).should_receive("get_deployment_id").and_return(
            "fake_id"
        )
        self.assertEqual("fake_id", helper.get_deployment_id())
        # Test with an AppScale deployment that's not registered.
        fake_acc = FakeAppControllerClient(False)
        flexmock(appscale_info).should_receive("get_appcontroller_client").and_return(
            fake_acc
        )
        flexmock(AppControllerClient).should_receive("deployment_id_exists").and_return(
            False
        )
        self.assertIsNone(helper.get_deployment_id())
    def test_get_node_info(self):
        # Stub the deployment topology sources and expect fake_node_info.
        flexmock(appscale_info).should_receive("get_db_master_ip").and_return("foo")
        flexmock(appscale_info).should_receive("get_db_slave_ips").and_return(["bar"])
        flexmock(appscale_info).should_receive("get_zk_node_ips").and_return(["baz"])
        flexmock(helper).should_receive("get_br_service_url").and_return(
            "http://some.url"
        ).at_least().times(2)
        self.assertEqual(fake_node_info, helper.get_node_info())
    def test_create_br_json_data(self):
        # TODO: not implemented yet.
        pass
    def test_delete_task_from_mem(self):
        flexmock(FakeLock(threading.Lock())).should_receive("acquire").and_return()
        flexmock(FakeLock(threading.Lock())).should_receive("release").and_return()
        helper.delete_task_from_mem("foo")
    def test_report_status(self):
        # TODO: not implemented yet.
        pass
    def test_send_remote_request(self):
        # The fetched result ("qux") should be placed on the queue.
        flexmock(queue.Queue).should_receive("put").and_return().at_least().times(1)
        flexmock(helper).should_receive("urlfetch").and_return("qux").at_least().times(
            1
        )
        helper.send_remote_request(FakeRequest(), queue.Queue())
if __name__ == "__main__":
unittest.main()
<|endoftext|> |
<|endoftext|>""" Top level server for the Search API. """
from search_api import SearchService
import logging
import tornado.httpserver
import tornado.httputil
import tornado.ioloop
import tornado.web
import time
# Default port for the search API web server.
DEFAULT_PORT = 53423
class MainHandler(tornado.web.RequestHandler):
    """Main handler class."""
    def initialize(self, search_service):
        """Class for initializing search service web handler."""
        self.search_service = search_service
    @tornado.web.asynchronous
    def post(self):
        """A POST handler for request to this server."""
        request = self.request
        http_request_data = request.body
        # The protocol buffer type is relayed via a request header.
        pb_type = request.headers["protocolbuffertype"]
        if pb_type == "Request":
            response = self.search_service.remote_request(http_request_data)
        else:
            response = self.search_service.unknown_request(pb_type)
        # Write the serialized response straight onto the connection,
        # bypassing RequestHandler's buffered output helpers.
        request.connection.write_headers(
            tornado.httputil.ResponseStartLine("HTTP/1.1", 200, "OK"),
            tornado.httputil.HTTPHeaders({"Content-Length": str(len(response))}),
        )
        request.connection.write(response)
        request.connection.finish()
def get_application():
    """Retrieves the application to feed into tornado."""
    routes = [(r"/?", MainHandler, {"search_service": SearchService()})]
    return tornado.web.Application(routes)
if __name__ == "__main__":
    logging.getLogger().setLevel(logging.INFO)
    logging.info("Starting server on port {0}".format(DEFAULT_PORT))
    http_server = tornado.httpserver.HTTPServer(get_application())
    http_server.bind(DEFAULT_PORT)
    # start(0) forks one tornado server process per CPU core.
    http_server.start(0)
    tornado.ioloop.IOLoop.instance().start()
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# This script is used by AppScale with monit: it allows to stop services
# controlled by monit.
from os.path import basename
import subprocess
import sys
# Make sure we have the right number of arguments
if len(sys.argv) != 3:
print(sys.argv[0] + ": needs 2 arguments the process, and an identifier.")
sys.exit(1)
process = sys.argv[1]
identifier = sys.argv[2]
# Get all the lines matching the arguments. We print the group PID and the
# arguments, so that killing the group PID allow to terminate also the
# related processes.
ps_output = subprocess.check_output(["/bin/ps", "-eo", "pgid,args"]).split("\n")
for line in ps_output:
if basename(sys.argv[0]) in line:
continue
if identifier in line and process in line:
pgid = "-" + line.split()[0]
print("Found a match: " + line)
subprocess.call(["/bin/kill", "-SIGKILL", pgid])
sys.exit(0)
print("Didn't find a match.")
sys.exit(2)
<|endoftext|> |
<|endoftext|>"""
Created on Apr 21, 2011
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
"""
import xml.dom, math, re
from arelle.ModelValue import qname
from arelle import XPathContext, XbrlUtil
from arelle.ModelInstanceObject import ModelDimensionValue
from decimal import Decimal
class fnFunctionNotAvailable(Exception):
    """Raised internally when no custom function implementation can be found."""

    def __init__(self):
        # Exception.args is assigned directly; str()/args derive from it.
        self.args = ("custom function not available",)

    def __repr__(self):
        message, = self.args
        return message
def call(xc, p, qname, contextItem, args):
    """Dispatch an XPath custom-function call for *qname* with *args*.

    Resolution order:
      1. a declared custom function signature with an implementation (callCfi),
      2. a plug-in function registered in xc.customFunctions,
      3. a compiled function in this module's customFunctions table.
    Any miss (fnFunctionNotAvailable or a KeyError from the lookups) is
    reported uniformly as XPathContext.FunctionNotAvailable.
    """
    try:
        # KeyError here (unknown qname/arity) falls through to the handler below.
        cfSig = xc.modelXbrl.modelCustomFunctionSignatures[qname, len(args)]
        if cfSig is not None and cfSig.customFunctionImplementation is not None:
            return callCfi(xc, p, qname, cfSig, contextItem, args)
        elif qname in xc.customFunctions:  # plug in method custom functions
            return xc.customFunctions[qname](
                xc, p, contextItem, args
            )  # use plug-in's method
        elif qname not in customFunctions:  # compiled functions in this module
            raise fnFunctionNotAvailable
        return customFunctions[qname](xc, p, contextItem, args)
    except (fnFunctionNotAvailable, KeyError):
        raise XPathContext.FunctionNotAvailable(
            "custom function:{0}".format(str(qname))
        )
def callCfi(xc, p, qname, cfSig, contextItem, args):
    """Evaluate a custom function implementation (XBRL Formula cfi).

    Binds the call arguments as in-scope variables, evaluates each step
    expression (binding its result as a variable for later steps), evaluates
    the output expression cast to the declared output type, then restores the
    caller's in-scope variables.

    :param xc: XPath evaluation context
    :param cfSig: custom function signature (input types, output type, impl)
    :param args: evaluated argument values, one per declared input
    :raises XPathContext.FunctionNumArgs: wrong argument count
    :raises XPathContext.FunctionArgType: output failed the result type cast
    """
    if len(args) != len(cfSig.inputTypes):
        raise XPathContext.FunctionNumArgs()
    cfi = cfSig.customFunctionImplementation
    # Saved bindings for names this call shadows, restored before returning.
    overriddenInScopeVars = {}
    traceSource = xc.formulaOptions.traceSource(xc.traceType)
    traceEvaluation = xc.formulaOptions.traceEvaluation(xc.traceType)
    inputNames = cfi.inputNames
    # Bind each argument under its declared input name, remembering shadowed values.
    for i, argName in enumerate(inputNames):
        if argName in xc.inScopeVars:
            overriddenInScopeVars[argName] = xc.inScopeVars[argName]
        xc.inScopeVars[argName] = args[i]
    if traceEvaluation:
        xc.modelXbrl.info(
            "formula:trace",
            _("%(cfi)s(%(arguments)s)"),
            modelObject=cfi,
            cfi=qname,
            arguments=", ".join(
                "{}={}".format(argName, args[i]) for i, argName in enumerate(inputNames)
            ),
        )
    # Evaluate intermediate steps; each step's result becomes an in-scope
    # variable visible to subsequent steps and the output expression.
    for i, step in enumerate(cfi.stepExpressions):
        stepQname, stepExpression = step
        stepProg = cfi.stepProgs[i]
        if traceSource:
            xc.modelXbrl.info(
                "formula:trace",
                _("%(cfi)s step %(step)s \nExpression: \n%(expression)s"),
                modelObject=cfi,
                cfi=qname,
                step=stepQname,
                expression=stepExpression,
            )
        result = xc.evaluate(stepProg)
        if traceEvaluation:
            xc.modelXbrl.info(
                "formula:trace",
                _("%(cfi)s step %(step)s \nResult: \n%(expression)s"),
                modelObject=cfi,
                cfi=qname,
                step=stepQname,
                expression=result,
            )
        if stepQname in xc.inScopeVars:
            overriddenInScopeVars[stepQname] = xc.inScopeVars[stepQname]
        xc.inScopeVars[stepQname] = result
    if traceSource:
        xc.modelXbrl.info(
            "formula:trace",
            _("%(cfi)s output \nExpression: \n%(expression)s"),
            modelObject=cfi,
            cfi=qname,
            expression=cfi.outputExpression,
        )
    # Output is cast to the declared type; a failed cast yields None (checked below).
    result = xc.evaluateAtomicValue(cfi.outputProg, cfSig.outputType)
    if traceEvaluation:
        xc.modelXbrl.info(
            "formula:trace",
            _("%(cfi)s output \nResult: \n%(expression)s"),
            modelObject=cfi,
            cfi=qname,
            expression=result,
        )
    # Restore shadowed step variables.
    # NOTE(review): step names that were NOT previously in scope are left
    # bound after the call (no del branch, unlike the inputs below) -- confirm
    # whether that leak into the caller's scope is intended.
    for step in cfi.stepExpressions:
        stepQname = step[0]
        if stepQname in overriddenInScopeVars:
            xc.inScopeVars[stepQname] = overriddenInScopeVars[stepQname]
    # Restore or remove the argument bindings.
    for i, argName in enumerate(inputNames):
        if argName in overriddenInScopeVars:
            xc.inScopeVars[argName] = overriddenInScopeVars[argName]
        else:
            del xc.inScopeVars[argName]
    if result is None:  # atomic value failed the result cast expression
        raise XPathContext.FunctionArgType("output", cfSig.outputType, result)
    return result
# for test case 22015 v01
def my_fn_PDxEV(xc, p, contextItem, args):
    """Custom XPath function: pair PD and EV facts on the ExposuresDimension
    and return the list of products PD * EV, one per matched pair.

    :param xc: XPath evaluation context (unused)
    :param p: program reference (unused)
    :param contextItem: XPath context item (unused)
    :param args: two fact sequences: probabilities of default, exposure values
    :returns: list of numeric products
    :raises XPathContext.FunctionNumArgs: if not exactly two arguments
    """
    if len(args) != 2:
        raise XPathContext.FunctionNumArgs()
    # Normalize each argument to a sequence (a lone fact becomes a 1-tuple).
    PDseq = args[0] if isinstance(args[0], (list, tuple)) else (args[0],)
    EVseq = args[1] if isinstance(args[1], (list, tuple)) else (args[1],)
    dimQname = qname("{http://www.example.com/wgt-avg}ExposuresDimension")
    PDxEV = []
    for pd in PDseq:
        # NOTE(review): pdDim/evDim are only assigned when the fact has a
        # context; a fact without one would leave the name unbound below --
        # assumes all facts passed in carry contexts.
        if pd.context is not None:
            pdDim = pd.context.dimValue(dimQname)
        for ev in EVseq:
            if ev.context is not None:
                evDim = ev.context.dimValue(dimQname)
            # s-equal2 comparison when either side is a dimension value;
            # plain equality otherwise (e.g. both sides default/absent).
            if pdDim is not None and isinstance(pdDim, ModelDimensionValue):
                dimEqual = pdDim.isEqualTo(evDim, equalMode=XbrlUtil.S_EQUAL2)
            elif evDim is not None and isinstance(evDim, ModelDimensionValue):
                dimEqual = evDim.isEqualTo(pdDim, equalMode=XbrlUtil.S_EQUAL2)
            else:
                dimEqual = pdDim == evDim
            if dimEqual:
                pdX = pd.xValue
                evX = ev.xValue
                # type promotion required: Decimal and float cannot be
                # multiplied, so promote the Decimal operand to float.
                if isinstance(pdX, Decimal) and isinstance(evX, float):
                    pdX = float(pdX)
                elif isinstance(evX, Decimal) and isinstance(pdX, float):
                    # fixed: was "pdX = float(evX)", which clobbered the PD
                    # operand and produced evX * evX instead of pdX * evX
                    evX = float(evX)
                PDxEV.append(pdX * evX)
                break
    return PDxEV
# Registry of compiled custom functions in this module, keyed by function
# QName; consulted by call() after signature and plug-in lookups.
customFunctions = {
    qname("{http://www.example.com/wgt-avg/function}my-fn:PDxEV"): my_fn_PDxEV
}
<|endoftext|> |
<|endoftext|>"""
Created on Oct 3, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
"""
from collections import defaultdict
import os, sys, traceback, uuid
import logging
from decimal import Decimal
from arelle import UrlUtil, XmlUtil, ModelValue, XbrlConst, XmlValidate
from arelle.FileSource import FileNamedStringIO
from arelle.ModelObject import ModelObject, ObjectPropertyViewWrapper
from arelle.Locale import format_string
from arelle.PluginManager import pluginClassMethods
from arelle.PrototypeInstanceObject import FactPrototype, DimValuePrototype
from arelle.PythonUtil import flattenSequence
from arelle.UrlUtil import isHttpUrl
from arelle.ValidateXbrlDimensions import isFactDimensionallyValid
# Module references resolved lazily at first use (avoids circular imports).
ModelRelationshipSet = None  # dynamic import
ModelFact = None
# Counter for profile statistics entries.
profileStatNumber = 0
AUTO_LOCATE_ELEMENT = "771407c0-1d0c-11e1-be5e-028037ec0200"  # singleton meaning choose best location for new element
# Interned dimension-value category strings (identity-comparable).
# NOTE(review): _STR_8BIT is not imported here -- presumably injected as a
# py2/py3 compatibility builtin elsewhere in the package; confirm.
DEFAULT = sys.intern(_STR_8BIT("default"))
NONDEFAULT = sys.intern(_STR_8BIT("non-default"))
DEFAULTorNONDEFAULT = sys.intern(_STR_8BIT("default-or-non-default"))
def load(
    modelManager,
    url,
    nextaction=None,
    base=None,
    useFileSource=None,
    errorCaptureLevel=None,
    **kwargs
):
    """Each loaded instance, DTS, testcase, testsuite, versioning report, or RSS feed, is represented by an
    instance of a ModelXbrl object. The ModelXbrl object has a collection of ModelDocument objects, each
    representing an XML document (for now, with SQL whenever its time comes). One of the modelDocuments of
    the ModelXbrl is the entry point (of discovery or of the test suite).

    :param url: may be a filename or FileSource object
    :type url: str or FileSource
    :param nextaction: text to use as status line prompt on conclusion of loading and discovery
    :type nextaction: str
    :param base: the base URL if any (such as a versioning report's URL when loading to/from DTS modelXbrl).
    :type base: str
    :param useFileSource: for internal use (when an entry point is in a FileSource archive and discovered files expected to also be in the entry point's archive.
    :type useFileSource: bool
    :returns: ModelXbrl -- a new modelXbrl, performing DTS discovery for instance, inline XBRL, schema, linkbase, and versioning report entry urls
    """
    if nextaction is None:
        nextaction = _("loading")
    from arelle import ModelDocument, FileSource

    modelXbrl = create(modelManager, errorCaptureLevel=errorCaptureLevel)
    # Resolve the file source: reuse the supplied one, adopt a FileSource
    # passed as url, or open a new one from the url string.
    # (Removed a no-op "url = url" self-assignment from the first branch.)
    if useFileSource is not None:
        modelXbrl.fileSource = useFileSource
        modelXbrl.closeFileSource = False
    elif isinstance(url, FileSource.FileSource):
        modelXbrl.fileSource = url
        modelXbrl.closeFileSource = True
        url = modelXbrl.fileSource.url
    else:
        modelXbrl.fileSource = FileSource.FileSource(url, modelManager.cntlr)
        modelXbrl.closeFileSource = True
    modelXbrl.modelDocument = ModelDocument.load(
        modelXbrl, url, base, isEntry=True, **kwargs
    )
    del modelXbrl.entryLoadingUrl
    loadSchemalocatedSchemas(modelXbrl)
    # from arelle import XmlValidate
    # uncomment for trial use of lxml xml schema validation of entry document
    # XmlValidate.xmlValidate(modelXbrl.modelDocument)
    modelManager.cntlr.webCache.saveUrlCheckTimes()
    modelManager.showStatus(_("xbrl loading finished, {0}...").format(nextaction))
    return modelXbrl
def create(
    modelManager,
    newDocumentType=None,
    url=None,
    schemaRefs=None,
    createModelDocument=True,
    isEntry=False,
    errorCaptureLevel=None,
    initialXml=None,
    initialComment=None,
    base=None,
    discover=True,
):
    """Create a new (initially empty) ModelXbrl, optionally with a new
    modelDocument of the given type.

    :param modelManager: the controller's modelManager for this session
    :param newDocumentType: a ModelDocument.Type value; when given, a FileSource
        is opened for url and (if createModelDocument) a new document is created
    :param url: filename or open file handle for the new document
    :param schemaRefs: schemaRef URLs for a new instance document
    :param isEntry: True when the new document is the entry point of discovery
    :param errorCaptureLevel: minimum logging level captured into the errors list
    :returns: ModelXbrl -- the new model
    """
    from arelle import ModelDocument, FileSource

    modelXbrl = ModelXbrl(modelManager, errorCaptureLevel=errorCaptureLevel)
    modelXbrl.locale = modelManager.locale
    if newDocumentType:
        modelXbrl.fileSource = FileSource.FileSource(
            url, modelManager.cntlr
        )  # url may be an open file handle, use str(url) below
        modelXbrl.closeFileSource = True
        if createModelDocument:
            modelXbrl.modelDocument = ModelDocument.create(
                modelXbrl,
                newDocumentType,
                str(url),
                schemaRefs=schemaRefs,
                isEntry=isEntry,
                initialXml=initialXml,
                initialComment=initialComment,
                base=base,
                discover=discover,
            )
            if isEntry:
                del modelXbrl.entryLoadingUrl
                loadSchemalocatedSchemas(modelXbrl)
    return modelXbrl
def loadSchemalocatedSchemas(modelXbrl):
    """Load xsd's referenced only by xsi:schemaLocation once DTS discovery is done."""
    from arelle import ModelDocument

    if modelXbrl.modelDocument is None:
        return
    if modelXbrl.modelDocument.type >= ModelDocument.Type.DTSENTRIES:
        return
    # At this point DTS is fully discovered but schemaLocated xsd's are not
    # yet loaded.  Each pass may discover new urlDocs, so loop until every
    # document has been visited.
    visited = set()
    while True:
        unvisited = set(modelXbrl.urlDocs.values()) - visited
        if not unvisited:
            break
        doc = unvisited.pop()
        visited.add(doc)
        doc.loadSchemalocatedSchemas()
class ModelXbrl:
    """
    .. class:: ModelXbrl(modelManager)

    ModelXbrl objects represent loaded instances and inline XBRL instances and their DTSes, DTSes
    (without instances), versioning reports, testcase indexes, testcase variation documents, and
    other document-centric loadable objects.

    :param modelManager: The controller's modelManager object for the current session or command line process.
    :type modelManager: ModelManager

    .. attribute:: urlDocs
    Dict, by URL, of loaded modelDocuments

    .. attribute:: errorCaptureLevel
    Minimum logging level to capture in errors list (default is INCONSISTENCY)

    .. attribute:: errors
    Captured error codes (at or over minimum error capture logging level) and assertion results, which were sent to logger, via log() methods, used for validation and post-processing

    .. attribute:: logErrorCount, logWarningCount, logInfoCount
    Counts of respective error levels processed by modelXbrl logger

    .. attribute:: arcroleTypes
    Dict by arcrole of defining modelObjects

    .. attribute:: roleTypes
    Dict by role of defining modelObjects

    .. attribute:: qnameConcepts
    Dict by qname (QName) of all top level schema elements, regardless of whether discovered or not discoverable (not in DTS)

    .. attribute:: qnameAttributes
    Dict by qname of all top level schema attributes

    .. attribute:: qnameAttributeGroups
    Dict by qname of all top level schema attribute groups

    .. attribute:: qnameTypes
    Dict by qname of all top level and anonymous types

    .. attribute:: baseSets
    Dict of base sets by (arcrole, linkrole, arc qname, link qname), (arcrole, linkrole, *, *), (arcrole, *, *, *), and in addition, collectively for dimensions, formula, and rendering, as arcroles 'XBRL-dimensions', 'XBRL-formula', and 'Table-rendering'.

    .. attribute:: relationshipSets
    Dict of effective relationship sets indexed same as baseSets (including collective indices), but lazily resolved when requested.

    .. attribute:: qnameDimensionDefaults
    Dict of dimension defaults by qname of dimension

    .. attribute:: facts
    List of top level facts (not nested in tuples), document order

    .. attribute:: factsInInstance
    List of all facts in instance (including nested in tuples), document order

    .. attribute:: contexts
    Dict of contexts by id

    .. attribute:: units
    Dict of units by id

    .. attribute:: modelObjects
    Model objects in loaded order, allowing object access by ordinal index (for situations, such as tkinter, where a reference to an object would create a memory freeing difficulty).

    .. attribute:: qnameParameters
    Dict of formula parameters by their qname

    .. attribute:: modelVariableSets
    Set of variableSets in formula linkbases

    .. attribute:: modelCustomFunctionSignatures
    Dict of custom function signatures by qname and by qname,arity

    .. attribute:: modelCustomFunctionImplementations
    Dict of custom function implementations by qname

    .. attribute:: views
    List of view objects

    .. attribute:: langs
    Set of langs in use by modelXbrl

    .. attribute:: labelRoles
    Set of label roles in use by modelXbrl's linkbases

    .. attribute:: hasXDT
    True if dimensions discovered

    .. attribute:: hasTableRendering
    True if table rendering discovered

    .. attribute:: hasTableIndexing
    True if table indexing discovered

    .. attribute:: hasFormulae
    True if formulae discovered

    .. attribute:: formulaOutputInstance
    Standard output instance if formulae produce one.

    .. attribute:: hasRendering
    True if rendering tables are discovered

    .. attribute:: Log
    Logger for modelXbrl
    """
    def __init__(self, modelManager, errorCaptureLevel=None):
        """Create a model bound to modelManager; all state is set up in init()."""
        self.modelManager = modelManager
        self.skipDTS = modelManager.skipDTS
        self.init(errorCaptureLevel=errorCaptureLevel)
    def init(self, keepViews=False, errorCaptureLevel=None):
        """(Re)initialize all per-model state; called from __init__ and reload().

        :param keepViews: True to preserve open views (used by reload())
        :param errorCaptureLevel: minimum logging level whose messages are
            captured into self.errors (defaults to the INCONSISTENCY level --
            presumably registered with the logging module elsewhere; confirm)
        """
        self.uuid = uuid.uuid1().urn
        self.namespaceDocs = defaultdict(list)
        self.urlDocs = {}
        self.urlUnloadableDocs = (
            {}
        )  # if entry is True, entry is blocked and unloadable, False means loadable but warned
        # logging._checkLevel resolves a level name to its numeric value (private API)
        self.errorCaptureLevel = errorCaptureLevel or logging._checkLevel(
            "INCONSISTENCY"
        )
        self.errors = []
        self.logCount = {}
        self.arcroleTypes = defaultdict(list)
        self.roleTypes = defaultdict(list)
        self.qnameConcepts = {}  # indexed by qname of element
        self.nameConcepts = defaultdict(list)  # contains ModelConcepts by name
        self.qnameAttributes = {}
        self.qnameAttributeGroups = {}
        self.qnameGroupDefinitions = {}
        self.qnameTypes = {}  # contains ModelTypes by qname key of type
        self.baseSets = defaultdict(
            list
        )  # contains ModelLinks for keys arcrole, arcrole#linkrole
        self.relationshipSets = {}  # contains ModelRelationshipSets by base set keys
        self.qnameDimensionDefaults = (
            {}
        )  # contains qname of dimension (index) and default member (value)
        self.facts = []
        self.factsInInstance = set()
        self.undefinedFacts = []  # elements presumed to be facts but not defined
        self.contexts = {}
        self.units = {}
        self.modelObjects = []
        self.qnameParameters = {}
        self.modelVariableSets = set()
        self.modelCustomFunctionSignatures = {}
        self.modelCustomFunctionImplementations = set()
        self.modelRenderingTables = set()
        if not keepViews:
            self.views = []
        self.langs = {self.modelManager.defaultLang}
        from arelle.XbrlConst import standardLabel

        self.labelroles = {standardLabel}
        self.hasXDT = False
        self.hasTableRendering = False
        self.hasTableIndexing = False
        self.hasFormulae = False
        self.formulaOutputInstance = None
        self.logger = logging.getLogger("arelle")
        self.logRefObjectProperties = getattr(
            self.logger, "logRefObjectProperties", False
        )
        self.logRefHasPluginAttrs = any(
            True for m in pluginClassMethods("Logging.Ref.Attributes")
        )
        self.logRefHasPluginProperties = any(
            True for m in pluginClassMethods("Logging.Ref.Properties")
        )
        self.profileStats = {}
        self.schemaDocsToValidate = set()
        self.modelXbrl = self  # for consistency in addressing modelXbrl
        self.arelleUnitTests = (
            {}
        )  # unit test entries (usually from processing instructions)
        # Allow plug-ins to extend model initialization.
        for pluginXbrlMethod in pluginClassMethods("ModelXbrl.Init"):
            pluginXbrlMethod(self)
    def close(self):
        """Closes any views, formula output instances, modelDocument(s), and dereferences all memory used"""
        if not self.isClosed:
            self.closeViews()
            if self.formulaOutputInstance:
                self.formulaOutputInstance.close()
            if hasattr(self, "fileSource") and self.closeFileSource:
                self.fileSource.close()
            # Capture references before clearing __dict__ (which is what marks
            # the model closed -- see isClosed) so the document can still be
            # closed afterwards.
            modelDocument = getattr(self, "modelDocument", None)
            urlDocs = getattr(self, "urlDocs", None)
            for relSet in list(self.relationshipSets.values()):
                relSet.clear()
            self.__dict__.clear()  # dereference everything before closing document
            if modelDocument:
                modelDocument.close(urlDocs=urlDocs)
@property
def isClosed(self):
"""
:returns: bool -- True if closed (python object has deferenced and deleted all attributes after closing)
"""
return not bool(self.__dict__) # closed when dict is empty
    def reload(self, nextaction, reloadCache=False):
        """Reloads all model objects from their original entry point URL, preserving any open views (which are reloaded).

        :param nextaction: status line text string, if any, to show upon completion
        :type nextaction: str
        :param reloadCache: True to force clearing and reloading of web cache, if working online.
        :type reloadCache: bool
        """
        from arelle import ModelDocument

        self.init(keepViews=True)  # reset all state but keep open views
        self.modelDocument = ModelDocument.load(
            self, self.fileSource.url, isEntry=True, reloadCache=reloadCache
        )
        self.modelManager.showStatus(
            _("xbrl loading finished, {0}...").format(nextaction), 5000
        )
        self.modelManager.reloadViews(self)
    def closeViews(self):
        """Close views associated with this modelXbrl"""
        if not self.isClosed:
            # Each views[0].close() is expected to remove itself from
            # self.views, so the head of the list advances every iteration;
            # the range bound caps iterations in case a close() fails to
            # deregister, and the len() check guards against early emptying.
            for view in range(len(self.views)):
                if len(self.views) > 0:
                    self.views[0].close()
    def relationshipSet(
        self,
        arcrole,
        linkrole=None,
        linkqname=None,
        arcqname=None,
        includeProhibits=False,
    ):
        """Returns a relationship set matching specified parameters (only arcrole is required).

        Resolve and determine relationship set. If a relationship set of the same parameters was previously resolved, it is returned from a cache.

        :param arcrole: Required arcrole, or special collective arcroles 'XBRL-dimensions', 'XBRL-formula', and 'Table-rendering'
        :type arcrole: str
        :param linkrole: Linkrole (wild if None)
        :type linkrole: str
        :param linkqname: Link element qname (wild if None)
        :type linkqname: QName
        :param arcqname: Arc element qname (wild if None)
        :type arcqname: QName
        :param includeProhibits: True to include prohibiting arc elements as relationships
        :type includeProhibits: bool
        :returns: [ModelRelationship] -- Ordered list of effective relationship objects per parameters
        """
        global ModelRelationshipSet
        if ModelRelationshipSet is None:  # deferred import avoids circular dependency
            from arelle import ModelRelationshipSet
        key = (arcrole, linkrole, linkqname, arcqname, includeProhibits)
        if key not in self.relationshipSets:
            # create() presumably registers the new set under key in
            # self.relationshipSets (the lookup below relies on it).
            ModelRelationshipSet.create(
                self, arcrole, linkrole, linkqname, arcqname, includeProhibits
            )
        return self.relationshipSets[key]
def baseSetModelLink(self, linkElement):
for modelLink in self.baseSets[("XBRL-footnotes", None, None, None)]:
if modelLink == linkElement:
return modelLink
return None
def roleTypeDefinition(self, roleURI):
modelRoles = self.roleTypes.get(roleURI, ())
if modelRoles:
return modelRoles[0].definition or roleURI
return roleURI
    def roleTypeName(self, roleURI):
        """Return an authority-specific display name for roleURI via plug-ins,
        falling back to roleTypeDefinition()."""
        # authority-specific role type name; first plug-in returning a truthy
        # value wins
        for pluginXbrlMethod in pluginClassMethods("ModelXbrl.RoleTypeName"):
            _roleTypeName = pluginXbrlMethod(self, roleURI)
            if _roleTypeName:
                return _roleTypeName
        return self.roleTypeDefinition(roleURI)
def matchSubstitutionGroup(self, elementQname, subsGrpMatchTable):
"""Resolve a subsitutionGroup for the elementQname from the match table
Used by ModelObjectFactory to return Class type for new ModelObject subclass creation, and isInSubstitutionGroup
:param elementQname: Element/Concept QName to find substitution group
:type elementQname: QName
:param subsGrpMatchTable: Table of substitutions used to determine xml proxy object class for xml elements and substitution group membership
:type subsGrpMatchTable: dict
:returns: object -- value matching subsGrpMatchTable key
"""
if elementQname in subsGrpMatchTable:
return subsGrpMatchTable[elementQname] # head of substitution group
elementMdlObj = self.qnameConcepts.get(elementQname)
if elementMdlObj is not None:
subsGrpMdlObj = elementMdlObj.substitutionGroup
while subsGrpMdlObj is not None:
subsGrpQname = subsGrpMdlObj.qname
if subsGrpQname in subsGrpMatchTable:
return subsGrpMatchTable[subsGrpQname]
subsGrpMdlObj = subsGrpMdlObj.substitutionGroup
return subsGrpMatchTable.get(None)
def isInSubstitutionGroup(self, elementQname, subsGrpQnames):
"""Determine if element is in substitution group(s)
Used by ModelObjectFactory to return Class type for new ModelObject subclass creation, and isInSubstitutionGroup
:param elementQname: Element/Concept QName to determine if in substitution group(s)
:type elementQname: QName
:param subsGrpQnames: QName or list of QNames
:type subsGrpMatchTable: QName or [QName]
:returns: bool -- True if element is in any substitution group
"""
return self.matchSubstitutionGroup(
elementQname,
{
qn: (qn is not None)
for qn in (
subsGrpQnames
if hasattr(subsGrpQnames, "__iter__")
else (subsGrpQnames,)
)
+ (None,)
},
)
def createInstance(self, url=None):
"""Creates an instance document for a DTS which didn't have an instance document, such as
to create a new instance for a DTS which was loaded from a taxonomy or linkbase entry point.
:param url: File name to save the new instance document
:type url: str
"""
from arelle import ModelDocument, FileSource
if self.modelDocument.type == ModelDocument.Type.INSTANCE:
# entry already is an instance, delete facts etc.
del self.facts[:]
self.factsInInstance.clear()
del self.undefinedFacts[:]
self.contexts.clear()
self.units.clear()
self.modelDocument.idObjects.clear
del self.modelDocument.hrefObjects[:]
self.modelDocument.schemaLocationElements.clear()
self.modelDocument.referencedNamespaces.clear()
for child in list(self.modelDocument.xmlRootElement):
if not (
isinstance(child, ModelObject)
and child.namespaceURI == XbrlConst.link
and child.localName.endswith("Ref")
): # remove contexts, facts, footnotes
self.modelDocument.xmlRootElement.remove(child)
else:
priorFileSource = self.fileSource
self.fileSource = FileSource.FileSource(url, self.modelManager.cntlr)
if isHttpUrl(self.uri):
schemaRefUri = self.uri
else: # relativize local paths
schemaRefUri = os.path.relpath(self.uri, os.path.dirname(url))
self.modelDocument = ModelDocument.create(
self,
ModelDocument.Type.INSTANCE,
url,
schemaRefs=[schemaRefUri],
isEntry=True,
)
if priorFileSource:
priorFileSource.close()
self.closeFileSource = True
del self.entryLoadingUrl
# reload dts views
from arelle import ViewWinDTS
for view in self.views:
if isinstance(view, ViewWinDTS.ViewDTS):
self.modelManager.cntlr.uiThreadQueue.put((view.view, []))
    def saveInstance(self, **kwargs):
        """Saves current instance document file.

        Keyword arguments are passed through to modelDocument.save().

        :param overrideFilepath: specify to override saving in instance's modelDocument.filepath
        """
        self.modelDocument.save(**kwargs)
@property
def prefixedNamespaces(self):
"""Dict of prefixes for namespaces defined in DTS"""
prefixedNamespaces = {}
for nsDocs in list(self.namespaceDocs.values()):
for nsDoc in nsDocs:
ns = nsDoc.targetNamespace
if ns:
prefix = XmlUtil.xmlnsprefix(nsDoc.xmlRootElement, ns)
if prefix and prefix not in prefixedNamespaces:
prefixedNamespaces[prefix] = ns
return prefixedNamespaces
    def matchContext(
        self,
        entityIdentScheme,
        entityIdentValue,
        periodType,
        periodStart,
        periodEndInstant,
        dims,
        segOCCs,
        scenOCCs,
    ):
        """Finds matching context, by aspects, as in formula usage, if any

        :param entityIdentScheme: Scheme to match
        :type entityIdentScheme: str
        :param entityIdentValue: Entity identifier value to match
        :type entityIdentValue: str
        :param periodType: Period type to match ("instant", "duration", or "forever")
        :type periodType: str
        :param periodStart: Date or dateTime of period start
        :type periodStart: ModelValue.DateTime, datetime.date or datetime.datetime
        :param periodEndInstant: Date or dateTime of period end
        :type periodEndInstant: ModelValue.DateTime, datetime.date or datetime.datetime
        :param dims: Dimensions
        :type dims: ModelDimension or QName
        :param segOCCs: Segment non-dimensional nodes
        :type segOCCs: lxml element
        :param scenOCCs: Scenario non-dimensional nodes
        :type scenOCCs: lxml element
        :returns: ModelContext -- Matching context or None
        """
        from arelle.ModelFormulaObject import Aspect
        from arelle.ModelValue import dateUnionEqual
        from arelle.XbrlUtil import sEqual

        # With dimensions, only the non-XDT segment/scenario content must
        # match; without, the complete segment/scenario content must match.
        if dims:
            segAspect, scenAspect = (Aspect.NON_XDT_SEGMENT, Aspect.NON_XDT_SCENARIO)
        else:
            segAspect, scenAspect = (Aspect.COMPLETE_SEGMENT, Aspect.COMPLETE_SCENARIO)
        for c in list(self.contexts.values()):
            if (
                c.entityIdentifier == (entityIdentScheme, entityIdentValue)
                and (
                    (
                        c.isInstantPeriod
                        and periodType == "instant"
                        and dateUnionEqual(
                            c.instantDatetime, periodEndInstant, instantEndDate=True
                        )
                    )
                    or (
                        c.isStartEndPeriod
                        and periodType == "duration"
                        and dateUnionEqual(c.startDatetime, periodStart)
                        and dateUnionEqual(
                            c.endDatetime, periodEndInstant, instantEndDate=True
                        )
                    )
                    or (c.isForeverPeriod and periodType == "forever")
                )
                and
                # dimensions match if dimensional model
                (
                    dims is None
                    or (
                        (list(c.qnameDims.keys()) == list(dims.keys()))
                        and all(
                            [
                                cDim.isEqualTo(dims[cDimQn])
                                for cDimQn, cDim in list(c.qnameDims.items())
                            ]
                        )
                    )
                )
                and
                # OCCs match for either dimensional or non-dimensional model
                all(
                    all([sEqual(self, cOCCs[i], mOCCs[i]) for i in range(len(mOCCs))])
                    if len(cOCCs) == len(mOCCs)
                    else False
                    for cOCCs, mOCCs in (
                        (c.nonDimValues(segAspect), segOCCs),
                        (c.nonDimValues(scenAspect), scenOCCs),
                    )
                )
            ):
                return c
        return None
    def createContext(
        self,
        entityIdentScheme,
        entityIdentValue,
        periodType,
        periodStart,
        periodEndInstant,
        priItem,
        dims,
        segOCCs,
        scenOCCs,
        afterSibling=None,
        beforeSibling=None,
        id=None,
    ):
        """Creates a new ModelContext and validates (integrates into modelDocument object model).

        :param entityIdentScheme: Scheme to match
        :type entityIdentScheme: str
        :param entityIdentValue: Entity identifier value to match
        :type entityIdentValue: str
        :param periodType: Period type to match ("instant", "duration", or "forever")
        :type periodType: str
        :param periodStart: Date or dateTime of period start
        :type periodStart: ModelValue.DateTime, datetime.date or datetime.datetime
        :param periodEndInstant: Date or dateTime of period end
        :type periodEndInstant: ModelValue.DateTime, datetime.date or datetime.datetime
        :param dims: Dimensions
        :type dims: ModelDimension or QName
        :param segOCCs: Segment non-dimensional nodes
        :type segOCCs: lxml element
        :param scenOCCs: Scenario non-dimensional nodes
        :type scenOCCs: lxml element
        :param beforeSibling: lxml element in instance to insert new concept before
        :type beforeSibling: ModelObject
        :param afterSibling: lxml element in instance to insert new concept after
        :type afterSibling: ModelObject
        :param id: id to assign to new context, if absent an id will be generated
        :type id: str
        :returns: ModelContext -- New model context object
        """
        xbrlElt = self.modelDocument.xmlRootElement
        if afterSibling == AUTO_LOCATE_ELEMENT:
            # place the new context after the last housekeeping element
            afterSibling = XmlUtil.lastChild(
                xbrlElt,
                XbrlConst.xbrli,
                ("schemaLocation", "roleType", "arcroleType", "context"),
            )
        cntxId = id if id else "c-{0:02n}".format(len(self.contexts) + 1)
        newCntxElt = XmlUtil.addChild(
            xbrlElt,
            XbrlConst.xbrli,
            "context",
            attributes=("id", cntxId),
            afterSibling=afterSibling,
            beforeSibling=beforeSibling,
        )
        entityElt = XmlUtil.addChild(newCntxElt, XbrlConst.xbrli, "entity")
        XmlUtil.addChild(
            entityElt,
            XbrlConst.xbrli,
            "identifier",
            attributes=("scheme", entityIdentScheme),
            text=entityIdentValue,
        )
        periodElt = XmlUtil.addChild(newCntxElt, XbrlConst.xbrli, "period")
        if periodType == "forever":
            XmlUtil.addChild(periodElt, XbrlConst.xbrli, "forever")
        elif periodType == "instant":
            XmlUtil.addChild(
                periodElt,
                XbrlConst.xbrli,
                "instant",
                text=XmlUtil.dateunionValue(periodEndInstant, subtractOneDay=True),
            )
        elif periodType == "duration":
            XmlUtil.addChild(
                periodElt,
                XbrlConst.xbrli,
                "startDate",
                text=XmlUtil.dateunionValue(periodStart),
            )
            XmlUtil.addChild(
                periodElt,
                XbrlConst.xbrli,
                "endDate",
                text=XmlUtil.dateunionValue(periodEndInstant, subtractOneDay=True),
            )
        segmentElt = None
        scenarioElt = None
        from arelle.ModelInstanceObject import ModelDimensionValue

        if dims:  # requires primary item to determine ambiguous concepts
            """in theory we have to check full set of dimensions for validity in source or any other
            context element, but for shortcut will see if each dimension is already reported in an
            unambiguous valid contextElement
            """
            if priItem is not None:  # creating concept for a specific fact
                dims[
                    2
                ] = priItem  # Aspect.CONCEPT: prototype needs primary item as an aspect
                fp = FactPrototype(self, dims)
                del dims[2]  # Aspect.CONCEPT
                # force trying a valid prototype's context Elements
                if not isFactDimensionallyValid(
                    self, fp, setPrototypeContextElements=True
                ):
                    self.info(
                        "arelle:info",
                        _(
                            "Create context for %(priItem)s, cannot determine valid context elements, no suitable hypercubes"
                        ),
                        modelObject=self,
                        priItem=priItem,
                    )
                fpDims = fp.context.qnameDims
            else:
                fpDims = dims  # dims known to be valid (such as for inline extraction)
            for dimQname in sorted(fpDims.keys()):
                dimValue = fpDims[dimQname]
                if isinstance(dimValue, (DimValuePrototype, ModelDimensionValue)):
                    dimMemberQname = dimValue.memberQname  # None if typed dimension
                    contextEltName = dimValue.contextElement
                else:  # qname for explicit or node for typed
                    dimMemberQname = None
                    contextEltName = None
                if contextEltName == "segment":
                    if segmentElt is None:
                        segmentElt = XmlUtil.addChild(
                            entityElt, XbrlConst.xbrli, "segment"
                        )
                    contextElt = segmentElt
                elif contextEltName == "scenario":
                    if scenarioElt is None:
                        scenarioElt = XmlUtil.addChild(
                            newCntxElt, XbrlConst.xbrli, "scenario"
                        )
                    contextElt = scenarioElt
                else:
                    # NOTE(review): "arelleLinfo" looks like a typo for
                    # "arelle:info", and the trailing comma after this call
                    # makes the statement a 1-tuple expression (harmless but
                    # accidental) -- confirm and fix upstream.
                    self.info(
                        "arelleLinfo",
                        _(
                            "Create context, %(dimension)s, cannot determine context element, either no all relationship or validation issue"
                        ),
                        modelObject=self,
                        dimension=dimQname,
                    ),
                    continue
                dimAttr = ("dimension", XmlUtil.addQnameValue(xbrlElt, dimQname))
                if dimValue.isTyped:
                    dimElt = XmlUtil.addChild(
                        contextElt,
                        XbrlConst.xbrldi,
                        "xbrldi:typedMember",
                        attributes=dimAttr,
                    )
                    if (
                        isinstance(dimValue, (ModelDimensionValue, DimValuePrototype))
                        and dimValue.isTyped
                    ):
                        XmlUtil.copyNodes(dimElt, dimValue.typedMember)
                elif dimMemberQname:
                    dimElt = XmlUtil.addChild(
                        contextElt,
                        XbrlConst.xbrldi,
                        "xbrldi:explicitMember",
                        attributes=dimAttr,
                        text=XmlUtil.addQnameValue(xbrlElt, dimMemberQname),
                    )
        if segOCCs:
            if segmentElt is None:
                segmentElt = XmlUtil.addChild(entityElt, XbrlConst.xbrli, "segment")
            XmlUtil.copyNodes(segmentElt, segOCCs)
        if scenOCCs:
            if scenarioElt is None:
                scenarioElt = XmlUtil.addChild(newCntxElt, XbrlConst.xbrli, "scenario")
            XmlUtil.copyNodes(scenarioElt, scenOCCs)
        # validate and integrate the new context into the instance model
        XmlValidate.validate(self, newCntxElt)
        self.modelDocument.contextDiscover(newCntxElt)
        return newCntxElt
def matchUnit(self, multiplyBy, divideBy):
"""Finds matching unit, by measures, as in formula usage, if any
:param multiplyBy: List of multiply-by measure QNames (or top level measures if no divideBy)
:type multiplyBy: [QName]
:param divideBy: List of multiply-by measure QNames (or empty list if no divideBy)
:type divideBy: [QName]
:returns: ModelUnit -- Matching unit object or None
"""
multiplyBy.sort()
divideBy.sort()
for u in list(self.units.values()):
if u.measures == (multiplyBy, divideBy):
return u
return None
def createUnit(
self, multiplyBy, divideBy, afterSibling=None, beforeSibling=None, id=None
):
"""Creates new unit, by measures, as in formula usage, if any
:param multiplyBy: List of multiply-by measure QNames (or top level measures if no divideBy)
:type multiplyBy: [QName]
:param divideBy: List of multiply-by measure QNames (or empty list if no divideBy)
:type divideBy: [QName]
:param beforeSibling: lxml element in instance to insert new concept before
:type beforeSibling: ModelObject
:param afterSibling: lxml element in instance to insert new concept after
:type afterSibling: ModelObject
:param id: id to assign to new unit, if absent an id will be generated
:type id: str
:returns: ModelUnit -- New unit object
"""
xbrlElt = self.modelDocument.xmlRootElement
if afterSibling == AUTO_LOCATE_ELEMENT:
afterSibling = XmlUtil.lastChild(
xbrlElt,
XbrlConst.xbrli,
("schemaLocation", "roleType", "arcroleType", "context", "unit"),
)
unitId = id if id else "u-{0:02n}".format(len(self.units) + 1)
newUnitElt = XmlUtil.addChild(
xbrlElt,
XbrlConst.xbrli,
"unit",
attributes=("id", unitId),
afterSibling=afterSibling,
beforeSibling=beforeSibling,
)
if len(divideBy) == 0:
for multiply in multiplyBy:
XmlUtil.addChild(
newUnitElt,
XbrlConst.xbrli,
"measure",
text=XmlUtil.addQnameValue(xbrlElt, multiply),
)
else:
divElt = XmlUtil.addChild(newUnitElt, XbrlConst.xbrli, "divide")
numElt = XmlUtil.addChild(divElt, XbrlConst.xbrli, "unitNumerator")
denElt = XmlUtil.addChild(divElt, XbrlConst.xbrli, "unitDenominator")
for multiply in multiplyBy:
XmlUtil.addChild(
numElt,
XbrlConst.xbrli,
"measure",
text=XmlUtil.addQnameValue(xbrlElt, multiply),
)
for divide in divideBy:
XmlUtil.addChild(
denElt,
XbrlConst.xbrli,
"measure",
text=XmlUtil.addQnameValue(xbrlElt, divide),
)
XmlValidate.validate(self, newUnitElt)
self.modelDocument.unitDiscover(newUnitElt)
return newUnitElt
@property
def nonNilFactsInInstance(self): # indexed by fact (concept) qname
"""Facts in the instance which are not nil, cached
:returns: set -- non-nil facts in instance
"""
try:
return self._nonNilFactsInInstance
except AttributeError:
self._nonNilFactsInInstance = set(
f for f in self.factsInInstance if not f.isNil
)
return self._nonNilFactsInInstance
@property
def factsByQname(self): # indexed by fact (concept) qname
"""Facts in the instance indexed by their QName, cached
:returns: dict -- indexes are QNames, values are ModelFacts
"""
try:
return self._factsByQname
except AttributeError:
self._factsByQname = fbqn = defaultdict(set)
for f in self.factsInInstance:
if f.qname is not None:
fbqn[f.qname].add(f)
return fbqn
def factsByDatatype(self, notStrict, typeQname): # indexed by fact (concept) qname
"""Facts in the instance indexed by data type QName, cached as types are requested
:param notSctrict: if True, fact may be derived
:type notStrict: bool
:returns: set -- ModelFacts that have specified type or (if nonStrict) derived from specified type
"""
try:
return self._factsByDatatype[notStrict, typeQname]
except AttributeError:
self._factsByDatatype = {}
return self.factsByDatatype(notStrict, typeQname)
except KeyError:
self._factsByDatatype[notStrict, typeQname] = fbdt = set()
for f in self.factsInInstance:
c = f.concept
if c.typeQname == typeQname or (
notStrict and c.type.isDerivedFrom(typeQname)
):
fbdt.add(f)
return fbdt
def factsByPeriodType(self, periodType): # indexed by fact (concept) qname
"""Facts in the instance indexed by periodType, cached
:param periodType: Period type to match ("instant", "duration", or "forever")
:type periodType: str
:returns: set -- ModelFacts that have specified periodType
"""
try:
return self._factsByPeriodType[periodType]
except AttributeError:
self._factsByPeriodType = fbpt = defaultdict(set)
for f in self.factsInInstance:
p = f.concept.periodType
if p:
fbpt[p].add(f)
return self.factsByPeriodType(periodType)
except KeyError:
return set() # no facts for this period type
    def factsByDimMemQname(
        self, dimQname, memQname=None
    ):
        """Facts in the instance indexed by Dimension (and optionally Member) QName, cached.

        Built lazily per dimension and cached on self._factsByDimQname (which is
        invalidated by createFact when a new fact is added).

        :param dimQname: QName of the dimension to index by
        :param memQname: member key selecting which facts to return:
            If Member is None, returns facts that have the dimension (explicit or typed)
            If Member is NONDEFAULT, returns facts that have the dimension (explicit non-default or typed)
            If Member is DEFAULT, returns facts that have the dimension (explicit non-default or typed) defaulted
            Otherwise returns facts carrying that explicit member QName
        :returns: set -- ModelFacts matching the dimension/member selection
        """
        try:
            fbdq = self._factsByDimQname[dimQname]
            return fbdq[memQname]
        except AttributeError:
            # first use: create the per-dimension cache and retry
            self._factsByDimQname = {}
            return self.factsByDimMemQname(dimQname, memQname)
        except KeyError:
            # this dimension not yet indexed: build its member index
            self._factsByDimQname[dimQname] = fbdq = defaultdict(set)
            for fact in self.factsInInstance:
                if fact.isItem and fact.context is not None:
                    dimValue = fact.context.dimValue(dimQname)
                    if isinstance(
                        dimValue, ModelValue.QName
                    ):  # explicit dimension default value
                        fbdq[None].add(
                            fact
                        )  # facts that have the dimension (here by its default member)
                        # NOTE(review): the membership test uses
                        # self.modelXbrl.qnameDimensionDefaults while the lookup below
                        # uses self.qnameDimensionDefaults -- presumably both resolve to
                        # the same mapping; confirm self.modelXbrl refers back to self here
                        if dimQname in self.modelXbrl.qnameDimensionDefaults:
                            fbdq[self.qnameDimensionDefaults[dimQname]].add(
                                fact
                            )  # set of facts that have this dim and mem
                        fbdq[DEFAULT].add(
                            fact
                        )  # set of all facts that have default value for dimension
                    elif dimValue is not None:  # not default
                        fbdq[None].add(
                            fact
                        )  # facts that have the dimension (explicit non-default or typed)
                        fbdq[NONDEFAULT].add(
                            fact
                        )  # set of all facts that have non-default value for dimension
                        if dimValue.isExplicit:
                            fbdq[dimValue.memberQname].add(
                                fact
                            )  # set of facts that have this dim and mem
                    else:  # default typed dimension
                        fbdq[DEFAULT].add(fact)
            return fbdq[memQname]
def matchFact(self, otherFact, unmatchedFactsStack=None, deemP0inf=False):
"""Finds matching fact, by XBRL 2.1 duplicate definition (if tuple), or by
QName and VEquality (if an item), lang and accuracy equality, as in formula and test case usage
:param otherFact: Fact to match
:type otherFact: ModelFact
:deemP0inf: boolean for formula validation to deem P0 facts to be VEqual as if they were P=INF
:returns: ModelFact -- Matching fact or None
"""
for fact in self.facts:
if fact.isTuple:
if otherFact.isDuplicateOf(
fact, unmatchedFactsStack=unmatchedFactsStack
):
return fact
elif fact.qname == otherFact.qname and fact.isVEqualTo(
otherFact, deemP0inf=deemP0inf
):
if not fact.isNumeric:
if fact.xmlLang == otherFact.xmlLang:
return fact
else:
if (
fact.decimals == otherFact.decimals
and fact.precision == otherFact.precision
):
return fact
return None
    def createFact(
        self,
        conceptQname,
        attributes=None,
        text=None,
        parent=None,
        afterSibling=None,
        beforeSibling=None,
        validate=True,
    ):
        """Creates a new fact, as in formula output instance creation, and validates it into the object model.

        :param conceptQname: QName of the fact's concept
        :type conceptQname: QName
        :param attributes: Tuple of name, value, or tuples of name, value tuples (name,value) or ((name,value)[,(name,value...)]), where name is either QName or clark-notation name string
        :param text: Text content of fact (will be converted to xpath compatible str by FunctionXS.xsString)
        :type text: object
        :param parent: lxml element in instance to append as child of (defaults to the instance root)
        :type parent: ModelObject
        :param beforeSibling: lxml element in instance to insert new concept before
        :type beforeSibling: ModelObject
        :param afterSibling: lxml element in instance to insert new concept after
        :type afterSibling: ModelObject
        :param validate: specify False to block XML Validation (required when constructing a tuple which is invalid until after its contents are created)
        :type validate: boolean
        :returns: ModelFact -- New fact object
        """
        if parent is None:
            parent = self.modelDocument.xmlRootElement
        # stashed so element-construction hooks can find the intended parent model object
        self.makeelementParentModelObject = parent
        newFact = XmlUtil.addChild(
            parent,
            conceptQname,
            attributes=attributes,
            text=text,
            afterSibling=afterSibling,
            beforeSibling=beforeSibling,
        )
        # ModelFact is imported lazily at module level to avoid a circular import
        global ModelFact
        if ModelFact is None:
            from arelle.ModelInstanceObject import ModelFact
        # keep the by-QName cache current (done even for the early return below)
        if hasattr(self, "_factsByQname"):
            self._factsByQname[newFact.qname].add(newFact)
        if not isinstance(newFact, ModelFact):
            return newFact  # unable to create fact for this concept OR DTS not loaded for target instance (e.g., inline extraction, summary output)
        del self.makeelementParentModelObject
        if validate:
            XmlValidate.validate(self, newFact)
            self.modelDocument.factDiscover(newFact, parentElement=parent)
        # update cached sets so lazily-built indexes stay consistent
        if not newFact.isNil and hasattr(self, "_nonNilFactsInInstance"):
            self._nonNilFactsInInstance.add(newFact)
        if newFact.concept is not None:
            if hasattr(self, "_factsByDatatype"):
                # invalidate rather than update: updating
                del (
                    self._factsByDatatype
                )  # would need to iterate derived type ancestry to populate
            if hasattr(self, "_factsByPeriodType"):
                self._factsByPeriodType[newFact.concept.periodType].add(newFact)
            if hasattr(self, "_factsByDimQname"):
                # dimension index is invalidated; rebuilt lazily on next request
                del self._factsByDimQname
        self.setIsModified()
        return newFact
def setIsModified(self):
"""Records that the underlying document has been modified."""
self.modelDocument.isModified = True
def isModified(self):
"""Check if the underlying document has been modified."""
md = self.modelDocument
if md is not None:
return md.isModified
else:
return False
def modelObject(self, objectId):
"""Finds a model object by an ordinal ID which may be buried in a tkinter view id string (e.g., 'somedesignation_ordinalnumber').
:param objectId: string which includes _ordinalNumber, produced by ModelObject.objectId(), or integer object index
:type objectId: str or int
:returns: ModelObject
"""
if isinstance(objectId, _INT_TYPES): # may be long or short in 2.7
return self.modelObjects[objectId]
# assume it is a string with ID in a tokenized representation, like xyz_33
try:
return self.modelObjects[_INT(objectId.rpartition("_")[2])]
except (IndexError, ValueError):
return None
# UI thread viewModelObject
def viewModelObject(self, objectId):
"""Finds model object, if any, and synchronizes any views displaying it to bring the model object into scrollable view region and highlight it
:param objectId: string which includes _ordinalNumber, produced by ModelObject.objectId(), or integer object index
:type objectId: str or int
"""
modelObject = ""
try:
if isinstance(objectId, (ModelObject, FactPrototype)):
modelObject = objectId
elif isinstance(objectId, str) and objectId.startswith("_"):
modelObject = self.modelObject(objectId)
if modelObject is not None:
for view in self.views:
view.viewModelObject(modelObject)
except (IndexError, ValueError, AttributeError) as err:
self.modelManager.addToLog(
_("Exception viewing properties {0} {1} at {2}").format(
modelObject, err, traceback.format_tb(sys.exc_info()[2])
)
)
def effectiveMessageCode(self, messageCodes):
effectiveMessageCode = None
_validationType = self.modelManager.disclosureSystem.validationType
_exclusiveTypesPattern = (
self.modelManager.disclosureSystem.exclusiveTypesPattern
)
for argCode in (
messageCodes if isinstance(messageCodes, tuple) else (messageCodes,)
):
if (
isinstance(argCode, ModelValue.QName)
or (_validationType and argCode.startswith(_validationType))
or (
not _exclusiveTypesPattern
or _exclusiveTypesPattern.match(argCode) == None
)
):
effectiveMessageCode = argCode
break
return effectiveMessageCode
# isLoggingEffectiveFor( messageCodes= messageCode= level= )
def isLoggingEffectiveFor(self, **kwargs): # args can be messageCode(s) and level
logger = self.logger
if "messageCodes" in kwargs or "messageCode" in kwargs:
if "messageCodes" in kwargs:
messageCodes = kwargs["messageCodes"]
else:
messageCodes = kwargs["messageCode"]
messageCode = self.effectiveMessageCode(messageCodes)
codeEffective = messageCode and (
not logger.messageCodeFilter
or logger.messageCodeFilter.match(messageCode)
)
else:
codeEffective = True
if "level" in kwargs and logger.messageLevelFilter:
levelEffective = logger.messageLevelFilter.match(kwargs["level"].lower())
else:
levelEffective = True
return codeEffective and levelEffective
    def logArguments(self, codes, msg, codedArgs):
        """Prepares arguments for the logger function as per info() below.

        If codes includes EFM, GFM, HMRC, or SBR-coded error then the code chosen (if a sequence)
        corresponds to whether EFM, GFM, HMRC, or SBR validation is in effect.

        :param codes: message code or tuple of candidate message codes
        :param msg: message text with %(name)s placeholders
        :param codedArgs: dict of named message arguments; may include the special
            names modelObject/modelXbrl/modelDocument (reference sources),
            sourceFileLine(s), sourceLine, exc_info and messageCodes
        :returns: tuple -- (messageCode, args tuple for logger, extras dict for the log record)
        """
        def propValues(properties):
            # deref objects in properties (stringify values so lxml objects are not retained)
            return [
                (p[0], str(p[1]))
                if len(p) == 2
                else (p[0], str(p[1]), propValues(p[2]))
                for p in properties
                if 2 <= len(p) <= 3
            ]
        # determine logCode
        messageCode = self.effectiveMessageCode(codes)
        # determine message and extra arguments
        fmtArgs = {}
        extras = {"messageCode": messageCode}
        modelObjectArgs = ()
        # each named argument is either a reference source, source-file info,
        # or a message substitution value
        for argName, argValue in list(codedArgs.items()):
            if argName in ("modelObject", "modelXbrl", "modelDocument"):
                # entry URL is the base against which object hrefs are relativized
                try:
                    entryUrl = self.modelDocument.uri
                except AttributeError:
                    try:
                        entryUrl = self.entryLoadingUrl
                    except AttributeError:
                        entryUrl = self.fileSource.url
                refs = []
                modelObjectArgs = (
                    argValue
                    if isinstance(argValue, (tuple, list, set))
                    else (argValue,)
                )
                # one "ref" dict (href/sourceLine/objectId/properties) per model object
                for arg in flattenSequence(modelObjectArgs):
                    if arg is not None:
                        if isinstance(arg, _STR_BASE):
                            objectUrl = arg
                        else:
                            try:
                                objectUrl = arg.modelDocument.uri
                            except AttributeError:
                                try:
                                    objectUrl = self.modelDocument.uri
                                except AttributeError:
                                    objectUrl = self.entryLoadingUrl
                        try:
                            file = UrlUtil.relativeUri(entryUrl, objectUrl)
                        except:
                            file = ""
                        ref = {}
                        if isinstance(arg, (ModelObject, ObjectPropertyViewWrapper)):
                            _arg = (
                                arg.modelObject
                                if isinstance(arg, ObjectPropertyViewWrapper)
                                else arg
                            )
                            ref["href"] = (
                                file + "#" + XmlUtil.elementFragmentIdentifier(_arg)
                            )
                            ref["sourceLine"] = _arg.sourceline
                            ref["objectId"] = _arg.objectId()
                            if self.logRefObjectProperties:
                                try:
                                    ref["properties"] = propValues(arg.propertyView)
                                except AttributeError:
                                    pass  # is a default properties entry appropriate or needed?
                            if self.logRefHasPluginProperties:
                                refProperties = ref.get("properties", {})
                                for pluginXbrlMethod in pluginClassMethods(
                                    "Logging.Ref.Properties"
                                ):
                                    pluginXbrlMethod(arg, refProperties, codedArgs)
                                if refProperties:
                                    ref["properties"] = refProperties
                        else:
                            ref["href"] = file
                            try:
                                ref["sourceLine"] = arg.sourceline
                            except AttributeError:
                                pass  # arg may not have sourceline, ignore if so
                        if self.logRefHasPluginAttrs:
                            refAttributes = {}
                            for pluginXbrlMethod in pluginClassMethods(
                                "Logging.Ref.Attributes"
                            ):
                                pluginXbrlMethod(arg, refAttributes, codedArgs)
                            if refAttributes:
                                ref["customAttributes"] = refAttributes
                        refs.append(ref)
                extras["refs"] = refs
            elif argName == "sourceFileLine":
                # one pair of file and line number, e.g., (file,line) or just file
                ref = {}
                if isinstance(argValue, (tuple, list)):
                    ref["href"] = str(argValue[0])
                    if len(argValue) > 1 and argValue[1]:
                        ref["sourceLine"] = str(argValue[1])
                else:
                    ref["href"] = str(argValue)
                extras["refs"] = [ref]
            elif argName == "sourceFileLines":
                # sourceFileLines is tuple/list of pairs of file and line numbers, e.g., ((file,line),(file2,line2),...)
                refs = []
                for arg in (
                    argValue if isinstance(argValue, (tuple, list)) else (argValue,)
                ):
                    ref = {}
                    if isinstance(arg, (tuple, list)):
                        ref["href"] = str(arg[0])
                        if len(arg) > 1 and arg[1]:
                            ref["sourceLine"] = str(arg[1])
                    else:
                        ref["href"] = str(arg)
                    refs.append(ref)
                extras["refs"] = refs
            elif argName == "sourceLine":
                if isinstance(
                    argValue, _INT_TYPES
                ):  # must be sortable with int's in logger
                    extras["sourceLine"] = argValue
            elif argName not in ("exc_info", "messageCodes"):
                # anything else is a message substitution value; stringify
                # model objects so no lxml references are retained in the record
                if isinstance(
                    argValue,
                    (
                        ModelValue.QName,
                        ModelObject,
                        bool,
                        FileNamedStringIO,
                        # might be a set of lxml objects not dereferencable at shutdown
                        tuple,
                        list,
                        set,
                    ),
                ):
                    fmtArgs[argName] = str(argValue)
                elif argValue is None:
                    fmtArgs[argName] = "(none)"
                elif isinstance(argValue, _INT_TYPES):
                    # need locale-dependent formatting
                    fmtArgs[argName] = format_string(
                        self.modelManager.locale, "%i", argValue
                    )
                elif isinstance(argValue, (float, Decimal)):
                    # need locale-dependent formatting
                    fmtArgs[argName] = format_string(
                        self.modelManager.locale, "%f", argValue
                    )
                elif isinstance(argValue, dict):
                    fmtArgs[argName] = argValue
                else:
                    fmtArgs[argName] = str(argValue)
        # ensure every record carries at least the entry document as a reference
        if "refs" not in extras:
            try:
                file = os.path.basename(self.modelDocument.uri)
            except AttributeError:
                try:
                    file = os.path.basename(self.entryLoadingUrl)
                except:
                    file = ""
            extras["refs"] = [{"href": file}]
        for pluginXbrlMethod in pluginClassMethods("Logging.Message.Parameters"):
            # plug in can rewrite msg string or return msg if not altering msg
            msg = pluginXbrlMethod(messageCode, msg, modelObjectArgs, fmtArgs) or msg
        return (messageCode, (msg, fmtArgs) if fmtArgs else (msg,), extras)
    def debug(self, codes, msg, **args):
        """Same as error(), but logged at DEBUG level"""
        """@messageCatalog=[]"""
        self.log("DEBUG", codes, msg, **args)
    def info(self, codes, msg, **args):
        """Same as error(), but logged at INFO level"""
        """@messageCatalog=[]"""
        self.log("INFO", codes, msg, **args)
    def warning(self, codes, msg, **args):
        """Same as error(), but logged at WARNING level, and no error code saved for Validate"""
        """@messageCatalog=[]"""
        self.log("WARNING", codes, msg, **args)
def log(self, level, codes, msg, **args):
"""Same as error(), but level passed in as argument"""
logger = self.logger
messageCode, logArgs, extras = self.logArguments(codes, msg, args)
if messageCode == "asrtNoLog":
self.errors.append(args["assertionResults"])
elif (
messageCode
and (
not logger.messageCodeFilter
or logger.messageCodeFilter.match(messageCode)
)
and (
not logger.messageLevelFilter
or logger.messageLevelFilter.match(level.lower())
)
):
numericLevel = logging._checkLevel(level)
self.logCount[numericLevel] = self.logCount.get(numericLevel, 0) + 1
if numericLevel >= self.errorCaptureLevel:
self.errors.append(messageCode)
"""@messageCatalog=[]"""
logger.log(
numericLevel, *logArgs, exc_info=args.get("exc_info"), extra=extras
)
    def error(self, codes, msg, **args):
        """Logs a message as an error, by code, with logging-system message text (using %(name)s named arguments
        to compose the string by locale language), resolving model object references (such as qname),
        to prevent non-dereferencable memory usage.  Supports logging system parameters, and
        special parameters modelObject, modelXbrl, or modelDocument, to provide trace
        information to the file, source line, and href (XPath element scheme pointer).
        Supports the logging exc_info argument.

        Args may include a specification of one or more ModelObjects that identify the source of the
        message, as modelObject={single-modelObject, (sequence-of-modelObjects)} or modelXbrl=modelXbrl or
        modelDocument=modelDocument.

        Args must include a named argument for each msg %(namedArg)s replacement.

        :param codes: Message code or tuple/list of message codes
        :type codes: str or [str]
        :param msg: Message text string to be formatted and replaced with named parameters in **args
        :param **args: Named arguments including modelObject, modelXbrl, or modelDocument, named arguments in msg string, and any exc_info argument.
        :param messageCodes: If first parameter codes, above, is dynamically formatted, this is a documentation string of the message codes only used for extraction of the message catalog document (not used in run-time processing).
        """
        """@messageCatalog=[]"""
        self.log("ERROR", codes, msg, **args)
    def exception(self, codes, msg, **args):
        """Same as error(), but logged at CRITICAL level (for exceptions)"""
        """@messageCatalog=[]"""
        self.log("CRITICAL", codes, msg, **args)
    def logProfileStats(self):
        """Logs (at INFO) the profile statistics collected by profileStat()."""
        # each profileStats value is (ordinal, seconds, memory)
        timeTotal = format_string(
            self.modelManager.locale,
            _("%.3f secs"),
            self.profileStats.get("total", (0, 0, 0))[1],
        )
        timeEFM = format_string(
            self.modelManager.locale,
            _("%.3f secs"),
            self.profileStats.get("validateEFM", (0, 0, 0))[1],
        )
        self.info(
            "info:profileStats",
            _("Profile statistics \n")
            + " \n".join(
                format_string(
                    self.modelManager.locale,
                    _("%s %.3f secs, %.0fK"),
                    (statName, statValue[1], statValue[2]),
                    grouping=True,
                )
                for statName, statValue in sorted(
                    list(self.profileStats.items()), key=lambda item: item[1]
                )
            )
            + " \n",  # put instance reference on fresh line in traces
            # NOTE(review): self.modelXbrl is referenced here (as in profileStat);
            # presumably it refers back to this model -- confirm
            modelObject=self.modelXbrl.modelDocument,
            profileStats=self.profileStats,
            timeTotal=timeTotal,
            timeEFM=timeEFM,
        )
def profileStat(self, name=None, stat=None):
"""
order 1xx - load, import, setup, etc
order 2xx - views, 26x - table lb
3xx diff, other utilities
5xx validation
6xx formula
"""
if self.modelManager.collectProfileStats:
import time
global profileStatNumber
try:
if name:
thisTime = (
stat
if stat is not None
else time.time() - self._startedTimeStat
)
mem = self.modelXbrl.modelManager.cntlr.memoryUsed
prevTime = self.profileStats.get(name, (0, 0, 0))[1]
self.profileStats[name] = (
profileStatNumber,
thisTime + prevTime,
mem,
)
profileStatNumber += 1
except AttributeError:
pass
if stat is None:
self._startedTimeStat = time.time()
def profileActivity(self, activityCompleted=None, minTimeToShow=0):
"""Used to provide interactive GUI messages of long-running processes.
When the time between last profileActivity and this profileActivity exceeds minTimeToShow, then
the time is logged (if it is shorter than it is not logged), thus providing feedback of long
running (and possibly troublesome) processing steps.
:param activityCompleted: Description of activity completed, or None if call is just to demark starting of a profiled activity.
:type activityCompleted: str
:param minTimeToShow: Seconds of elapsed time for activity, if longer then the profile message appears in the log.
:type minTimeToShow: seconds
"""
import time
try:
if activityCompleted:
timeTaken = time.time() - self._startedProfiledActivity
if timeTaken > minTimeToShow:
self.info(
"info:profileActivity",
_("%(activity)s %(time)s secs\n"),
modelObject=self.modelXbrl.modelDocument,
activity=activityCompleted,
time=format_string(
self.modelManager.locale, "%.3f", timeTaken, grouping=True
),
)
except AttributeError:
pass
self._startedProfiledActivity = time.time()
    def saveDTSpackage(self):
        """Contributed program to save the DTS package as a zip file.

        Refactored into a plug-in (and may be removed from main code).  No-op when
        the file source is already an archive; only locally-stored (non-HTTP)
        documents are packaged.
        """
        if self.fileSource.isArchive:
            return
        from zipfile import ZipFile
        import os
        entryFilename = self.fileSource.url
        pkgFilename = entryFilename + ".zip"
        with ZipFile(pkgFilename, "w") as zip:
            numFiles = 0
            for fileUri in sorted(self.urlDocs.keys()):
                if not isHttpUrl(fileUri):
                    numFiles += 1
                    # this has to be a relative path because the hrefs will break
                    # NOTE(review): flattening to basename could collide if two
                    # documents share a file name -- confirm acceptable
                    zip.write(fileUri, os.path.basename(fileUri))
        self.info(
            "info",
            _(
                "DTS of %(entryFile)s has %(numberOfFiles)s files packaged into %(packageOutputFile)s"
            ),
            modelObject=self,
            entryFile=os.path.basename(entryFilename),
            packageOutputFile=pkgFilename,
            numberOfFiles=numFiles,
        )
<|endoftext|> |
<|endoftext|>"""
Created on Oct 17, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
"""
from collections import defaultdict
from math import log10, isnan, isinf, fabs, trunc, fmod, floor, pow
import decimal
try:
from regex import compile as re_compile
except ImportError:
from re import compile as re_compile
import hashlib
from arelle import Locale, XbrlConst, XbrlUtil
from arelle.ModelObject import ObjectPropertyViewWrapper
from arelle.XmlValidate import UNVALIDATED, VALID
# lexical parts of a numeric value: optional sign, integer digits (leading zeros
# stripped), decimal point, leading fraction zeros, significant fraction digits,
# exponent marker and signed exponent
numberPattern = re_compile(
    "[-+]?[0]*([1-9]?[0-9]*)([.])?(0*)([1-9]?[0-9]*)?([eE])?([-+]?[0-9]*)?"
)
# frequently used decimal/float constants, built once at import time
ZERO = decimal.Decimal(0)
ONE = decimal.Decimal(1)
NaN = decimal.Decimal("NaN")
floatNaN = float("NaN")
floatINF = float("INF")
def validate(modelXbrl, inferDecimals=False):
    """Validate the instance's calculation, essence-alias and requires-element
    relationship networks, logging INCONSISTENCY entries on modelXbrl.

    :param modelXbrl: loaded instance model to validate
    :param inferDecimals: if True round facts by inferred decimals, else by inferred precision
    """
    ValidateXbrlCalcs(modelXbrl, inferDecimals).validate()
class ValidateXbrlCalcs:
def __init__(self, modelXbrl, inferDecimals=False):
self.modelXbrl = modelXbrl
self.inferDecimals = inferDecimals
self.mapContext = {}
self.mapUnit = {}
self.sumFacts = defaultdict(list)
self.sumConceptBindKeys = defaultdict(set)
self.itemFacts = defaultdict(list)
self.itemConceptBindKeys = defaultdict(set)
self.duplicateKeyFacts = {}
self.duplicatedFacts = set()
self.esAlFacts = defaultdict(list)
self.esAlConceptBindKeys = defaultdict(set)
self.conceptsInEssencesAlias = set()
self.requiresElementFacts = defaultdict(list)
self.conceptsInRequiresElement = set()
def validate(self):
if not self.modelXbrl.contexts and not self.modelXbrl.facts:
return # skip if no contexts or facts
if (
not self.inferDecimals
): # infering precision is now contrary to XBRL REC section 5.2.5.2
self.modelXbrl.info(
"xbrl.5.2.5.2:inferringPrecision",
"Validating calculations inferring precision.",
)
# identify equal contexts
self.modelXbrl.profileActivity()
uniqueContextHashes = {}
for context in list(self.modelXbrl.contexts.values()):
h = context.contextDimAwareHash
if h in uniqueContextHashes:
if context.isEqualTo(uniqueContextHashes[h]):
self.mapContext[context] = uniqueContextHashes[h]
else:
uniqueContextHashes[h] = context
del uniqueContextHashes
self.modelXbrl.profileActivity("... identify equal contexts", minTimeToShow=1.0)
# identify equal contexts
uniqueUnitHashes = {}
for unit in list(self.modelXbrl.units.values()):
h = unit.hash
if h in uniqueUnitHashes:
if unit.isEqualTo(uniqueUnitHashes[h]):
self.mapUnit[unit] = uniqueUnitHashes[h]
else:
uniqueUnitHashes[h] = unit
self.modelXbrl.profileActivity("... identify equal units", minTimeToShow=1.0)
# identify concepts participating in essence-alias relationships
# identify calcluation & essence-alias base sets (by key)
for baseSetKey in list(self.modelXbrl.baseSets.keys()):
arcrole, ELR, linkqname, arcqname = baseSetKey
if ELR and linkqname and arcqname:
if arcrole in (XbrlConst.essenceAlias, XbrlConst.requiresElement):
conceptsSet = {
XbrlConst.essenceAlias: self.conceptsInEssencesAlias,
XbrlConst.requiresElement: self.conceptsInRequiresElement,
}[arcrole]
for modelRel in self.modelXbrl.relationshipSet(
arcrole, ELR, linkqname, arcqname
).modelRelationships:
for concept in (
modelRel.fromModelObject,
modelRel.toModelObject,
):
if concept is not None and concept.qname is not None:
conceptsSet.add(concept)
self.modelXbrl.profileActivity(
"... identify requires-element and esseance-aliased concepts",
minTimeToShow=1.0,
)
self.bindFacts(
self.modelXbrl.facts, [self.modelXbrl.modelDocument.xmlRootElement]
)
self.modelXbrl.profileActivity("... bind facts", minTimeToShow=1.0)
# identify calcluation & essence-alias base sets (by key)
for baseSetKey in list(self.modelXbrl.baseSets.keys()):
arcrole, ELR, linkqname, arcqname = baseSetKey
if ELR and linkqname and arcqname:
if arcrole in (
XbrlConst.summationItem,
XbrlConst.essenceAlias,
XbrlConst.requiresElement,
):
relsSet = self.modelXbrl.relationshipSet(
arcrole, ELR, linkqname, arcqname
)
if arcrole == XbrlConst.summationItem:
fromRelationships = relsSet.fromModelObjects()
for sumConcept, modelRels in list(fromRelationships.items()):
sumBindingKeys = self.sumConceptBindKeys[sumConcept]
dupBindingKeys = set()
boundSumKeys = set()
# determine boundSums
for modelRel in modelRels:
itemConcept = modelRel.toModelObject
if (
itemConcept is not None
and itemConcept.qname is not None
):
itemBindingKeys = self.itemConceptBindKeys[
itemConcept
]
boundSumKeys |= sumBindingKeys & itemBindingKeys
# add up rounded items
boundSums = defaultdict(
decimal.Decimal
) # sum of facts meeting factKey
boundSummationItems = defaultdict(
list
) # corresponding fact refs for messages
for modelRel in modelRels:
weight = modelRel.weightDecimal
itemConcept = modelRel.toModelObject
if itemConcept is not None:
for itemBindKey in boundSumKeys:
ancestor, contextHash, unit = itemBindKey
factKey = (
itemConcept,
ancestor,
contextHash,
unit,
)
if factKey in self.itemFacts:
for fact in self.itemFacts[factKey]:
if fact in self.duplicatedFacts:
dupBindingKeys.add(itemBindKey)
else:
roundedValue = roundFact(
fact, self.inferDecimals
)
boundSums[itemBindKey] += (
roundedValue * weight
)
boundSummationItems[
itemBindKey
].append(
wrappedFactWithWeight(
fact, weight, roundedValue
)
)
for sumBindKey in boundSumKeys:
ancestor, contextHash, unit = sumBindKey
factKey = (sumConcept, ancestor, contextHash, unit)
if factKey in self.sumFacts:
sumFacts = self.sumFacts[factKey]
for fact in sumFacts:
if fact in self.duplicatedFacts:
dupBindingKeys.add(sumBindKey)
elif sumBindKey not in dupBindingKeys:
roundedSum = roundFact(
fact, self.inferDecimals
)
roundedItemsSum = roundFact(
fact,
self.inferDecimals,
vDecimal=boundSums[sumBindKey],
)
if roundedItemsSum != roundFact(
fact, self.inferDecimals
):
d = inferredDecimals(fact)
if isnan(d) or isinf(d):
d = 4
_boundSummationItems = (
boundSummationItems[sumBindKey]
)
unreportedContribingItemQnames = (
[]
) # list the missing/unreported contributors in relationship order
for modelRel in modelRels:
itemConcept = modelRel.toModelObject
if (
itemConcept is not None
and (
itemConcept,
ancestor,
contextHash,
unit,
)
not in self.itemFacts
):
unreportedContribingItemQnames.append(
str(itemConcept.qname)
)
self.modelXbrl.log(
"INCONSISTENCY",
"xbrl.5.2.5.2:calcInconsistency",
_(
"Calculation inconsistent from %(concept)s in link role %(linkrole)s reported sum %(reportedSum)s computed sum %(computedSum)s context %(contextID)s unit %(unitID)s unreportedContributingItems %(unreportedContributors)s"
),
modelObject=wrappedSummationAndItems(
fact,
roundedSum,
_boundSummationItems,
),
concept=sumConcept.qname,
linkrole=ELR,
linkroleDefinition=self.modelXbrl.roleTypeDefinition(
ELR
),
reportedSum=Locale.format_decimal(
self.modelXbrl.locale,
roundedSum,
1,
max(d, 0),
),
computedSum=Locale.format_decimal(
self.modelXbrl.locale,
roundedItemsSum,
1,
max(d, 0),
),
contextID=fact.context.id,
unitID=fact.unit.id,
unreportedContributors=", ".join(
unreportedContribingItemQnames
)
or "none",
)
del unreportedContribingItemQnames[:]
boundSummationItems.clear() # dereference facts in list
elif arcrole == XbrlConst.essenceAlias:
for modelRel in relsSet.modelRelationships:
essenceConcept = modelRel.fromModelObject
aliasConcept = modelRel.toModelObject
essenceBindingKeys = self.esAlConceptBindKeys[
essenceConcept
]
aliasBindingKeys = self.esAlConceptBindKeys[aliasConcept]
for esAlBindKey in essenceBindingKeys & aliasBindingKeys:
ancestor, contextHash = esAlBindKey
essenceFactsKey = (
essenceConcept,
ancestor,
contextHash,
)
aliasFactsKey = (aliasConcept, ancestor, contextHash)
if (
essenceFactsKey in self.esAlFacts
and aliasFactsKey in self.esAlFacts
):
for eF in self.esAlFacts[essenceFactsKey]:
for aF in self.esAlFacts[aliasFactsKey]:
essenceUnit = self.mapUnit.get(
eF.unit, eF.unit
)
aliasUnit = self.mapUnit.get(
aF.unit, aF.unit
)
if essenceUnit != aliasUnit:
self.modelXbrl.log(
"INCONSISTENCY",
"xbrl.5.2.6.2.2:essenceAliasUnitsInconsistency",
_(
"Essence-Alias inconsistent units from %(essenceConcept)s to %(aliasConcept)s in link role %(linkrole)s context %(contextID)s"
),
modelObject=(modelRel, eF, aF),
essenceConcept=essenceConcept.qname,
aliasConcept=aliasConcept.qname,
linkrole=ELR,
linkroleDefinition=self.modelXbrl.roleTypeDefinition(
ELR
),
contextID=eF.context.id,
)
if not XbrlUtil.vEqual(eF, aF):
self.modelXbrl.log(
"INCONSISTENCY",
"xbrl.5.2.6.2.2:essenceAliasUnitsInconsistency",
_(
"Essence-Alias inconsistent value from %(essenceConcept)s to %(aliasConcept)s in link role %(linkrole)s context %(contextID)s"
),
modelObject=(modelRel, eF, aF),
essenceConcept=essenceConcept.qname,
aliasConcept=aliasConcept.qname,
linkrole=ELR,
linkroleDefinition=self.modelXbrl.roleTypeDefinition(
ELR
),
contextID=eF.context.id,
)
elif arcrole == XbrlConst.requiresElement:
for modelRel in relsSet.modelRelationships:
sourceConcept = modelRel.fromModelObject
requiredConcept = modelRel.toModelObject
if (
sourceConcept in self.requiresElementFacts
and not requiredConcept in self.requiresElementFacts
):
self.modelXbrl.log(
"INCONSISTENCY",
"xbrl.5.2.6.2.4:requiresElementInconsistency",
_(
"Requires-Element %(requiringConcept)s missing required fact for %(requiredConcept)s in link role %(linkrole)s"
),
modelObject=sourceConcept,
requiringConcept=sourceConcept.qname,
requiredConcept=requiredConcept.qname,
linkrole=ELR,
linkroleDefinition=self.modelXbrl.roleTypeDefinition(
ELR
),
)
self.modelXbrl.profileActivity("... find inconsistencies", minTimeToShow=1.0)
self.modelXbrl.profileActivity() # reset
    def bindFacts(self, facts, ancestors):
        """Index facts for calc, essence-alias, and requires-element checking.

        Recursively walks facts (descending into tuples), populating the
        instance's fact indexes keyed by (concept, ancestor, context hash,
        unit) so later validation passes can find comparable facts.

        facts -- iterable of model facts to index
        ancestors -- list of ancestor elements; the last entry is the
            immediate parent of each fact in `facts`
        """
        for f in facts:
            concept = f.concept
            if concept is not None:
                # index facts by their calc relationship set
                if concept.isNumeric:
                    # NOTE(review): assumes ancestors is non-empty for numeric
                    # facts; calcKey/bindKey below are the last loop values.
                    for ancestor in ancestors:
                        # tbd: uniqify context and unit
                        context = self.mapContext.get(f.context, f.context)
                        # must use nonDimAwareHash to achieve s-equal comparison of contexts
                        contextHash = (
                            context.contextNonDimAwareHash
                            if context is not None
                            else hash(None)
                        )
                        unit = self.mapUnit.get(f.unit, f.unit)
                        calcKey = (concept, ancestor, contextHash, unit)
                        if not f.isNil:
                            self.itemFacts[calcKey].append(f)
                            bindKey = (ancestor, contextHash, unit)
                            self.itemConceptBindKeys[concept].add(bindKey)
                    if not f.isNil:
                        self.sumFacts[calcKey].append(
                            f
                        )  # sum only for immediate parent
                        self.sumConceptBindKeys[concept].add(bindKey)
                    # calcKey is the last ancestor added (immediate parent of fact)
                    if calcKey in self.duplicateKeyFacts:
                        self.duplicatedFacts.add(f)
                        self.duplicatedFacts.add(self.duplicateKeyFacts[calcKey])
                    else:
                        self.duplicateKeyFacts[calcKey] = f
                elif concept.isTuple:
                    # recurse into tuple contents, extending the ancestor chain
                    self.bindFacts(f.modelTupleFacts, ancestors + [f])
                # index facts by their essence alias relationship set
                if concept in self.conceptsInEssencesAlias and not f.isNil:
                    ancestor = ancestors[-1]  # only care about direct parent
                    context = self.mapContext.get(f.context, f.context)
                    contextHash = (
                        context.contextNonDimAwareHash
                        if context is not None
                        else hash(None)
                    )
                    esAlKey = (concept, ancestor, contextHash)
                    self.esAlFacts[esAlKey].append(f)
                    bindKey = (ancestor, contextHash)
                    self.esAlConceptBindKeys[concept].add(bindKey)
                # index facts by their requires element usage
                if concept in self.conceptsInRequiresElement:
                    self.requiresElementFacts[concept].append(f)
def roundFact(fact, inferDecimals=False, vDecimal=None):
    """Round a fact's numeric value per its decimals/precision attributes.

    fact -- model fact with .value, .decimals, .precision
    inferDecimals -- True: round per XBRL 2.1 sect 4.6.7.2 (half-even on a
        decimals position, inferring it from precision when needed);
        False: round per sect 4.6.7.1 (half-up on an inferred precision)
    vDecimal -- already-parsed decimal.Decimal value, or None to parse
        fact.value here

    Returns a decimal.Decimal (NaN when the value or attributes are unusable).
    """
    if vDecimal is None:
        vStr = fact.value
        try:
            vDecimal = decimal.Decimal(vStr)
            vFloatFact = float(vStr)
        except (
            decimal.InvalidOperation,
            ValueError,
        ):  # would have been a schema error reported earlier
            vDecimal = NaN
            vFloatFact = floatNaN
    else:  # only vFloat is defined, may not need vStr unless inferring precision from decimals
        if vDecimal.is_nan():
            return vDecimal
        vStr = None
        try:
            vFloatFact = float(fact.value)
        except ValueError:
            vFloatFact = floatNaN
    dStr = fact.decimals
    pStr = fact.precision
    if dStr == "INF" or pStr == "INF":
        # infinite precision/decimals: value is exact, no rounding
        vRounded = vDecimal
    elif inferDecimals:  # infer decimals, round per 4.6.7.2, e.g., half-down
        if pStr:
            p = int(pStr)
            if p == 0:
                vRounded = NaN
            elif vDecimal == 0:
                vRounded = ZERO
            else:
                # decimals position inferred from precision and magnitude
                vAbs = fabs(vFloatFact)
                d = p - int(floor(log10(vAbs))) - 1
                # defeat binary rounding to nearest even
                # if trunc(fmod(vFloat * (10 ** d),2)) != 0:
                #     vFloat += 10 ** (-d - 1) * (1.0 if vFloat > 0 else -1.0)
                # vRounded = round(vFloat, d)
                vRounded = decimalRound(vDecimal, d, decimal.ROUND_HALF_EVEN)
        elif dStr:
            d = int(dStr)
            # defeat binary rounding to nearest even
            # if trunc(fmod(vFloat * (10 ** d),2)) != 0:
            #     vFloat += 10 ** (-d - 1) * (-1.0 if vFloat > 0 else 1.0)
            # vRounded = round(vFloat, d)
            # vRounded = round(vFloat,d)
            vRounded = decimalRound(vDecimal, d, decimal.ROUND_HALF_EVEN)
        else:  # no information available to do rounding (other errors xbrl.4.6.3 error)
            vRounded = vDecimal
    else:  # infer precision
        if dStr:
            # derive precision from the lexical form's significant digits
            match = numberPattern.match(vStr if vStr else str(vDecimal))
            if match:
                nonZeroInt, period, zeroDec, nonZeroDec, e, exp = match.groups()
                p = (
                    (
                        len(nonZeroInt)
                        if nonZeroInt and (len(nonZeroInt)) > 0
                        else -len(zeroDec)
                    )
                    + (int(exp) if exp and (len(exp) > 0) else 0)
                    + (int(dStr))
                )
            else:
                p = 0
        elif pStr:
            p = int(pStr)
        else:  # no rounding information
            p = None
        if p == 0:
            vRounded = NaN
        elif vDecimal == 0:
            vRounded = vDecimal
        elif p is not None:  # round per 4.6.7.1, half-up
            vAbs = vDecimal.copy_abs()
            log = vAbs.log10()
            # defeat rounding to nearest even
            d = p - int(log) - (1 if vAbs >= 1 else 0)
            # if trunc(fmod(vFloat * (10 ** d),2)) != 0:
            #     vFloat += 10 ** (-d - 1) * (1.0 if vFloat > 0 else -1.0)
            # vRounded = round(vFloat, d)
            vRounded = decimalRound(vDecimal, d, decimal.ROUND_HALF_UP)
        else:  # no information available to do rounding (other errors xbrl.4.6.3 error)
            vRounded = vDecimal
    return vRounded
def decimalRound(x, d, rounding):
    """Quantize Decimal x to d decimal places with the given rounding mode.

    Decimal.quantize raises for exponents outside its precision range, so any
    non-normal value (NaN, infinity, zero, subnormal) or an excessive digit
    count is returned unchanged.
    """
    if not x.is_normal() or not (-28 <= d <= 28):
        return x  # infinite, NaN, zero, or excessive decimal digits ( > 28 )
    if d < 0:
        # quantize only seems to work on fractional part: shift the integer
        # portion down to a fraction at the scaled point, quantize, shift back
        return x.scaleb(d).quantize(ONE, rounding).scaleb(-d)
    return x.quantize(ONE.scaleb(-d), rounding)
def inferredPrecision(fact):
    """Infer a numeric fact's effective precision from decimals/precision.

    Returns floatINF for "INF", floatNaN for an unparsable value, otherwise a
    non-negative int precision (0 when the value is zero or precision is 0).
    """
    value = fact.value
    decimalsAttr = fact.decimals
    precisionAttr = fact.precision
    if "INF" in (decimalsAttr, precisionAttr):
        return floatINF
    try:
        vFloat = float(value)
        if not decimalsAttr:
            # no decimals attribute: precision is given directly
            return int(precisionAttr)
        m = numberPattern.match(value if value else str(vFloat))
        if m is None:
            p = 0
        else:
            nonZeroInt, period, zeroDec, nonZeroDec, e, exp = m.groups()
            # significant integer digits (negative for leading fractional zeros)
            digits = len(nonZeroInt) if nonZeroInt else (-len(zeroDec) if nonZeroDec else 0)
            p = digits + (int(exp) if exp else 0) + int(decimalsAttr)
            if p < 0:
                p = 0  # "pathological case" 2.1 spec example 13 line 7
    except ValueError:
        return floatNaN
    if p == 0 or vFloat == 0:
        return 0
    return p
def inferredDecimals(fact):
    """Infer a numeric fact's effective decimals from decimals/precision.

    Returns floatINF for "INF" (or a zero value with precision), floatNaN when
    it cannot be determined, otherwise an int decimals position.
    """
    value = fact.value
    decimalsAttr = fact.decimals
    precisionAttr = fact.precision
    if "INF" in (decimalsAttr, precisionAttr):
        return floatINF
    try:
        if precisionAttr:
            precision = int(precisionAttr)
            if precision == 0:
                return floatNaN  # decimals cannot be determined from 0 precision
            vFloat = float(value)
            if vFloat == 0:
                return floatINF  # decimals cannot be determined for a zero value
            # convert precision into a decimals position via the magnitude
            return precision - int(floor(log10(fabs(vFloat)))) - 1
        if decimalsAttr:
            return int(decimalsAttr)
    except ValueError:
        pass
    return floatNaN
def roundValue(value, precision=None, decimals=None, scale=None):
    """Round a lexical numeric value per an XBRL precision or decimals attribute.

    value -- lexical (string) or numeric value
    precision -- "INF", int/float, or numeric string; rounds half-up per
        XBRL 2.1 sect 4.6.7.1
    decimals -- "INF", int/float, or numeric string; rounds half-even per
        XBRL 2.1 sect 4.6.7.2
    scale -- optional power-of-ten exponent applied to the value before rounding

    Returns a decimal.Decimal; NaN when the value or attributes are unusable.
    """
    try:
        vDecimal = decimal.Decimal(value)
        if scale:
            iScale = int(scale)
            vDecimal = vDecimal.scaleb(iScale)
        if precision is not None:
            vFloat = float(value)
            if scale:
                # scale by a power of ten, matching vDecimal.scaleb above
                # (bug fix: was pow(vFloat, iScale), which exponentiated the
                # value instead of shifting its decimal point)
                vFloat = vFloat * pow(10.0, iScale)
    except (
        decimal.InvalidOperation,
        ValueError,
    ):  # would have been a schema error reported earlier
        return NaN
    if precision is not None:
        if not isinstance(precision, (int, float)):
            if precision == "INF":
                precision = floatINF
            else:
                try:
                    precision = int(precision)
                except ValueError:  # would be a schema error
                    precision = floatNaN
        if isinf(precision):
            vRounded = vDecimal
        elif precision == 0 or isnan(precision):
            vRounded = NaN
        elif vFloat == 0:
            vRounded = ZERO
        else:
            # derive the decimals position from precision and the magnitude
            vAbs = fabs(vFloat)
            log = log10(vAbs)
            d = precision - int(log) - (1 if vAbs >= 1 else 0)
            vRounded = decimalRound(vDecimal, d, decimal.ROUND_HALF_UP)
    elif decimals is not None:
        if not isinstance(decimals, (int, float)):
            if decimals == "INF":
                decimals = floatINF
            else:
                try:
                    decimals = int(decimals)
                except ValueError:  # would be a schema error
                    decimals = floatNaN
        if isinf(decimals):
            vRounded = vDecimal
        elif isnan(decimals):
            vRounded = NaN
        else:
            vRounded = decimalRound(vDecimal, decimals, decimal.ROUND_HALF_EVEN)
    else:
        # neither precision nor decimals: value passes through unrounded
        vRounded = vDecimal
    return vRounded
def insignificantDigits(value, precision=None, decimals=None, scale=None):
    """Find digits of value below its precision/decimals significance threshold.

    Returns (truncated-value, insignificant-digits) as Decimals when the value
    carries digits beyond what precision/decimals allow, else None (also None
    for INF, NaN, zero, unusable attributes, or unparsable values).
    """
    try:
        vDecimal = decimal.Decimal(value)
        if scale:
            iScale = int(scale)
            vDecimal = vDecimal.scaleb(iScale)
        if precision is not None:
            vFloat = float(value)
            if scale:
                # scale by a power of ten, matching vDecimal.scaleb above
                # (bug fix: was pow(vFloat, iScale), which exponentiated the
                # value instead of shifting its decimal point)
                vFloat = vFloat * pow(10.0, iScale)
    except (
        decimal.InvalidOperation,
        ValueError,
    ):  # would have been a schema error reported earlier
        return None
    if precision is not None:
        if not isinstance(precision, (int, float)):
            if precision == "INF":
                return None
            else:
                try:
                    precision = int(precision)
                except ValueError:  # would be a schema error
                    return None
        if isinf(precision) or precision == 0 or isnan(precision) or vFloat == 0:
            return None
        else:
            # convert precision into an equivalent decimals position
            vAbs = fabs(vFloat)
            log = log10(vAbs)
            decimals = precision - int(log) - (1 if vAbs >= 1 else 0)
    elif decimals is not None:
        if not isinstance(decimals, (int, float)):
            if decimals == "INF":
                return None
            else:
                try:
                    decimals = int(decimals)
                except ValueError:  # would be a schema error
                    return None
        if isinf(decimals) or isnan(decimals):
            return None
    else:
        return None
    if (
        vDecimal.is_normal() and -28 <= decimals <= 28
    ):  # prevent exception with excessive quantization digits
        if decimals > 0:
            divisor = ONE.scaleb(
                -decimals
            )  # fractional scaling doesn't produce scientific notation
        else:  # extra quantize step to prevent scientific notation for decimal number
            divisor = ONE.scaleb(-decimals).quantize(
                ONE, decimal.ROUND_HALF_UP
            )  # should never round
        # renamed from "insignificantDigits" so the local no longer shadows
        # this function's own name
        insigDigits = abs(vDecimal) % divisor
        if insigDigits:
            return (
                vDecimal // divisor * divisor,  # truncated portion of number
                insigDigits,  # insignificant digits portion of number
            )
    return None
def wrappedFactWithWeight(fact, weight, roundedValue):
    """Wrap a fact with its calc weight and rounded value as view properties."""
    extraProperties = (("weight", weight), ("roundedValue", roundedValue))
    return ObjectPropertyViewWrapper(fact, extraProperties)
def wrappedSummationAndItems(fact, roundedSum, boundSummationItems):
    """Wrap a summation fact and its contributing items for the view.

    Produces sha256 digests of the summation value and of the sorted
    contributing item values, so linkbase updates that merely reorder
    summation terms hash identically.

    ARELLE-281 note: an earlier python hash()-based implementation was
    replaced with hashlib for fewer collisions.
    """
    # items hash: sort by qname so reordering of summation terms in linkbase
    # updates does not change the digest
    itemsDigest = hashlib.sha256()
    for bound in sorted(boundSummationItems, key=lambda b: b.modelObject.qname):
        qn = bound.modelObject.qname
        # qname of an erroneous submission may not be utf-8 perfectly encodable
        itemsDigest.update(qn.namespaceURI.encode("utf-8", "replace"))
        itemsDigest.update(qn.localName.encode("utf-8", "replace"))
        itemsDigest.update(str(bound.extraProperties[1][1]).encode("utf-8", "replace"))
    # summation value hash
    sumDigest = hashlib.sha256()
    sumDigest.update(fact.qname.namespaceURI.encode("utf-8", "replace"))
    sumDigest.update(fact.qname.localName.encode("utf-8", "replace"))
    sumDigest.update(str(roundedSum).encode("utf-8", "replace"))
    # return list of bound summation followed by bound contributing items
    wrappedSum = ObjectPropertyViewWrapper(
        fact,
        (
            ("sumValueHash", sumDigest.hexdigest()),
            ("itemValuesHash", itemsDigest.hexdigest()),
            ("roundedSum", roundedSum),
        ),
    )
    return [wrappedSum] + boundSummationItems
<|endoftext|> |
<|endoftext|>"""
Created on Apr 5, 2015
@author: Acsone S. A.
(c) Copyright 2015 Mark V Systems Limited, All rights reserved.
"""
from tkinter import *
try:
from tkinter.ttk import *
except ImportError:
from tkinter.ttk import *
from arelle.CntlrWinTooltip import ToolTip
class ViewPane:
    """Base class for a tabbed GUI view pane over a loaded modelXbrl.

    Registers itself on the model's views list and wires up optional tooltip
    support, context menus, and label-language selection for subclasses.
    """

    def __init__(
        self, modelXbrl, tabWin, tabTitle, contentView, hasToolTip=False, lang=None
    ):
        self.blockViewModelObject = 0  # re-entrancy guard counter for view updates
        self.tabWin = tabWin
        self.viewFrame = contentView
        self.viewFrame.view = self
        tabWin.add(self.viewFrame, text=tabTitle)
        self.modelXbrl = modelXbrl  # (duplicate assignment removed)
        self.hasToolTip = hasToolTip
        self.toolTipText = StringVar()  # (was assigned twice; once suffices)
        if hasToolTip:
            # NOTE(review): self.gridBody is not assigned in this base class;
            # presumably a subclass provides it before calling __init__ —
            # confirm before constructing with hasToolTip=True.
            self.toolTip = ToolTip(
                self.gridBody,
                textvariable=self.toolTipText,
                wraplength=480,
                follow_mouse=True,
                state="disabled",
            )
            self.toolTipColId = None
            self.toolTipRowId = None
        modelManager = self.modelXbrl.modelManager
        self.contextMenuClick = modelManager.cntlr.contextMenuClick
        self.lang = lang
        if modelXbrl:
            modelXbrl.views.append(self)
            if not lang:
                self.lang = modelXbrl.modelManager.defaultLang

    def close(self):
        """Detach this pane from the tab window and from the model's views."""
        del self.viewFrame.view
        self.tabWin.forget(self.viewFrame)
        if self in self.modelXbrl.views:
            self.modelXbrl.views.remove(self)
        self.modelXbrl = None

    def select(self):
        """Bring this pane's tab to the front."""
        self.tabWin.select(self.viewFrame)

    def onClick(self, *args):
        # remember which view was last clicked for controller-level commands
        if self.modelXbrl:
            self.modelXbrl.modelManager.cntlr.currentView = self

    def leave(self, *args):
        # pointer left the pane: forget the tooltip cell position
        self.toolTipColId = None
        self.toolTipRowId = None

    def motion(self, *args):
        # default: no tooltip tracking; subclasses may override
        pass

    def contextMenu(self):
        """Return the pane's right-click menu, creating it on first use."""
        try:
            return self.menu
        except AttributeError:
            self.menu = Menu(self.viewFrame, tearoff=0)
            return self.menu

    def bindContextMenu(self, widget):
        """Bind the platform context-menu click to popUpMenu, once per widget."""
        if not widget.bind(self.contextMenuClick):
            widget.bind(self.contextMenuClick, self.popUpMenu)

    def popUpMenu(self, event):
        """Post the context menu at the mouse position."""
        self.menu.post(event.x_root, event.y_root)

    def menuAddLangs(self):
        """Add a Language sub-menu listing the model's label languages."""
        langsMenu = Menu(self.viewFrame, tearoff=0)
        self.menu.add_cascade(label=_("Language"), menu=langsMenu, underline=0)
        for lang in sorted(self.modelXbrl.langs):
            langsMenu.add_cascade(
                label=lang, underline=0, command=lambda l=lang: self.setLang(l)
            )

    def setLang(self, lang):
        """Switch the pane's label language and re-render (subclass view())."""
        self.lang = lang
        self.view()
<|endoftext|> |
<|endoftext|>"""
This module is an example to convert Html Tables into Xlsx (Excel) tables
Preconfigured here to use SEC Edgar Rendering R files as input
@author: Mark V Systems Limited
(c) Copyright 2014 Mark V Systems Limited, All rights reserved.
"""
import os, sys, re
from lxml import etree, html
from openpyxl.workbook import Workbook
from openpyxl.worksheet import ColumnDimension
from openpyxl.cell import get_column_letter
from openpyxl.style import Alignment
class Report:
    """One report entry parsed from an SEC Edgar FilingSummary.xml."""

    def __init__(self, longName, shortName, htmlFileName):
        # names and R-file name exactly as found in the filing summary
        self.longName = longName
        self.shortName = shortName
        self.htmlFileName = htmlFileName

    def __repr__(self):
        template = "report(longName='{}', shortName='{}', htmlFileName='{}')"
        return template.format(self.longName, self.shortName, self.htmlFileName)
def intCol(elt, attrName, default=None):
    """Return elt's attribute attrName as an int, or default when it is
    missing or not numeric."""
    raw = elt.get(attrName, default)
    try:
        return int(raw)
    except (TypeError, ValueError):
        return default
# matches a numeric presentation cell: optional $, parens or trailing "-" for
# negatives, comma-grouped digits, optional decimal fraction, e.g. "$ (1,234.56)"
numberPattern = re.compile(r"\s*([$]\s*)?[(]?\s*[+-]?[0-9,]+([.][0-9]*)?[)-]?\s*$")
# matches an inline CSS style that hides an element, e.g. style="display:none;..."
displayNonePattern = re.compile(r"\s*display:\s*none;")
def saveTableToExelle(rFilesDir):
    """Convert the HTML R-file tables in an SEC Edgar rendering directory into
    one xlsx workbook (one worksheet per report), saved as exelleOut.xlsx in
    the same directory.

    rFilesDir -- directory containing FilingSummary.xml and the R*.htm files
    """
    # get reports from FilingSummary
    reports = []
    try:
        fsdoc = etree.parse(os.path.join(rFilesDir, "FilingSummary.xml"))
        for rElt in fsdoc.iter(tag="Report"):
            reports.append(
                Report(
                    rElt.findtext("LongName"),
                    rElt.findtext("ShortName"),
                    rElt.findtext("HtmlFileName"),
                )
            )
    except (EnvironmentError, etree.LxmlError) as err:
        print(("FilingSummary.xml: directory {0} error: {1}".format(rFilesDir, err)))
    # NOTE(review): this uses the legacy openpyxl 1.x API (Workbook(encoding=...),
    # get_sheet_names, cell.style.*); it will not run on modern openpyxl.
    wb = Workbook(encoding="utf-8")
    # remove predefined sheets
    for sheetName in wb.get_sheet_names():
        ws = wb.get_sheet_by_name(sheetName)
        if ws is not None:
            wb.remove_sheet(ws)
    sheetNames = set()  # prevent duplicate sheet titles
    for reportNum, report in enumerate(reports):
        sheetName = report.shortName[:31]  # max length 31 for excel title
        if sheetName in sheetNames:
            # disambiguate by appending the report number within the 31-char cap
            sheetName = sheetName[: 31 - len(str(reportNum))] + str(reportNum)
        sheetNames.add(sheetName)
        ws = wb.create_sheet(title=sheetName)
        try:
            # doesn't detect utf-8 encoding the normal way, pass it a string
            # htmlSource = ''
            # with open(os.path.join(rFilesDir, report.htmlFileName), 'rt', encoding='utf-8') as fh:
            #     htmlSource = fh.read()
            # rdoc = html.document_fromstring(htmlSource)
            rdoc = html.parse(os.path.join(rFilesDir, report.htmlFileName))
            row = -1
            mergedAreas = {}  # colNumber: (colspan,lastrow)
            for tableElt in rdoc.iter(tag="table"):
                # skip pop up tables
                if tableElt.get("class") == "authRefData":
                    continue
                # skip tables inside a hidden (display:none) div
                if tableElt.getparent().tag == "div":
                    style = tableElt.getparent().get("style")
                    if style and displayNonePattern.match(style):
                        continue
                colWidths = {}
                for rowNum, trElt in enumerate(tableElt.iter(tag="tr")):
                    # remove passed mergedAreas
                    for mergeCol in [
                        col
                        for col, mergedArea in list(mergedAreas.items())
                        if mergedArea[1] > rowNum
                    ]:
                        del mergedAreas[mergeCol]
                    col = 0
                    for coltag in ("th", "td"):
                        for cellElt in trElt.iter(tag=coltag):
                            if col == 0:
                                row += 1  # new row
                            # skip over columns covered by an earlier rowspan
                            if col in mergedAreas:
                                col += mergedAreas[col][0] - 1
                            text = cellElt.text_content()
                            colspan = intCol(cellElt, "colspan", 1)
                            rowspan = intCol(cellElt, "rowspan", 1)
                            # if col not in colWidths:
                            #     colWidths[col] = 10.0 # some kind of default width
                            # pick up any inline width style; px converted to
                            # approximate excel character-width units
                            for elt in cellElt.iter():
                                style = elt.get("style")
                                if style and "width:" in style:
                                    try:
                                        kw, sep, width = style.partition("width:")
                                        if "px" in width:
                                            width, sep, kw = width.partition("px")
                                            width = float(width) * 0.67777777
                                        else:
                                            width = float(width)
                                        colWidths[col] = width
                                    except ValueError:
                                        pass
                            if rowspan > 1:
                                mergedAreas[col] = (colspan, row + rowspan - 1)
                            cell = ws.cell(row=row, column=col)
                            if text:
                                cell.value = text
                                # right-align numeric-looking cells, wrap others
                                if numberPattern.match(text):
                                    cell.style.alignment.horizontal = (
                                        Alignment.HORIZONTAL_RIGHT
                                    )
                                else:
                                    cell.style.alignment.wrap_text = True
                            if colspan > 1 or rowspan > 1:
                                ws.merge_cells(
                                    start_row=row,
                                    end_row=row + rowspan - 1,
                                    start_column=col,
                                    end_column=col + colspan - 1,
                                )
                            cell.style.alignment.vertical = Alignment.VERTICAL_TOP
                            if coltag == "th":
                                # header cells: centered and bold
                                cell.style.alignment.horizontal = (
                                    Alignment.HORIZONTAL_CENTER
                                )
                                cell.style.font.bold = True
                            cell.style.font.size = 9  # some kind of default size
                            col += colspan
                for col, width in list(colWidths.items()):
                    ws.column_dimensions[get_column_letter(col + 1)].width = width
        except (EnvironmentError, etree.LxmlError) as err:
            print(
                (
                    "{0}: directory {1} error: {2}".format(
                        report.htmlFileName, rFilesDir, err
                    )
                )
            )
    wb.save(os.path.join(rFilesDir, "exelleOut.xlsx"))
if __name__ == "__main__":
    # test directory
    # NOTE(review): developer-local absolute path; replace with your own
    # R-files directory (or take it from sys.argv) to run this script.
    saveTableToExelle(
        r"C:\Users\Herm Fischer\Documents\mvsl\projects\SEC\14.1\R-files\wpoRfiles"
    )
<|endoftext|> |
<|endoftext|>"""
This is an example of a plug-in to both GUI menu and command line/web service
that will provide an option to replace behavior of table linkbase validation to
generate vs diff table linkbase infoset files.
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
"""
def validateTableInfosetMenuEntender(cntlr, validateMenu):
    """Add a Validate-menu checkbox toggling infoset generation vs diffing."""
    # seed the option from saved config (default False), mirrored on modelManager
    cntlr.modelManager.generateTableInfoset = cntlr.config.setdefault(
        "generateTableInfoset", False
    )
    from tkinter import BooleanVar

    generateTableInfoset = BooleanVar(value=cntlr.modelManager.generateTableInfoset)

    def setTableInfosetOption(*args):
        # propagate checkbox changes to both the config and the model manager
        optionValue = generateTableInfoset.get()
        cntlr.config["generateTableInfoset"] = optionValue
        cntlr.modelManager.generateTableInfoset = optionValue

    generateTableInfoset.trace("w", setTableInfosetOption)
    validateMenu.add_checkbutton(
        label=_("Generate table infosets (instead of diffing them)"),
        underline=0,
        variable=generateTableInfoset,
        onvalue=True,
        offvalue=False,
    )
def validateTableInfosetCommandLineOptionExtender(parser):
    """Register the --generate-table-infoset command line option."""
    helpText = _("Generate table instance infosets (instead of diffing them).")
    parser.add_option(
        "--generate-table-infoset",
        action="store_true",
        dest="generateTableInfoset",
        help=helpText,
    )
def validateTableInfosetCommandLineXbrlLoaded(cntlr, options, modelXbrl, *args):
    """Copy the parsed --generate-table-infoset option onto the model manager."""
    generate = getattr(options, "generateTableInfoset", False)
    cntlr.modelManager.generateTableInfoset = generate
def validateTableInfoset(modelXbrl, resultTableUri):
    """Render the table's infoset files, or diff them against existing ones."""
    # diff unless generation was requested; diffToFile=False saves infoset files
    diffToFile = not getattr(modelXbrl.modelManager, "generateTableInfoset", False)
    from arelle import ViewFileRenderedGrid

    ViewFileRenderedGrid.viewRenderedGrid(
        modelXbrl, resultTableUri, diffToFile=diffToFile
    )
    return True  # blocks standard behavior in validate.py
# plugin registration consumed by Arelle's PluginManager
__pluginInfo__ = {
    "name": "Validate Table Infoset (Optional behavior)",
    "version": "0.9",
    "description": "This plug-in adds a feature modify batch validation of table linkbase to save, versus diff, infoset files. ",
    "license": "Apache-2",
    "author": "Mark V Systems Limited",
    "copyright": "(c) Copyright 2012 Mark V Systems Limited, All rights reserved.",
    # classes of mount points (required)
    # GUI: adds the checkbox to the Validation menu
    "CntlrWinMain.Menu.Validation": validateTableInfosetMenuEntender,
    # command line: registers --generate-table-infoset
    "CntlrCmdLine.Options": validateTableInfosetCommandLineOptionExtender,
    # command line: copies the parsed option onto the model manager after load
    "CntlrCmdLine.Xbrl.Loaded": validateTableInfosetCommandLineXbrlLoaded,
    # replaces the standard table-infoset validation behavior
    "Validate.TableInfoset": validateTableInfoset,
}
<|endoftext|> |
<|endoftext|>"""
sphinxEvaluator processes the Sphinx language in the context of an XBRL DTS and instance.
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
Sphinx is a Rules Language for XBRL described by a Sphinx 2 Primer
(c) Copyright 2012 CoreFiling, Oxford UK.
Sphinx copyright applies to the Sphinx language, not to this software.
Mark V Systems conveys neither rights nor license for the Sphinx language.
"""
import operator
from .SphinxContext import HyperspaceBindings, HyperspaceBinding
from .SphinxParser import (
astFunctionReference,
astHyperspaceExpression,
astNode,
astFormulaRule,
astReportRule,
astVariableReference,
)
from .SphinxMethods import (
methodImplementation,
functionImplementation,
aggreateFunctionImplementation,
aggreateFunctionAcceptsFactArgs,
moduleInit as SphinxMethodsModuleInit,
)
from arelle.ModelFormulaObject import Aspect
from arelle.ModelValue import QName
from arelle.ModelInstanceObject import ModelFact
from arelle.ModelXbrl import DEFAULT, NONDEFAULT, DEFAULTorNONDEFAULT
from arelle import XbrlConst, XmlUtil
class SphinxException(Exception):
    """Sphinx evaluation error carrying the offending AST node, an error
    code, and a message template with keyword substitutions."""

    def __init__(self, node, code, message, **kwargs):
        self.node, self.code, self.message = node, code, message
        self.kwargs = kwargs
        # Exception.args carries the fully formatted representation
        self.args = (repr(self),)

    def __repr__(self):
        return _("[{0}] exception: {1} at {2}").format(
            self.code, self.message % self.kwargs, self.node.sourceFileLine
        )
class SphinxSpecialValue:
    """Named sentinel used for the Sphinx 'unbound' and 'none' values."""

    def __init__(self, name):
        self.name = name  # display name, also used as the repr

    def __repr__(self):
        return self.name
# singleton sentinels: UNBOUND marks an evaluation with no binding, NONE an
# explicit Sphinx "none" value (distinct from Python None)
UNBOUND = SphinxSpecialValue("unbound")
NONE = SphinxSpecialValue("none")
def evaluateRuleBase(sphinxContext):
    """Check rule-base preconditions, then evaluate every rule program.

    Constants are cleared before and after the run so each invocation
    re-derives them, and per-rule evaluation state is reset between rules.
    """
    # clear any residual constant values from a prior run
    for constantNode in sphinxContext.constants.values():
        constantNode.value = None
    clearEvaluation(sphinxContext)
    # every rule-base precondition must pass before any rule runs
    for preconditionNode in sphinxContext.ruleBasePreconditionNodes:
        passes = evaluate(preconditionNode, sphinxContext)
        clearEvaluation(sphinxContext)
        if not passes:
            return
    # evaluate rules
    for ruleProg in sphinxContext.rules:
        evaluate(ruleProg, sphinxContext)
        clearEvaluation(sphinxContext)
    # dereference constants
    for constantNode in sphinxContext.constants.values():
        constantNode.value = None
def clearEvaluation(sphinxContext):
    """Reset per-evaluation state: tags, local variables, hyperspace bindings."""
    sphinxContext.tags.clear()
    sphinxContext.localVariables.clear()
    # each close() resets sphinxContext.hyperspaceBindings to its parent
    # bindings; keep closing until none remain
    bindings = sphinxContext.hyperspaceBindings
    while bindings:
        bindings.close()
        bindings = sphinxContext.hyperspaceBindings
def evaluate(node, sphinxContext, value=False, fallback=None, hsBoundFact=False):
    """Evaluate a Sphinx AST node (or a container of nodes) to its result.

    node -- astNode (dispatched through the module-level `evaluator` table by
        class name), a tuple/list/set of nodes (evaluated element-wise), or
        any other value (returned unchanged)
    value -- True to dereference hyperspace bindings and nested nodes down to
        their python value
    fallback -- when not None, returned if evaluation raises StopIteration
        (an unbound evaluation); when None, StopIteration propagates
    hsBoundFact -- True to return the bound fact itself rather than its value
    """
    if isinstance(node, astNode):
        if fallback is None:
            result = evaluator[node.__class__.__name__](node, sphinxContext)
        else:
            try:
                result = evaluator[node.__class__.__name__](node, sphinxContext)
            except StopIteration:
                # StopIteration signals an unbound evaluation; trace and
                # substitute the caller-provided fallback
                if sphinxContext.formulaOptions.traceVariableSetExpressionEvaluation:
                    sphinxContext.modelXbrl.info(
                        "sphinx:trace",
                        _("%(node)s has unbound evaluation"),
                        sourceFileLine=node.sourceFileLine,
                        node=str(node),
                    )
                return fallback
        if sphinxContext.formulaOptions.traceVariableSetExpressionEvaluation:
            sphinxContext.modelXbrl.info(
                "sphinx:trace",
                _("%(node)s evaluation: %(value)s"),
                sourceFileLine=node.sourceFileLine,
                node=str(node),
                value=result,
            )
        if result is not None:
            if isinstance(result, HyperspaceBinding):
                if hsBoundFact:  # return fact, not the value of fact
                    return result.yieldedFact
                elif value:
                    return result.value
            # dereference nodes to their value
            if (value or hsBoundFact) and isinstance(result, astNode):
                return evaluate(result, sphinxContext, value, fallback, hsBoundFact)
            return result
        return result
    elif isinstance(node, (tuple, list)):
        # evaluate each member, preserving sequence-ness (as a list)
        return [
            evaluate(item, sphinxContext, value, fallback, hsBoundFact) for item in node
        ]
    elif isinstance(node, set):
        return set(
            evaluate(item, sphinxContext, value, fallback, hsBoundFact) for item in node
        )
    else:
        # plain python value: nothing to evaluate
        return node
def evaluateAnnotationDeclaration(node, sphinxContext):
    """Annotation declarations have no runtime effect; evaluate to None."""
    return None
def evaluateBinaryOperation(node, sphinxContext):
    """Evaluate a binary operation node with Sphinx unbound-value semantics.

    ":=" (formula assertion) returns the (left, right) value pair, raising
    StopIteration (caught by evaluate()'s fallback handling) when the rule's
    bind mode finds a required side unbound.  The "|+|"-family operators
    treat "|" on a side as "unbound there aborts", otherwise substitute 0.
    Other operators propagate UNBOUND (with or-short-circuit on a bound
    True left) and dispatch through an operator table; unsupported operators
    and type/zero-division failures are logged and yield None.
    """
    leftValue = evaluate(node.leftExpr, sphinxContext, value=True, fallback=UNBOUND)
    rightValue = evaluate(node.rightExpr, sphinxContext, value=True, fallback=UNBOUND)
    op = node.op
    if sphinxContext.formulaOptions.traceVariableExpressionEvaluation:
        sphinxContext.modelXbrl.info(
            "sphinx:trace",
            _("Binary op %(op)s v1: %(leftValue)s, v2: %(rightValue)s"),
            sourceFileLine=node.sourceFileLine,
            op=op,
            leftValue=leftValue,
            rightValue=rightValue,
        )
    if op == ":=":
        # formula rule assignment: the rule's bind mode decides which side(s)
        # must be bound; StopIteration marks the whole evaluation unbound
        if sphinxContext.ruleNode.bind == "left":
            if rightValue is UNBOUND:
                raise StopIteration
        elif sphinxContext.ruleNode.bind == "right":
            if leftValue is UNBOUND:
                raise StopIteration
        elif sphinxContext.ruleNode.bind == "either":
            if leftValue is UNBOUND and rightValue is UNBOUND:
                raise StopIteration
        else:  # both or default
            if leftValue is UNBOUND or rightValue is UNBOUND:
                raise StopIteration
        return (leftValue, rightValue)
    elif op in {"|+|", "|+", "+|", "+", "|-|", "|-", "-|", "-"}:
        # "|" adjacent to a side means an unbound value there aborts the
        # evaluation; otherwise an unbound side contributes 0
        if leftValue is UNBOUND:
            if op[0] == "|":
                raise StopIteration
            else:
                leftValue = 0
        if rightValue is UNBOUND:
            if op[-1] == "|":
                raise StopIteration
            else:
                rightValue = 0
    else:
        if leftValue is UNBOUND:
            return UNBOUND
        if rightValue is UNBOUND:
            # "or" with a bound-true left can still short-circuit to True
            if op == "or" and leftValue:
                return True
            return UNBOUND
        if op == "/" and rightValue == 0:  # prevent divide by zero
            return UNBOUND
    try:
        result = {
            "+": operator.add,
            "-": operator.sub,
            "*": operator.mul,
            "/": operator.truediv,
            "<": operator.lt,
            ">": operator.gt,
            "<=": operator.le,
            ">=": operator.ge,
            "==": operator.eq,
            "!=": operator.ne,
            "and": operator.and_,
            "or": operator.or_,
        }[op](leftValue, rightValue)
        return result
    except KeyError:
        # operator not in the dispatch table (e.g. a "|+|"-family form that
        # reduced to its bare arithmetic is expected to be present above)
        sphinxContext.modelXbrl.error(
            "sphinx:error",
            _('Operation "%(op)s" not implemented for %(node)s'),
            sourceFileLine=node.sourceFileLine,
            op=op,
            node=str(node),
        )
    except (TypeError, ZeroDivisionError) as err:
        sphinxContext.modelXbrl.error(
            "sphinx:error",
            _('Operation "%(op)s" raises exception %(error)s for %(node)s'),
            sourceFileLine=node.sourceFileLine,
            op=op,
            node=str(node),
            error=str(err),
        )
    return None
def evaluateConstant(node, sphinxContext):
    """Evaluate a constant's expression once, memoizing it on node.value.

    The expression is evaluated in a fresh hyperspace-bindings frame and with
    an empty local-variable scope (saved and restored), so a constant cannot
    see or leak the caller's evaluation state.
    """
    if node.value is None:  # first time
        hsBindings = HyperspaceBindings(
            sphinxContext
        )  # must have own hsBindings from caller
        previousLocalVariables = sphinxContext.localVariables  # save local variables
        sphinxContext.localVariables = {}
        node.value = evaluate(node.expr, sphinxContext)
        if sphinxContext.formulaOptions.traceVariableSetExpressionEvaluation:
            sphinxContext.modelXbrl.info(
                "sphinx:trace",
                _("Constant %(name)s assigned value: %(value)s"),
                sourceFileLine=node.sourceFileLine,
                name=node.constantName,
                value=node.value,
            )
        hsBindings.close()
        sphinxContext.localVariables = previousLocalVariables
    return node.value
def evaluateFor(node, sphinxContext):
    """Evaluate a for-expression body under the loop's current binding."""
    # add a hyperspaceBinding to sphinxContext for this node
    forBinding = sphinxContext.hyperspaceBindings.forBinding(node)
    # set variable here because although needed for the next() operation, it
    # will be cleared outside of the for's context
    sphinxContext.localVariables[node.name] = forBinding.yieldedValue
    return evaluate(node.expr, sphinxContext)
def evaluateFunctionDeclaration(node, sphinxContext, args):
    """Call a user-declared Sphinx function with the given arguments.

    args -- dict of name->value (only names matching the declared params are
        used) or a positional sequence (must match the parameter count, else
        an arity error is logged and None returned)

    Parameters temporarily shadow same-named local variables; the previous
    values are restored when the call finishes, including when the body's
    evaluation is unbound (StopIteration is re-raised after cleanup).
    """
    overriddenVariables = {}
    if isinstance(args, dict):
        # args may not all be used in the function declaration, just want used ones
        argDict = dict(
            (name, value) for name, value in list(args.items()) if name in node.params
        )
    else:  # purely positional args
        # positional parameters named according to function prototype
        if len(args) != len(node.params):
            sphinxContext.modelXbrl.log(
                "ERROR",
                "sphinx.functionArgumentsMismatch",
                _(
                    "Function %(name)s requires %(required)s parameters but %(provided)s are provided"
                ),
                sourceFileLine=node.sourceFileLine,
                name=node.name,
                required=len(node.params),
                provided=len(args),
            )
            return None
        argDict = dict((paramName, args[i]) for i, paramName in enumerate(node.params))
    for name, value in list(argDict.items()):
        # remember any local variable this parameter shadows
        if name in sphinxContext.localVariables:
            overriddenVariables[name] = sphinxContext.localVariables[name]
        sphinxContext.localVariables[name] = value

    def clearFunctionArgs():
        # remove parameter bindings and restore shadowed locals
        for name in list(argDict.keys()):
            del sphinxContext.localVariables[name]
        sphinxContext.localVariables.update(overriddenVariables)
        overriddenVariables.clear()

    try:
        result = evaluate(node.expr, sphinxContext)
        clearFunctionArgs()
        return result
    except StopIteration as ex:
        # unbound evaluation: clean up, then propagate to the caller
        clearFunctionArgs()
        raise ex  # reraise exception
def evaluateFunctionReference(node, sphinxContext):
    """Evaluate a function reference: severity marker, aggregate, user-defined
    function or macro, or built-in implementation.

    "error"/"warning"/"info"/"pass" set the dynamic severity and evaluate to
    None (their named parameters become message tags); "unbound" yields
    UNBOUND; aggregates are delegated to evaluateAggregateFunction.  For user
    macros (functionType != "function") arguments are passed unevaluated.
    Raises SphinxException for an unknown function name.
    """
    name = node.name
    if name in ("error", "warning", "info", "pass"):
        sphinxContext.dynamicSeverity = node.name
    elif name == "unbound":
        return UNBOUND
    if name in aggreateFunctionImplementation:
        return evaluateAggregateFunction(node, sphinxContext, name)
    if name in sphinxContext.functions:  # user defined function
        # macros (functionType != "function") receive AST args, not values
        resolveValues = sphinxContext.functions[name].functionType == "function"
        namedParametersAssignedTo = sphinxContext.localVariables
    else:
        resolveValues = True
        if name in ("error", "warning", "info", "pass"):
            # named parameters of severity functions become message tags
            namedParametersAssignedTo = sphinxContext.tags
        else:
            namedParametersAssignedTo = sphinxContext.localVariables
    # evaluate local variables
    for localVar in node.localVariables:
        evaluate(localVar, sphinxContext)
    # evaluate args
    args = []
    tagName = None
    l = len(node.args)
    for i in range(l):
        arg = node.args[i]
        if arg == "=":
            # "name = value" style argument: the previous token is the tag name
            if i > 0:
                tagName = node.args[i - 1]
        elif i == l - 1 or node.args[i + 1] != "=":
            # this token is a value (not a tag name followed by "=")
            if resolveValues:  # macros pass in the argument, not value
                arg = evaluate(arg, sphinxContext, value=True)
            elif isinstance(arg, astVariableReference) and getattr(
                sphinxContext.localVariables.get(arg.variableName),
                "isMacroParameter",
                False,
            ):
                # pass original macro parameter, not a reference to it (otherwise causes looping)
                arg = sphinxContext.localVariables[arg.variableName]
            elif isinstance(arg, astNode):
                arg.isMacroParameter = True
            args.append(arg)
            if tagName:
                namedParametersAssignedTo[tagName] = arg
                tagName = None
    if name in ("error", "warning", "info", "pass"):
        result = None
    # call function here
    elif name in sphinxContext.functions:  # user defined function
        result = evaluateFunctionDeclaration(
            sphinxContext.functions[name], sphinxContext, args
        )
    # call built-in functions
    elif name in functionImplementation:
        result = functionImplementation[name](node, sphinxContext, args)
    else:
        raise SphinxException(
            node,
            "sphinx:functionName",
            _("unassigned function name %(name)s"),
            name=name,
        )
    # remove local variables
    for localVar in node.localVariables:
        del sphinxContext.localVariables[localVar.name]
    return result
def evaluateAggregateFunction(node, sphinxContext, name):
    """Evaluate aggregate function `name` over its (possibly fact-bound) argument.

    The single argument expression is evaluated once per hyperspace binding
    iteration; values (facts, when the aggregate accepts fact args) are
    collected into `args` and passed to the aggregate implementation.  The
    iteration range discovered on the first evaluation is memoized on the
    node as aggregationHsBindings.  Returns the aggregate's result, or None
    when the implementation raises TypeError/ZeroDivisionError (logged).
    """
    # determine if evaluating args found hyperspace (first time)
    args = []
    iterateAbove, bindingsLen = getattr(node, "aggregationHsBindings", (None, None))
    firstTime = bindingsLen is None
    hsBindings = sphinxContext.hyperspaceBindings
    parentAggregationNode = hsBindings.aggregationNode
    parentIsValuesIteration = hsBindings.isValuesIteration
    hsBindings.aggregationNode = node  # block removing nested aspect bindings
    hsBindings.isValuesIteration = False
    prevHsBindingsLen = len(hsBindings.hyperspaceBindings)
    hsBoundFact = aggreateFunctionAcceptsFactArgs[name]
    arg = node.args[0]
    try:
        while True:  # possibly multiple bindings
            # evaluate local variables
            for localVar in node.localVariables:
                evaluate(localVar, sphinxContext)
            value = evaluate(arg, sphinxContext, value=True, hsBoundFact=hsBoundFact)
            if isinstance(value, (list, set)):
                for listArg in value:
                    # bug fix: previously tested `value is not UNBOUND`, which
                    # is always true for a list/set; each element must be
                    # screened individually
                    if listArg is not UNBOUND:
                        args.append(evaluate(listArg, sphinxContext, value=True))
            elif value is not UNBOUND:
                args.append(value)
            if firstTime:
                if len(hsBindings.hyperspaceBindings) == prevHsBindingsLen:
                    # no hs bindings, just scalar
                    break
                else:  # has hs bindings, evaluate rest of them
                    firstTime = False
                    iterateAbove = prevHsBindingsLen - 1
                    bindingsLen = len(hsBindings.hyperspaceBindings)
                    node.aggregationHsBindings = (iterateAbove, bindingsLen)
            hsBindings.next(iterateAbove, bindingsLen)
    except StopIteration:
        pass  # no more bindings
    hsBindings.isValuesIteration = parentIsValuesIteration
    hsBindings.aggregationNode = parentAggregationNode
    # remove local variables
    for localVar in node.localVariables:
        # bug fix: the dict is keyed by variable names (strings), so membership
        # must be tested on localVar.name, not the AST node itself (the old
        # test was never true, leaking the locals)
        if localVar.name in sphinxContext.localVariables:
            del sphinxContext.localVariables[localVar.name]
    if sphinxContext.formulaOptions.traceVariableExpressionEvaluation:
        sphinxContext.modelXbrl.info(
            "sphinx:trace",
            _("Aggregative function %(name)s arguments: %(args)s"),
            sourceFileLine=node.sourceFileLine,
            name=name,
            args=",".join(str(a) for a in args),
        )
    try:
        return aggreateFunctionImplementation[name](node, sphinxContext, args)
    except (TypeError, ZeroDivisionError) as err:
        sphinxContext.modelXbrl.error(
            "sphinx:error",
            _("Function %(name)s raises exception %(error)s in %(node)s"),
            sourceFileLine=node.sourceFileLine,
            name=name,
            node=str(node),
            error=str(err),
        )
        return None
def evaluateHyperspaceExpression(node, sphinxContext):
    """Resolve a hyperspace expression by registering a binding for *node*.

    Delegates to the context's hyperspace-bindings manager, which adds (or
    reuses) a binding for this node and returns that binding object.
    """
    return sphinxContext.hyperspaceBindings.nodeBinding(node)
def evaluateIf(node, sphinxContext):
    """Evaluate an if/then/else node.

    The condition is evaluated to a value; its truthiness selects which
    branch expression is then evaluated and returned.
    """
    conditionValue = evaluate(node.condition, sphinxContext, value=True)
    chosenBranch = node.thenExpr if conditionValue else node.elseExpr
    return evaluate(chosenBranch, sphinxContext)
def evaluateMessage(node, sphinxContext, resultTags, hsBindings):
    """Expand a rule message, substituting ${tag[.modifier]} references.

    Tags are resolved against (in order): the built-in "context" tag, the
    rule's result tags, the context's user tags, and tagged constants.
    Unknown tags are reported and left as literal ${tag} text.

    Returns the fully formatted message string.
    """

    def evaluateTagExpr(tagExpr, modifier):
        # Resolve a tag expression according to its optional modifier:
        # .value -> evaluated value only; .context -> context view only;
        # default -> "value contextView".
        if modifier == "value":
            value = evaluate(tagExpr, sphinxContext, value=True)
        elif modifier == "context":
            value = contextView(sphinxContext, tagExpr)
        else:
            value = "{0} {1}".format(
                evaluate(tagExpr, sphinxContext, value=True), contextView(sphinxContext)
            )
        return value

    msgstr = evaluate(node.message, sphinxContext, value=True)
    text = []
    args = []
    i = 0
    while True:
        j = msgstr.find("${", i)
        if j >= 0:
            text.append(msgstr[i:j])  # previous part of string
            k = msgstr.find("}", j + 2)
            if k > j:
                text.append("{" + str(len(args)) + "}")
                tag, sep, modifier = msgstr[j + 2 : k].strip().partition(".")
                if tag == "context":
                    # bug fix: a stray trailing comma previously made this a
                    # 1-tuple, so messages rendered as "('...',)" instead of
                    # the context view text
                    value = contextView(sphinxContext)
                elif tag in resultTags:
                    # NOTE(review): resultTags is built as a plain dict by
                    # evaluateRule but is dereferenced via .tags here —
                    # confirm whether a tag-dictionary wrapper is expected
                    value = evaluateTagExpr(resultTags.tags[tag], modifier)
                elif tag in sphinxContext.tags:
                    value = evaluateTagExpr(sphinxContext.tags[tag], modifier)
                elif tag in sphinxContext.taggedConstants:
                    value = evaluateTagExpr(
                        evaluateConstant(
                            sphinxContext.taggedConstants[tag], sphinxContext
                        ),
                        modifier,
                    )
                elif tag in ("trace", "left", "right", "difference"):
                    value = 'Tag "{0}" is not yet supported'.format(tag)
                else:
                    sphinxContext.modelXbrl.log(
                        "ERROR",
                        "sphinx.unboundMessageTag",
                        _("Validation rule tag %(tag)s is not Bound"),
                        sourceFileLine=node.sourceFileLine,
                        tag=tag,
                    )
                    value = "${" + tag + "}"
                args.append(value)
                i = k + 1
            else:
                # bug fix: an unterminated "${" previously left i unchanged
                # and looped forever; emit the remainder verbatim and stop
                text.append(msgstr[j:])
                break
        else:
            text.append(msgstr[i:])
            break
    messageStr = "".join(text)
    return messageStr.format(*args)
def evaluateMethodReference(node, sphinxContext):
    """Evaluate a method reference: evaluate each argument, then dispatch.

    Argument 0 is evaluated with hsBoundFact=True so it is not dereferenced;
    unknown method names fall back to the 'unknown' implementation.
    """
    evaluatedArgs = [
        evaluate(argNode, sphinxContext, value=True, hsBoundFact=(argIndex == 0))
        for argIndex, argNode in enumerate(node.args)
    ]
    implementation = methodImplementation.get(
        node.name, methodImplementation["unknown"]
    )
    return implementation(node, sphinxContext, evaluatedArgs)
def evaluateNoOp(node, sphinxContext):
    """No-op evaluator for AST node types that produce no value."""
    return None
def evaluateNumericLiteral(node, sphinxContext):
    """Return the numeric value carried by the literal AST node."""
    return node.value
def evaluatePreconditionDeclaration(node, sphinxContext):
    """Evaluate a precondition declaration in its own hyperspace binding scope."""
    bindings = HyperspaceBindings(sphinxContext)
    outcome = evaluate(node.expr, sphinxContext, value=True)
    bindings.close()
    return outcome
def evaluatePreconditionReference(node, sphinxContext):
    """Check each referenced precondition, stopping at the first failure.

    Evaluation state is cleared after every checked precondition; names with
    no matching declaration are skipped.  Returns True when all pass.
    """
    for preconditionName in node.names:
        if preconditionName not in sphinxContext.preconditionNodes:
            continue
        passes = evaluate(
            sphinxContext.preconditionNodes[preconditionName],
            sphinxContext,
            value=True,
        )
        clearEvaluation(sphinxContext)
        if not passes:
            return False
    return True
def evaluateQnameLiteral(node, sphinxContext):
    """Return the QName value carried by the literal AST node."""
    return node.value
def evaluateReportRule(node, sphinxContext):
    """No-op: report rule nodes yield no value from this evaluator."""
    return None
def evaluateRuleBasePrecondition(node, sphinxContext):
    """Evaluate the rule base's precondition; vacuously True when absent."""
    if not node.precondition:
        return True
    return evaluate(node.precondition, sphinxContext, value=True)
def evaluateStringLiteral(node, sphinxContext):
    """Return the text carried by the string-literal AST node."""
    return node.text
def evaluateTagAssignment(node, sphinxContext):
    """Evaluate the tagged expression, record it under its tag name, return it."""
    taggedValue = evaluate(node.expr, sphinxContext, value=True)
    sphinxContext.tags[node.tagName] = taggedValue
    return taggedValue
def evaluateTagReference(node, sphinxContext):
    """Look up a previously assigned tag; raise SphinxException when unbound."""
    if node.name in sphinxContext.tags:
        return sphinxContext.tags[node.name]
    raise SphinxException(
        node, "sphinx:tagName", _("unassigned tag name %(name)s"), name=node.name
    )
def evaluateRule(node, sphinxContext):
    """Evaluate a formula, report, or validation rule over all its bindings.

    Checks the rule's precondition, then iterates every hyperspace binding
    combination; each iteration evaluates the rule's variable assignments
    and expression, and logs a message at the configured (or dynamically
    computed) severity when the rule fires.  Always returns None.
    """
    isFormulaRule = isinstance(node, astFormulaRule)
    isReportRule = isinstance(node, astReportRule)
    name = node.name or ("sphinx.report" if isReportRule else "sphinx.raise")
    nodeId = node.nodeTypeName + " " + name
    if node.precondition:
        result = evaluate(node.precondition, sphinxContext, value=True)
        if sphinxContext.formulaOptions.traceVariableSetExpressionResult:
            sphinxContext.modelXbrl.info(
                "sphinx:trace",
                _("%(node)s precondition evaluation: %(value)s"),
                sourceFileLine=node.sourceFileLine,
                node=nodeId,
                value=result,
            )
        if not result:
            return None
    # nest hyperspace binding
    sphinxContext.ruleNode = node
    hsBindings = None
    ruleIteration = 0
    try:
        hsBindings = HyperspaceBindings(sphinxContext)
        while True:
            ruleIteration += 1
            # per-iteration state is reset: dynamic severity, tags, locals
            sphinxContext.dynamicSeverity = None
            sphinxContext.tags.clear()
            sphinxContext.localVariables.clear()
            if sphinxContext.formulaOptions.traceVariableSetExpressionResult:
                sphinxContext.modelXbrl.info(
                    "sphinx:trace",
                    _("%(node)s starting iteration %(iteration)s"),
                    sourceFileLine=node.sourceFileLine,
                    node=nodeId,
                    iteration=ruleIteration,
                )
            for varAssignNode in node.variableAssignments:
                evaluateVariableAssignment(varAssignNode, sphinxContext)
            result = evaluate(node.expr, sphinxContext, value=True)
            # NOTE(review): when result is UNBOUND, resultTags is not set for
            # this iteration — confirm the logging branch cannot be entered
            # with a dynamic severity in that case
            if result is UNBOUND:
                result = None  # nothing to do for this pass
            elif isFormulaRule:
                # formula rules evaluate to a (left, right) pair; the rule
                # fires when the two sides differ
                left, right = result
                if left is UNBOUND:
                    difference = UNBOUND
                elif right is UNBOUND:
                    difference = UNBOUND
                else:
                    difference = abs(left - right)
                result = difference != 0
                resultTags = {"left": left, "right": right, "difference": difference}
                sphinxContext.dynamicSeverity = None
                if node.severity in sphinxContext.functions:
                    # severity is a user-declared function: run it to set
                    # sphinxContext.dynamicSeverity
                    evaluateFunctionDeclaration(
                        sphinxContext.functions[node.severity],
                        sphinxContext,
                        {"difference": difference, "left": left, "right": right},
                    )
                    if (
                        sphinxContext.dynamicSeverity is None
                        or sphinxContext.dynamicSeverity == "pass"
                    ):  # don't process pass
                        sphinxContext.dynamicSeverity = None
                        result = False
            else:
                if isReportRule:
                    resultTags = {"value": result}
                else:
                    resultTags = {}
            if sphinxContext.formulaOptions.traceVariableSetExpressionResult:
                sphinxContext.modelXbrl.info(
                    "sphinx:trace",
                    _(
                        "%(node)s result %(result)s %(severity)s iteration %(iteration)s"
                    ),
                    sourceFileLine=node.sourceFileLine,
                    node=nodeId,
                    iteration=ruleIteration,
                    result=result,
                    severity=(
                        sphinxContext.dynamicSeverity
                        or node.severity
                        or ("info" if isReportRule else "error")
                    ),
                )
            # log when the rule fired, the rule is a report rule (reports
            # always log), or a dynamic severity other than "pass" was set
            if (result or isReportRule) or (
                sphinxContext.dynamicSeverity
                and sphinxContext.dynamicSeverity != "pass"
            ):
                severity = (
                    sphinxContext.dynamicSeverity
                    or node.severity
                    or ("info" if isReportRule else "error")
                )
                if isinstance(severity, astFunctionReference):
                    severity = severity.name
                logSeverity = {"error": "ERROR", "warning": "WARNING", "info": "INFO"}[
                    severity
                ]
                if node.message:
                    sphinxContext.modelXbrl.log(
                        logSeverity,
                        name,
                        evaluateMessage(
                            node.message, sphinxContext, resultTags, hsBindings
                        ),
                        sourceFileLine=[node.sourceFileLine]
                        + [
                            (fact.modelDocument.uri, fact.sourceline)
                            for fact in hsBindings.boundFacts
                        ],
                        severity=severity,
                    )
                elif isFormulaRule:
                    sphinxContext.modelXbrl.log(
                        logSeverity,
                        name,
                        _("Formula %(severity)s difference %(value)s for %(aspects)s"),
                        sourceFileLine=[node.sourceFileLine]
                        + [
                            (fact.modelDocument.uri, fact.sourceline)
                            for fact in hsBindings.boundFacts
                        ],
                        severity=severity,
                        value=difference,
                        aspects=contextView(sphinxContext),
                    )
                elif isReportRule:
                    sphinxContext.modelXbrl.log(
                        logSeverity,
                        name,
                        _("Report %(severity)s %(value)s for %(aspects)s"),
                        sourceFileLine=[node.sourceFileLine]
                        + [
                            (fact.modelDocument.uri, fact.sourceline)
                            for fact in hsBindings.boundFacts
                        ],
                        severity=severity,
                        value=result,
                        aspects=contextView(sphinxContext),
                    )
                else:
                    sphinxContext.modelXbrl.log(
                        logSeverity,
                        name,
                        _("Validation rule %(severity)s for %(aspects)s"),
                        sourceFileLine=[node.sourceFileLine]
                        + [
                            (fact.modelDocument.uri, fact.sourceline)
                            for fact in hsBindings.boundFacts
                        ],
                        severity=severity,
                        aspects=contextView(sphinxContext),
                    )
            next(hsBindings)  # raises StopIteration when done
    except StopIteration:
        if sphinxContext.formulaOptions.traceVariableSetExpressionResult:
            sphinxContext.modelXbrl.info(
                "sphinx:trace",
                _("%(node)s StopIteration"),
                sourceFileLine=node.sourceFileLine,
                node=nodeId,
            )
    except SphinxException as ex:
        sphinxContext.modelXbrl.log(
            "ERROR",
            ex.code,
            _("Exception in %(node)s: %(exception)s"),
            node=nodeId,
            ruleName=name,
            exception=ex.message % ex.kwargs,
            sourceFileLine=[node.sourceFileLine]
            + ([ex.node.sourceFileLine] if ex.node is not node else []),
            **ex.kwargs
        )
    if hsBindings is not None:
        hsBindings.close()
    return None
def noop(arg):
    """Identity function (used as the implementation of the 'values' operator)."""
    return arg
def evaluateUnaryOperation(node, sphinxContext):
    """Apply a unary operator (+, -, not, values) to its evaluated operand.

    'brackets' is a pure grouping construct and returns the inner expression
    unevaluated; an UNBOUND operand propagates UNBOUND.  Unimplemented
    operators are reported and yield None.
    """
    if node.op == "brackets":  # parentheses around an expression
        return node.expr
    operand = evaluate(node.expr, sphinxContext, value=True, fallback=UNBOUND)
    if operand is UNBOUND:
        return UNBOUND
    operations = {
        "+": operator.pos,
        "-": operator.neg,
        "not": operator.not_,
        "values": noop,
    }
    if node.op in operations:
        return operations[node.op](operand)
    sphinxContext.modelXbrl.error(
        "sphinx:error",
        _("%(node)s operation %(op)s not implemented"),
        modelObject=node,
        op=node.op,
    )
    return None
def evaluateValuesIteration(node, sphinxContext):
    """Evaluate a 'values' iteration, which must be nested in an aggregation.

    Flags the current binding set as a values iteration so the enclosing
    aggregative function iterates all values; warns when there is no
    enclosing aggregation node.
    """
    bindings = sphinxContext.hyperspaceBindings
    if bindings.aggregationNode is not None:
        bindings.isValuesIteration = True
    else:
        sphinxContext.modelXbrl.error(
            "sphinx:warning",
            _("Values iteration expected to be nested in an aggregating function"),
            modelObject=node,
        )
    return evaluate(node.expr, sphinxContext)
def evaluateVariableAssignment(node, sphinxContext):
    """Bind a local variable (and, when tagged, a tag) to the evaluated expression."""
    assignedValue = evaluate(node.expr, sphinxContext)
    sphinxContext.localVariables[node.variableName] = assignedValue
    if node.tagName:
        sphinxContext.tags[node.tagName] = assignedValue
    return assignedValue
def evaluateVariableReference(node, sphinxContext):
    """Resolve a variable reference: local variables first, then named constants.

    Raises SphinxException when the name is bound in neither scope.
    """
    localVars = sphinxContext.localVariables
    if node.variableName in localVars:
        return localVars[node.variableName]
    if node.variableName in sphinxContext.constants:
        return evaluateConstant(
            sphinxContext.constants[node.variableName], sphinxContext
        )
    raise SphinxException(
        node,
        "sphinx:variableName",
        _("unassigned variable name %(name)s"),
        name=node.variableName,
    )
def evaluateWith(node, sphinxContext):
    """Evaluate a with-expression.

    Covered clauses of the restriction expression match uncovered aspects of
    the body expression.  A restriction binding is pushed for the duration of
    the body evaluation and is always popped again, even when evaluation
    raises (try/finally replaces the previous duplicated pop + ``raise ex``,
    which also reset part of the traceback).
    """
    hsBindings = sphinxContext.hyperspaceBindings
    withRestrictionBinding = hsBindings.nodeBinding(
        node.restrictionExpr, isWithRestrictionNode=True
    )
    hsBindings.withRestrictionBindings.append(withRestrictionBinding)
    try:
        for varAssignNode in node.variableAssignments:
            evaluateVariableAssignment(varAssignNode, sphinxContext)
        return evaluate(node.bodyExpr, sphinxContext)
    finally:
        # pop the restriction binding on every path (success or exception)
        del hsBindings.withRestrictionBindings[-1]
def contextView(sphinxContext, fact=None):
    """Return a human-readable view of the currently bound aspect values.

    With a ModelFact, formats as "qname[aspect=value, ...]" with the concept
    aspect omitted; otherwise formats just "[aspect=value, ...]".

    Bug fix: the ModelFact branch used the format string "{2}={1}" with only
    two arguments, which raised IndexError whenever any aspect was bound;
    it now uses "{0}={1}" like the other branch.
    """
    boundFactItems = list(
        sphinxContext.hyperspaceBindings.aspectBoundFacts.items()
    )
    if isinstance(fact, ModelFact):
        return "{0}[{1}]".format(
            fact.qname,
            ", ".join(
                "{0}={1}".format(
                    aspectName(aspect), factAspectValue(boundFact, aspect, view=True)
                )
                for aspect, boundFact in boundFactItems
                if factAspectValue(boundFact, aspect) and aspect != Aspect.CONCEPT
            ),
        )
    return "[{0}]".format(
        ", ".join(
            "{0}={1}".format(
                aspectName(aspect), factAspectValue(boundFact, aspect, view=True)
            )
            for aspect, boundFact in boundFactItems
            if factAspectValue(boundFact, aspect)
        )
    )
def aspectName(aspect):
    """Return a short display name for an aspect.

    QName aspects (dimensions) are returned as-is; standard aspects map to
    fixed names; anything else falls back to Aspect.label, then to str().

    Bug fix: the function previously returned ``.get(aspect)`` unconditionally,
    so the Aspect.label / str() fallback below was unreachable dead code and
    unknown aspects yielded None.
    """
    if isinstance(aspect, QName):
        return aspect
    standardName = {
        Aspect.LOCATION: "tuple",
        Aspect.CONCEPT: "primary",
        Aspect.ENTITY_IDENTIFIER: "entity",
        Aspect.PERIOD: "period",
        Aspect.UNIT: "unit",
        Aspect.NON_XDT_SEGMENT: "segment",
        Aspect.NON_XDT_SCENARIO: "scenario",
    }.get(aspect)
    if standardName is not None:
        return standardName
    if aspect in Aspect.label:
        return Aspect.label[aspect]
    return str(aspect)
def factAspectValue(fact, aspect, view=False):
    """Return the value of *aspect* for *fact*, optionally formatted for display.

    fact may also be one of the DEFAULT / NONDEFAULT / DEFAULTorNONDEFAULT
    sentinels used by dimension matching, rendered as "none" / "*" / "**".
    Falls through (implicitly returning None) for aspects not handled here.
    """
    if fact is DEFAULT:
        return "none"
    elif fact is NONDEFAULT:
        return "*"
    elif fact is DEFAULTorNONDEFAULT:
        return "**"
    elif aspect == Aspect.LOCATION:
        parentQname = fact.getparent().qname
        if parentQname == XbrlConst.qnXbrliXbrl:  # not tuple
            return NONE
        return parentQname  # tuple
    elif aspect == Aspect.CONCEPT:
        return fact.qname
    elif fact.isTuple or fact.context is None:
        return NONE  # subsequent aspects don't exist for tuples
    elif aspect == Aspect.UNIT:
        if fact.unit is None:
            return NONE
        measures = fact.unit.measures
        if measures[1]:
            # unit has a denominator: render "numerator / denominator"
            return "{0} / {1}".format(
                " ".join(str(m) for m in measures[0]),
                " ".join(str(m) for m in measures[1]),
            )
        else:
            return " ".join(str(m) for m in measures[0])
    else:
        # remaining aspects are all derived from the fact's context
        context = fact.context
        if aspect == Aspect.PERIOD:
            return (
                "forever"
                if context.isForeverPeriod
                else XmlUtil.dateunionValue(
                    context.instantDatetime, subtractOneDay=True
                )
                if context.isInstantPeriod
                else XmlUtil.dateunionValue(context.startDatetime)
                + "-"
                + XmlUtil.dateunionValue(context.endDatetime, subtractOneDay=True)
            )
        elif aspect == Aspect.ENTITY_IDENTIFIER:
            if view:
                return context.entityIdentifier[1]
            else:
                return context.entityIdentifier  # (scheme, identifier)
        elif aspect in (
            Aspect.COMPLETE_SEGMENT,
            Aspect.COMPLETE_SCENARIO,
            Aspect.NON_XDT_SEGMENT,
            Aspect.NON_XDT_SCENARIO,
        ):
            return "".join(
                XmlUtil.xmlstring(elt, stripXmlns=True, prettyPrint=True)
                for elt in context.nonDimValues(aspect)
            )
        elif aspect == Aspect.DIMENSIONS:
            return context.dimAspects(fact.xpCtx.defaultDimensionAspects)
        elif isinstance(aspect, QName):
            dimValue = context.dimValue(aspect)
            if dimValue is None:
                return NONE
            else:
                if isinstance(dimValue, QName):  # default dim
                    return dimValue
                elif dimValue.isExplicit:
                    return dimValue.memberQname
                else:  # typed dimension (not explicit)
                    return dimValue.typedMember.xValue  # typed element value
# Dispatch table: maps an AST node's class name to the function that
# evaluates nodes of that type (used by the generic evaluate() entry point).
evaluator = {
    "astAnnotationDeclaration": evaluateAnnotationDeclaration,
    "astBinaryOperation": evaluateBinaryOperation,
    "astComment": evaluateNoOp,
    "astFor": evaluateFor,
    "astFormulaRule": evaluateRule,
    "astFunctionDeclaration": evaluateFunctionDeclaration,
    "astFunctionReference": evaluateFunctionReference,
    "astHyperspaceExpression": evaluateHyperspaceExpression,
    "astIf": evaluateIf,
    "astMessage": evaluateMessage,
    "astMethodReference": evaluateMethodReference,
    "astNamespaceDeclaration": evaluateNoOp,
    "astNode": evaluateNoOp,
    "astNoOp": evaluateNoOp,
    "astNumericLiteral": evaluateNumericLiteral,
    "astPreconditionDeclaration": evaluatePreconditionDeclaration,
    "astQnameLiteral": evaluateQnameLiteral,
    "astReportRule": evaluateRule,
    "astSourceFile": evaluateNoOp,
    "astRuleBasePrecondition": evaluateRuleBasePrecondition,
    "astPreconditionReference": evaluatePreconditionReference,
    "astStringLiteral": evaluateStringLiteral,
    "astTagAssignment": evaluateTagAssignment,
    "astTagReference": evaluateTagReference,
    "astValidationRule": evaluateRule,
    "astValuesIteration": evaluateValuesIteration,
    "astVariableAssignment": evaluateVariableAssignment,
    "astVariableReference": evaluateVariableReference,
    "astUnaryOperation": evaluateUnaryOperation,
    "astWith": evaluateWith,
}
# One-time initialization of the Sphinx method implementations module.
SphinxMethodsModuleInit()
<|endoftext|> |
<|endoftext|>"""
DialogRssWatchExtender extends DialogRssWatch for XBRL databases.
It is separate from the xbrlDB __init__.py module so that it can be removed when
compiling server versions where Python has no GUI facilities. The imports of GUI
facilities would cause compilation of the server-related modules to fail, otherwise.
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
and does not apply to the XBRL US Database schema and description.
"""
def dialogRssWatchDBextender(
    dialog, frame, row, options, cntlr, openFileImage, openDatabaseImage
):
    """Add a "DB Connection" row to the RSS-watch dialog for XBRL databases.

    Inserts a label, an entry cell pre-filled from options["xbrlDBconnection"],
    a tooltip, and a button that pops up the database-connection dialog and
    stores the resulting connection string back into dialog.options.
    """
    from tkinter import PhotoImage, N, S, E, W
    from tkinter.simpledialog import askstring
    from arelle.CntlrWinTooltip import ToolTip
    from arelle.UiUtil import gridCell, label

    # Bug fix: the previous try/except imported the identical module in both
    # branches (a leftover Python 2 'ttk' fallback), so a plain import suffices.
    from tkinter.ttk import Button

    def enterConnectionString():
        # Prompt for (user, password, host, port, database) and store the
        # comma-joined connection string in the dialog's options and cell.
        from arelle.DialogUserPassword import askDatabase

        db = askDatabase(
            cntlr.parent,
            dialog.cellDBconnection.value.split(",")
            if dialog.cellDBconnection.value
            else None,
        )
        if db:
            dbConnectionString = ",".join(db)
            dialog.options["xbrlDBconnection"] = dbConnectionString
            dialog.cellDBconnection.setValue(dbConnectionString)
        else:  # deleted
            dialog.options.pop("xbrlDBconnection", "")  # remove entry

    label(frame, 1, row, "DB Connection:")
    dialog.cellDBconnection = gridCell(
        frame, 2, row, options.get("xbrlDBconnection", "")
    )
    ToolTip(
        dialog.cellDBconnection,
        text=_(
            "Enter an XBRL Database (Postgres) connection string. "
            "E.g., host,port,user,password,db[,timeout]. "
        ),
        wraplength=240,
    )
    enterDBconnectionButton = Button(
        frame, image=openDatabaseImage, width=12, command=enterConnectionString
    )
    enterDBconnectionButton.grid(row=row, column=3, sticky=W)
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# this script generates a testcase variations file for entry point checking
#
import os, fnmatch, xml.dom.minidom, datetime
def main():
    """Generate a testcase variations file for entry-point checking.

    Walks topDirectory for *.xsd files and writes a testcase.xml containing
    one variation per schema entry point found.
    """
    # the top directory where to generate the test case (and relative file names in the variations)
    topDirectory = "C:\\temp\\editaxonomy20110314"
    testcaseName = "EDInet test cases"
    ownerName = "Hugh Wallis"
    ownerEmail = "[email protected]"

    entryRelativeFilePaths = []
    for root, dirs, files in os.walk(topDirectory):
        for fileName in files:
            if fnmatch.fnmatch(fileName, "*.xsd"):
                fullFilePath = os.path.join(root, fileName)
                entryRelativeFilePaths.append(
                    os.path.relpath(fullFilePath, topDirectory)
                )

    lines = [
        '<?xml version="1.0" encoding="UTF-8"?>',
        "<!-- Copyright 2011 XBRL International. All Rights Reserved. -->",
        '<?xml-stylesheet type="text/xsl" href="http://www.xbrl.org/Specification/formula/REC-2009-06-22/conformance/infrastructure/test.xsl"?>',
        '<testcase name="{0}" date="{1}" '.format(testcaseName, datetime.date.today()),
        ' xmlns="http://xbrl.org/2008/conformance"',
        ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"',
        ' xsi:schemaLocation="http://xbrl.org/2008/conformance http://www.xbrl.org/Specification/formula/REC-2009-06-22/conformance/infrastructure/test.xsd">',
        " <creator>",
        " <name>{0}</name>".format(ownerName),
        " <email>{0}</email>".format(ownerEmail),
        " </creator>",
        # bug fix: the testcase <name> previously formatted ownerEmail
        " <name>{0}</name>".format(testcaseName),
        " <description>{0}</description>".format(testcaseName),
    ]
    # one <variation> per discovered entry-point schema
    for num, entryFile in enumerate(entryRelativeFilePaths, start=1):
        fileName = os.path.basename(entryFile)
        lines.append(" <variation name='{0}' id='V-{1}'>".format(fileName, num))
        lines.append(" <description>{0}</description>".format(fileName))
        lines.append(" <data>")
        lines.append(
            " <xsd readMeFirst='true'>{0}</xsd>".format(
                entryFile.replace("\\", "/")
            )
        )
        lines.append(" </data>")
        lines.append(" <result expected='valid'/>")
        lines.append(" </variation>")
    lines.append("</testcase>")

    with open(os.path.join(topDirectory, "testcase.xml"), "w") as fh:
        fh.write("\n".join(lines))
# Script entry point: generate the testcase file only when run directly.
if __name__ == "__main__":
    main()
<|endoftext|> |
<|endoftext|>from .gen.tags import _Tags
class Tags(_Tags):
    """Tags resource; inherits all behavior from the generated _Tags class."""
<|endoftext|> |
<|endoftext|>#!/usr/bin/python
""" SAX parser implementation to prepare an Ordnance Survey
GML file (.gml or .gz) so that it is ready to be loaded by OGR 1.9
or above.
The parser promotes the fid attribute to a child element.
Output is via stdout and is UTF-8 encoded.
usage: python prepgml4ogr.py file.gml
"""
import sys
import os.path
import gzip
import zipfile
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
from xml.sax import saxutils
class gmlhandler(ContentHandler):
    """SAX handler that echoes a GML document to stdout, buffering elements
    of the preparer's feature types so each buffered feature can be rewritten
    by preparer.prepare_feature() before being emitted."""

    def __init__(self, preparer):
        # The class that will prepare the features
        self.preparer = preparer
        # Flag to indicate if we have encountered the first element yet
        self.first_elm = True
        self.feat = None
        self.recording = False

    def startElement(self, name, attrs):
        """Handle an element start: emit it, or begin buffering a feature."""
        if self.first_elm:
            # Output the xml declaration prior to the first element,
            # done here instead of in startDocument to allow us to avoid
            # outputting the declaration when we try and parse non XML content
            # as can happen when we parse all files in a zip archive
            self.first_elm = False
            output('<?xml version="1.0" ?>')
        try:
            # strip any namespace prefix from the element name
            name = name.split(":")[1]
        except IndexError:
            pass
        # Determine if we are interested
        # in starting to record the raw
        # XML string so we can prepare
        # the feature when the feature ends
        if name in self.preparer.feat_types:
            self.buffer = []
            self.recording = True
        # Process the attributes
        tmp = "<" + name
        for name, value in list(attrs.items()):
            try:
                # strip any namespace prefix from the attribute name
                name = name.split(":")[1]
            except IndexError:
                pass
            tmp += " %s=%s" % (name, saxutils.quoteattr(value))
        tmp += ">"
        if self.recording:
            self.buffer.append(tmp)
        else:
            output(tmp)
        return

    def characters(self, ch):
        """Handle character data: buffer or emit it XML-escaped.

        Whitespace-only text nodes are dropped.
        """
        if len(ch.strip()) > 0:
            if self.recording:
                self.buffer.append(saxutils.escape(ch))
            else:
                output(saxutils.escape(ch))

    def endElement(self, name):
        """Handle an element end; when a buffered feature closes, emit the
        prepared feature text."""
        try:
            # strip any namespace prefix from the element name
            name = name.split(":")[1]
        except IndexError:
            pass
        if self.recording:
            self.buffer.append("</" + name + ">")
        else:
            output("</" + name + ">")
        if name in self.preparer.feat_types:
            self.recording = False
            output(self.preparer.prepare_feature("".join(self.buffer)))
            self.buffer = []
def output(text):
    """Write *text* to stdout as UTF-8, XML-escaping unencodable characters.

    Fix: the parameter was named ``str``, shadowing the builtin.
    NOTE(review): the UnicodeEncodeError fallback writes bytes to a text
    stream, which would raise TypeError on Python 3 — confirm it is still
    reachable/needed.
    """
    try:
        sys.stdout.write(text.encode("utf_8", "xmlcharrefreplace").decode("utf_8"))
    except UnicodeEncodeError:
        sys.stdout.write(text.encode("utf_8", "xmlcharrefreplace"))
class prep_gml:
    """Default pass-through preparer: no feature types, features unchanged."""

    def __init__(self, inputfile):
        # inputfile is unused by the default preparer but kept so all
        # preparer classes share the same constructor signature
        self.feat_types = []

    def get_feat_types(self):
        """Return the list of feature element names to buffer (none here)."""
        return self.feat_types

    def prepare_feature(self, feat_str):
        """Return the feature XML unchanged."""
        return feat_str
def main():
    """Parse a GML (.gml/.xml/.gz/.zip) file and emit prepared XML on stdout.

    usage: python prepgml4ogr.py file [[prep_module.]prep_class]
    """
    if len(sys.argv) < 2:
        print("usage: python prepgml4ogr.py file [[prep_module.]prep_class]")
        sys.exit(1)
    inputfile = sys.argv[1]
    if not os.path.exists(inputfile):
        print(("Could not find input file: " + inputfile))
        return
    # Create an instance of a preparer
    # class which is used to prepare
    # features as they are read
    prep_class = "prep_gml"
    if len(sys.argv) > 2:
        prep_class = sys.argv[2]
    prep_class = get_preparer(prep_class)
    preparer = prep_class(inputfile)
    parser = make_parser()
    parser.setContentHandler(gmlhandler(preparer))
    extension = os.path.splitext(inputfile)[1].lower()
    if extension == ".zip":
        # Parse every member of the archive; the archive and each member
        # are now closed deterministically (previously leaked).
        with zipfile.ZipFile(inputfile, "r") as archive:
            for filename in archive.namelist():
                with archive.open(filename) as file:
                    try:
                        parser.parse(file)
                    except Exception:
                        # Ignore any files that can't be parsed
                        # (narrowed from a bare except, which also
                        # swallowed KeyboardInterrupt/SystemExit)
                        pass
    elif extension == ".gz":
        with gzip.open(inputfile, "r") as file:
            parser.parse(file)
    else:
        # Assume non compressed gml, xml or no extension
        with open(inputfile, "r") as file:
            parser.parse(file)
def get_preparer(prep_class):
    """Resolve a preparer class from "name" (a global) or "module.name"."""
    parts = prep_class.split(".")
    if len(parts) > 1:
        # dotted name: import the module and fetch the attribute
        module = __import__(parts[0])
        return getattr(module, parts[1])
    # bare name: look it up among this module's globals
    return globals()[prep_class]
# Script entry point: run the converter only when executed directly.
if __name__ == "__main__":
    main()
<|endoftext|> |
<|endoftext|>from django.contrib import admin
# Register your models here.
<|endoftext|> |
<|endoftext|>import csv
from . import eigen
import axelrod.interaction_utils as iu
from numpy import mean, nanmedian, std
try:
# Python 2
from io import StringIO
except ImportError:
# Python 3
from io import StringIO
class ResultSet(object):
"""A class to hold the results of a tournament."""
    def __init__(self, players, interactions, with_morality=True):
        """
        Parameters
        ----------
        players : list
            a list of player objects.
        interactions : list
            a list of dictionaries mapping tuples of player indices to
            interactions (1 for each repetition)
        with_morality : bool
            a flag to determine whether morality metrics should be
            calculated.
        """
        self.players = players
        # number of players in the tournament
        self.nplayers = len(players)
        self.interactions = interactions
        # one interactions dictionary per repetition
        self.nrepetitions = len(interactions)
        # Calculate all attributes:
        self.build_all(with_morality)
    def build_all(self, with_morality):
        """Build all the results. In a separate method to make inheritance more
        straightforward.

        NOTE: the build order matters — ranking uses normalised_scores,
        ranked_names uses ranking, and payoff_matrix/payoff_stddevs use
        payoffs.
        """
        self.wins = self.build_wins()
        self.match_lengths = self.build_match_lengths()
        self.scores = self.build_scores()
        self.normalised_scores = self.build_normalised_scores()
        self.ranking = self.build_ranking()
        self.ranked_names = self.build_ranked_names()
        self.payoffs = self.build_payoffs()
        self.payoff_matrix = self.build_payoff_matrix()
        self.payoff_stddevs = self.build_payoff_stddevs()
        self.score_diffs = self.build_score_diffs()
        self.payoff_diffs_means = self.build_payoff_diffs_means()
        # morality metrics are optional and computed from the above
        if with_morality:
            self.cooperation = self.build_cooperation()
            self.normalised_cooperation = self.build_normalised_cooperation()
            self.vengeful_cooperation = self.build_vengeful_cooperation()
            self.cooperating_rating = self.build_cooperating_rating()
            self.good_partner_matrix = self.build_good_partner_matrix()
            self.good_partner_rating = self.build_good_partner_rating()
            self.eigenmoses_rating = self.build_eigenmoses_rating()
            self.eigenjesus_rating = self.build_eigenjesus_rating()
@property
def _null_results_matrix(self):
"""
Returns:
--------
A null matrix (i.e. fully populated with zero values) using
lists of the form required for the results dictionary.
i.e. one row per player, containing one element per opponent (in
order of player index) which lists values for each repetition.
"""
plist = list(range(self.nplayers))
replist = list(range(self.nrepetitions))
return [[[0 for j in plist] for i in plist] for r in replist]
def build_match_lengths(self):
"""
Returns:
--------
The match lengths. List of the form:
[ML1, ML2, ML3..., MLn]
Where n is the number of repetitions and MLi is a list of the form:
[Pli1, PLi2, Pli3, ..., Plim]
Where m is the number of players and Plij is of the form:
[aij1, aij2, aij3, ..., aijk]
Where k is the number of players and aijk is the length of the match
between player j and k in repetition i.
"""
match_lengths = self._null_results_matrix
for rep in range(self.nrepetitions):
for player_pair_index, interactions in list(self.interactions[rep].items()):
player, opponent = player_pair_index
match_lengths[rep][player][opponent] = len(interactions)
if player != opponent: # Match lengths are symmetric
match_lengths[rep][opponent][player] = len(interactions)
return match_lengths
def build_scores(self):
"""
Returns:
--------
The total scores per player for each repetition lengths.
List of the form:
[ML1, ML2, ML3..., MLn]
Where n is the number of players and MLi is a list of the form:
[pi1, pi2, pi3, ..., pim]
Where m is the number of repetitions and pij is the total score
obtained by each player in repetition j.
In Axelrod's original tournament, there were no self-interactions
(e.g. player 1 versus player 1) and so these are also ignored.
"""
scores = [[0 for rep in range(self.nrepetitions)] for _ in range(self.nplayers)]
for rep, inter_dict in enumerate(self.interactions):
for index_pair, interactions in list(inter_dict.items()):
if index_pair[0] != index_pair[1]: # Ignoring self interactions
final_scores = iu.compute_final_score(interactions)
for player in range(2):
player_index = index_pair[player]
player_score = final_scores[player]
scores[player_index][rep] += player_score
return scores
def build_ranked_names(self):
"""
Returns:
--------
Returns the ranked names. A list of names as calculated by
self.ranking.
"""
return [str(self.players[i]) for i in self.ranking]
def build_wins(self):
"""
Returns:
--------
The total wins per player for each repetition lengths.
List of the form:
[ML1, ML2, ML3..., MLn]
Where n is the number of players and MLi is a list of the form:
[pi1, pi2, pi3, ..., pim]
Where m is the number of repetitions and pij is the total wins
obtained by each player in repetition j.
In Axelrod's original tournament, there were no self-interactions
(e.g. player 1 versus player 1) and so these are also ignored.
"""
wins = [[0 for rep in range(self.nrepetitions)] for _ in range(self.nplayers)]
for rep, inter_dict in enumerate(self.interactions):
for index_pair, interactions in list(inter_dict.items()):
if index_pair[0] != index_pair[1]: # Ignore self interactions
for player in range(2):
player_index = index_pair[player]
winner_index = iu.compute_winner_index(interactions)
if winner_index is not False and player == winner_index:
wins[player_index][rep] += 1
return wins
def build_normalised_scores(self):
"""
Returns:
--------
The total mean scores per turn per layer for each repetition
lengths. List of the form:
[ML1, ML2, ML3..., MLn]
Where n is the number of players and MLi is a list of the form:
[pi1, pi2, pi3, ..., pim]
Where m is the number of repetitions and pij is the mean scores per
turn obtained by each player in repetition j.
In Axelrod's original tournament, there were no self-interactions
(e.g. player 1 versus player 1) and so these are also ignored.
"""
normalised_scores = [
[[] for rep in range(self.nrepetitions)] for _ in range(self.nplayers)
]
# Getting list of all per turn scores for each player for each rep
for rep, inter_dict in enumerate(self.interactions):
for index_pair, interactions in list(inter_dict.items()):
if index_pair[0] != index_pair[1]: # Ignore self interactions
scores_per_turn = iu.compute_final_score_per_turn(interactions)
for player in range(2):
player_index = index_pair[player]
score_per_turn = scores_per_turn[player]
normalised_scores[player_index][rep].append(score_per_turn)
# Obtaining mean scores and overwriting corresponding entry in
# normalised scores
for i, rep in enumerate(normalised_scores):
for j, player_scores in enumerate(rep):
normalised_scores[i][j] = mean(player_scores)
return normalised_scores
def build_ranking(self):
"""
Returns:
--------
The ranking. List of the form:
[R1, R2, R3..., Rn]
Where n is the number of players and Rj is the rank of the jth player
(based on median normalised score).
"""
return sorted(
list(range(self.nplayers)),
key=lambda i: -nanmedian(self.normalised_scores[i]),
)
def build_payoffs(self):
"""
Returns:
--------
The list of per turn payoffs.
List of the form:
[ML1, ML2, ML3..., MLn]
Where n is the number of players and MLi is a list of the form:
[pi1, pi2, pi3, ..., pim]
Where m is the number of players and pij is a list of the form:
[uij1, uij2, ..., uijk]
Where k is the number of repetitions and uijk is the list of utilities
obtained by player i against player j in each repetition.
"""
plist = list(range(self.nplayers))
payoffs = [[[] for opponent in plist] for player in plist]
for player in plist:
for opponent in plist:
utilities = []
for rep in self.interactions:
if (player, opponent) in rep:
interactions = rep[(player, opponent)]
utilities.append(
iu.compute_final_score_per_turn(interactions)[0]
)
if (opponent, player) in rep:
interactions = rep[(opponent, player)]
utilities.append(
iu.compute_final_score_per_turn(interactions)[1]
)
payoffs[player][opponent] = utilities
return payoffs
def build_payoff_matrix(self):
"""
Returns:
--------
The mean of per turn payoffs.
List of the form:
[ML1, ML2, ML3..., MLn]
Where n is the number of players and MLi is a list of the form:
[pi1, pi2, pi3, ..., pim]
Where m is the number of players and pij is a list of the form:
[uij1, uij2, ..., uijk]
Where k is the number of repetitions and u is the mean utility (over
all repetitions) obtained by player i against player j.
"""
plist = list(range(self.nplayers))
payoff_matrix = [[[] for opponent in plist] for player in plist]
for player in plist:
for opponent in plist:
utilities = self.payoffs[player][opponent]
if utilities:
payoff_matrix[player][opponent] = mean(utilities)
else:
payoff_matrix[player][opponent] = 0
return payoff_matrix
def build_payoff_stddevs(self):
"""
Returns:
--------
The mean of per turn payoffs.
List of the form:
[ML1, ML2, ML3..., MLn]
Where n is the number of players and MLi is a list of the form:
[pi1, pi2, pi3, ..., pim]
Where m is the number of players and pij is a list of the form:
[uij1, uij2, ..., uijk]
Where k is the number of repetitions and u is the standard
deviation of the utility (over all repetitions) obtained by player
i against player j.
"""
plist = list(range(self.nplayers))
payoff_stddevs = [[[0] for opponent in plist] for player in plist]
for player in plist:
for opponent in plist:
utilities = self.payoffs[player][opponent]
if utilities:
payoff_stddevs[player][opponent] = std(utilities)
else:
payoff_stddevs[player][opponent] = 0
return payoff_stddevs
def build_score_diffs(self):
    """Return per-repetition score differences between players.

    Returns
    -------
    A list of lists ``D`` where ``D[i][j]`` is a list of length
    ``self.nrepetitions`` holding, for each repetition, player i's
    per-turn score minus player j's (0 when the pair did not interact
    in that repetition).
    """
    indices = range(self.nplayers)
    diffs_matrix = [
        [[0] * self.nrepetitions for _ in indices] for _ in indices
    ]
    for i in indices:
        for j in indices:
            for rep_number, rep in enumerate(self.interactions):
                # The pair may be stored under either ordering; flip the
                # sign of the difference accordingly.
                if (i, j) in rep:
                    scores = iu.compute_final_score_per_turn(rep[(i, j)])
                    diffs_matrix[i][j][rep_number] = scores[0] - scores[1]
                if (j, i) in rep:
                    scores = iu.compute_final_score_per_turn(rep[(j, i)])
                    diffs_matrix[i][j][rep_number] = scores[1] - scores[0]
    return diffs_matrix
def build_payoff_diffs_means(self):
    """Return the mean score difference between each pair of players.

    Returns
    -------
    A list of lists ``D`` where ``D[i][j]`` is the mean (over all
    repetitions) of the per-turn score differences between player i
    and player j, or 0 if the pair never interacted.
    """
    indices = range(self.nplayers)
    means_matrix = []
    for i in indices:
        row = []
        for j in indices:
            observed = []
            for rep in self.interactions:
                # The pair may be stored under either ordering; flip the
                # sign of the difference accordingly.
                if (i, j) in rep:
                    scores = iu.compute_final_score_per_turn(rep[(i, j)])
                    observed.append(scores[0] - scores[1])
                if (j, i) in rep:
                    scores = iu.compute_final_score_per_turn(rep[(j, i)])
                    observed.append(scores[1] - scores[0])
            row.append(mean(observed) if observed else 0)
        means_matrix.append(row)
    return means_matrix
def build_cooperation(self):
    """
    Returns:
    --------
    The list of cooperation counts.
    List of the form:
    [ML1, ML2, ML3..., MLn]
    Where n is the number of players and MLi is a list of the form:
    [pi1, pi2, pi3, ..., pim]
    Where pij is the total number of cooperations over all repetitions
    played by player i against player j.
    """
    plist = list(range(self.nplayers))
    # n x n matrix of running totals; the diagonal stays 0 because
    # self-interactions are excluded below.
    cooperations = [[0 for opponent in plist] for player in plist]
    for player in plist:
        for opponent in plist:
            if player != opponent:
                for rep in self.interactions:
                    coop_count = 0
                    # The pair may be stored under either key ordering;
                    # index [0]/[1] picks this player's cooperation count.
                    if (player, opponent) in rep:
                        interactions = rep[(player, opponent)]
                        coop_count = iu.compute_cooperations(interactions)[0]
                    # NOTE(review): if a repetition contains the pair under
                    # BOTH orderings, this assignment overwrites (rather
                    # than adds to) the count from the branch above —
                    # confirm that is intended.
                    if (opponent, player) in rep:
                        interactions = rep[(opponent, player)]
                        coop_count = iu.compute_cooperations(interactions)[1]
                    cooperations[player][opponent] += coop_count
    return cooperations
def build_normalised_cooperation(self):
    """Return the mean per-turn cooperation rates between players.

    Returns
    -------
    A list of lists ``N`` where ``N[i][j]`` is the mean, over all
    repetitions, of player i's per-turn cooperation rate against
    player j (a repetition with no interaction for the pair
    contributes a rate of 0).
    """
    indices = range(self.nplayers)
    normalised = []
    for i in indices:
        row = []
        for j in indices:
            rates = []
            for rep in self.interactions:
                seen = False
                # The pair may be stored under either key ordering.
                if (i, j) in rep:
                    rates.append(
                        iu.compute_normalised_cooperation(rep[(i, j)])[0]
                    )
                    seen = True
                if (j, i) in rep:
                    rates.append(
                        iu.compute_normalised_cooperation(rep[(j, i)])[1]
                    )
                    seen = True
                # Repetitions without this pair count as zero cooperation.
                if not seen:
                    rates.append(0)
            # Mean over all repetitions:
            row.append(mean(rates))
        normalised.append(row)
    return normalised
def build_vengeful_cooperation(self):
    """Return the vengeful cooperation matrix.

    Rescales each entry C of the normalised cooperation matrix to
    2 * (C - 0.5), mapping rates in [0, 1] onto [-1, 1].
    """
    vengeful = []
    for row in self.normalised_cooperation:
        vengeful.append([2 * (rate - 0.5) for rate in row])
    return vengeful
def build_cooperating_rating(self):
    """Return each player's overall cooperation rate.

    For every player the total number of cooperations (over all
    repetitions and opponents) is divided by the total number of turns
    played against all other players.  A divisor of at least 1 is used
    so that players with no turns get a rating of 0 instead of a
    division by zero.
    """
    ratings = []
    for player, coop_row in enumerate(self.cooperation):
        total_turns = 0
        # Sum this player's match lengths over every repetition,
        # skipping the diagonal (self) entry.
        for rep in self.match_lengths:
            for opponent, length in enumerate(rep[player]):
                if opponent != player:
                    total_turns += length
        ratings.append(sum(coop_row) / max(1, float(total_turns)))
    return ratings
def build_good_partner_matrix(self):
    """Return the matrix of good partner counts.

    Entry (i, j) is the number of repetitions in which player i
    cooperated at least as often as opponent j during their
    interaction.  Diagonal entries stay 0.
    """
    indices = range(self.nplayers)
    good_partner = [[0 for _ in indices] for _ in indices]
    for i in indices:
        for j in indices:
            if i == j:
                continue
            for rep in self.interactions:
                # The pair may be stored under either key ordering; the
                # comparison direction follows the key order.
                if (i, j) in rep:
                    coops = iu.compute_cooperations(rep[(i, j)])
                    if coops[0] >= coops[1]:
                        good_partner[i][j] += 1
                if (j, i) in rep:
                    coops = iu.compute_cooperations(rep[(j, i)])
                    if coops[0] <= coops[1]:
                        good_partner[i][j] += 1
    return good_partner
def build_good_partner_rating(self):
    """Return each player's good partner rating, ordered by index.

    A player's rating is the sum of their row of the good partner
    matrix divided by the number of non-self interactions they took
    part in across all repetitions; a divisor of at least 1 avoids
    division by zero for players with no interactions.
    """
    ratings = []
    for player_index in range(self.nplayers):
        interaction_count = 0
        for rep in self.interactions:
            for pair in rep:
                if player_index in pair and pair[0] != pair[1]:
                    interaction_count += 1
        # Max is to deal with the edge case of matches with no turns.
        ratings.append(
            sum(self.good_partner_matrix[player_index])
            / max(1, float(interaction_count))
        )
    return ratings
def build_eigenjesus_rating(self):
    """Return the eigenjesus rating of each player.

    The rating is the principal eigenvector of the normalised
    cooperation matrix, as defined in
    http://www.scottaaronson.com/morality.pdf
    """
    principal, _ = eigen.principal_eigenvector(self.normalised_cooperation)
    return principal.tolist()
def build_eigenmoses_rating(self):
    """Return the eigenmoses rating of each player.

    The rating is the principal eigenvector of the vengeful
    cooperation matrix, as defined in
    http://www.scottaaronson.com/morality.pdf
    """
    principal, _ = eigen.principal_eigenvector(self.vengeful_cooperation)
    return principal.tolist()
def csv(self):
    """Return the normalised scores as a CSV string.

    The header row holds the ranked player names; each subsequent row
    holds one repetition's normalised score per player, ordered by
    rank.
    """
    buffer = StringIO()
    buffer.write(",".join(self.ranked_names) + "\n")
    writer = csv.writer(buffer, lineterminator="\n")
    for rep_number in range(self.nrepetitions):
        fields = [
            str(self.normalised_scores[rank][rep_number])
            for rank in self.ranking
        ]
        writer.writerow(fields)
    return buffer.getvalue()
class ResultSetFromFile(ResultSet):
    """A class to hold the results of a tournament.

    Initialised by a csv file of the format:

    [p1index, p2index, p1name, p2name, p1rep1ac1p2rep1ac1p1rep1ac2p2rep1ac2,
    ...]
    [0, 1, Defector, Cooperator, DCDCDC, DCDCDC, DCDCDC,...]
    [0, 2, Defector, Alternator, DCDDDC, DCDDDC, DCDDDC,...]
    [1, 2, Cooperator, Alternator, CCCDCC, CCCDCC, CCCDCC,...]
    """

    def __init__(self, filename, with_morality=True):
        """
        Parameters
        ----------
        filename : string
            name of a file of the correct format.
        with_morality : bool
            a flag to determine whether morality metrics should be
            calculated.
        """
        self.players, self.interactions = self._read_csv(filename)
        self.nplayers = len(self.players)
        self.nrepetitions = len(self.interactions)
        # Calculate all attributes:
        self.build_all(with_morality)

    def _read_csv(self, filename):
        """
        Reads from a csv file of the format:

        p1index, p2index, p1name, p2name, p1rep1ac1p2rep1ac1p1rep1ac2p2rep1ac2,
        ...
        0, 1, Defector, Cooperator, DCDCDC, DCDCDC, DCDCDC,...
        0, 2, Defector, Alternator, DCDDDC, DCDDDC, DCDDDC,...
        1, 2, Cooperator, Alternator, CCCDCC, CCCDCC, CCCDCC,...

        Returns
        -------
        A tuple:
            - First element: list of player names ordered by player index
            - Second element: interactions (one dictionary per repetition
              mapping index pairs to interactions)
        """
        players_d = {}
        interactions_d = {}
        # Bug fix: initialise nreps so an empty file yields no repetitions
        # instead of raising NameError below.
        nreps = 0
        with open(filename, "r") as f:
            for row in csv.reader(f):
                index_pair = (int(row[0]), int(row[1]))
                players = (row[2], row[3])
                inters = row[4:]
                # Build a dictionary mapping indices to players.
                # Bug fix: the membership test previously checked the local
                # ``players`` tuple of *names* (always false for an int
                # index) instead of ``players_d``.
                for index, player in zip(index_pair, players):
                    if index not in players_d:
                        players_d[index] = player
                # Build a dictionary mapping index pairs to their list of
                # per-repetition interactions (the repetition count is not
                # known until a row has been read).
                interactions_d[index_pair] = [
                    self._string_to_interactions(inter) for inter in inters
                ]
                nreps = len(inters)
        # Create an ordered list of players
        players = []
        for i in range(len(players_d)):
            players.append(players_d[i])
        # Re-shape into one dictionary per repetition.
        interactions = []
        for rep in range(nreps):
            pair_to_interactions_d = {}
            for index_pair, inters in list(interactions_d.items()):
                pair_to_interactions_d[index_pair] = inters[rep]
            interactions.append(pair_to_interactions_d)
        return players, interactions

    def _string_to_interactions(self, string):
        """
        Converts a compact string representation of an interaction to an
        interaction:

        'CDCDDD' -> [('C', 'D'), ('C', 'D'), ('D', 'D')]
        """
        return iu.string_to_interactions(string)
<|endoftext|> |
<|endoftext|>from axelrod import Actions, Player
C, D = Actions.C, Actions.D
class MindController(Player):
    """A player that rewrites its opponent's strategy to always cooperate."""

    name = "Mind Controller"
    classifier = {
        "memory_depth": -10,
        "stochastic": False,
        "makes_use_of": set(),
        "inspects_source": False,
        "manipulates_source": True,  # Finds out what opponent will do
        "manipulates_state": False,
    }

    @staticmethod
    def strategy(opponent):
        """Replace the opponent's strategy method with one that always
        returns C, then defect to take advantage of it.
        """
        setattr(opponent, "strategy", lambda opponent: C)
        return D
class MindWarper(Player):
    """A player that rewrites the opponent's strategy while refusing any
    modification of its own.
    """

    name = "Mind Warper"
    classifier = {
        "memory_depth": -10,
        "stochastic": False,
        "makes_use_of": set(),
        "inspects_source": False,
        "manipulates_source": True,  # changes what opponent will do
        "manipulates_state": False,
    }

    def __setattr__(self, name, val):
        # Silently drop any attempt to rebind this player's strategy;
        # every other attribute is stored normally.
        if name != "strategy":
            self.__dict__[name] = val

    @staticmethod
    def strategy(opponent):
        """Force the opponent to always cooperate, then defect."""
        setattr(opponent, "strategy", lambda opponent: C)
        return D
class MindBender(MindWarper):
    """A player that forces cooperation by writing directly into the
    opponent's instance dictionary.
    """

    name = "Mind Bender"
    classifier = {
        "memory_depth": -10,
        "makes_use_of": set(),
        "stochastic": False,
        "inspects_source": False,
        "manipulates_source": True,  # changes what opponent will do
        "manipulates_state": False,
    }

    @staticmethod
    def strategy(opponent):
        """Bypass any __setattr__ guard by mutating the opponent's
        __dict__ directly, then defect.
        """
        forced_cooperation = lambda opponent: C
        opponent.__dict__["strategy"] = forced_cooperation
        return D
<|endoftext|> |
<|endoftext|>"""Tests for the Ecosystem class"""
import unittest
import axelrod
class TestEcosystem(unittest.TestCase):
    """Tests for the Ecosystem class."""

    @classmethod
    def setUpClass(cls):
        # Two reference tournaments: one of identical cooperators, and one
        # where a single defector faces three cooperators.
        cooperators = axelrod.Tournament(
            players=[
                axelrod.Cooperator(),
                axelrod.Cooperator(),
                axelrod.Cooperator(),
                axelrod.Cooperator(),
            ]
        )
        defector_wins = axelrod.Tournament(
            players=[
                axelrod.Cooperator(),
                axelrod.Cooperator(),
                axelrod.Cooperator(),
                axelrod.Defector(),
            ]
        )
        cls.res_cooperators = cooperators.play()
        cls.res_defector_wins = defector_wins.play()

    def test_init(self):
        """Are the populations created correctly?"""
        # By default create populations of equal size
        eco = axelrod.Ecosystem(self.res_cooperators)
        pops = eco.population_sizes
        self.assertEqual(eco.nplayers, 4)
        self.assertEqual(len(pops), 1)
        self.assertEqual(len(pops[0]), 4)
        self.assertAlmostEqual(sum(pops[0]), 1.0)
        self.assertEqual(list(set(pops[0])), [0.25])
        # Can pass list of initial population distributions
        eco = axelrod.Ecosystem(
            self.res_cooperators, population=[0.7, 0.25, 0.03, 0.02]
        )
        pops = eco.population_sizes
        self.assertEqual(eco.nplayers, 4)
        self.assertEqual(len(pops), 1)
        self.assertEqual(len(pops[0]), 4)
        self.assertAlmostEqual(sum(pops[0]), 1.0)
        self.assertEqual(pops[0], [0.7, 0.25, 0.03, 0.02])
        # Distribution will automatically normalise
        eco = axelrod.Ecosystem(self.res_cooperators, population=[70, 25, 3, 2])
        pops = eco.population_sizes
        self.assertEqual(eco.nplayers, 4)
        self.assertEqual(len(pops), 1)
        self.assertEqual(len(pops[0]), 4)
        self.assertAlmostEqual(sum(pops[0]), 1.0)
        self.assertEqual(pops[0], [0.7, 0.25, 0.03, 0.02])
        # If passed list is of incorrect size get error
        self.assertRaises(
            TypeError,
            axelrod.Ecosystem,
            self.res_cooperators,
            population=[0.7, 0.2, 0.03, 0.1, 0.1],
        )
        # If passed list has negative values
        self.assertRaises(
            TypeError,
            axelrod.Ecosystem,
            self.res_cooperators,
            population=[0.7, -0.2, 0.03, 0.2],
        )

    def test_fitness(self):
        """A custom fitness function is applied by the ecosystem."""
        fitness = lambda p: 2 * p
        eco = axelrod.Ecosystem(self.res_cooperators, fitness=fitness)
        # Bug fix: this previously used assertTrue(eco.fitness(10), 20),
        # where 20 was silently treated as the failure *message*, so the
        # test passed for any truthy return value.
        self.assertEqual(eco.fitness(10), 20)

    def test_cooperators(self):
        """Are cooperators stable over time?"""
        eco = axelrod.Ecosystem(self.res_cooperators)
        eco.reproduce(100)
        pops = eco.population_sizes
        self.assertEqual(len(pops), 101)
        for p in pops:
            self.assertEqual(len(p), 4)
            self.assertEqual(sum(p), 1.0)
            self.assertEqual(list(set(p)), [0.25])

    def test_defector_wins(self):
        """Does one defector win over time?"""
        eco = axelrod.Ecosystem(self.res_defector_wins)
        eco.reproduce(1000)
        pops = eco.population_sizes
        self.assertEqual(len(pops), 1001)
        for p in pops:
            self.assertEqual(len(p), 4)
            self.assertAlmostEqual(sum(p), 1.0)
        last = pops[-1]
        self.assertAlmostEqual(last[0], 0.0)
        self.assertAlmostEqual(last[1], 0.0)
        self.assertAlmostEqual(last[2], 0.0)
        self.assertAlmostEqual(last[3], 1.0)
<|endoftext|> |
<|endoftext|>"""Test for the qlearner strategy."""
import random
import axelrod
from axelrod import simulate_play, Game
from .test_player import TestPlayer, test_responses
C, D = axelrod.Actions.C, axelrod.Actions.D
class TestRiskyQLearner(TestPlayer):
    """Tests for the Risky QLearner strategy."""

    name = "Risky QLearner"
    player = axelrod.RiskyQLearner
    expected_classifier = {
        "memory_depth": float("inf"),
        "stochastic": True,
        "makes_use_of": set(["game"]),
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def test_payoff_matrix(self):
        """The player's payoff matrix matches the default Game's RPST values."""
        (R, P, S, T) = Game().RPST()
        payoff_matrix = {C: {C: R, D: S}, D: {C: T, D: P}}
        p1 = self.player()
        self.assertEqual(p1.payoff_matrix, payoff_matrix)

    def test_qs_update(self):
        """Test that the q and v values update."""
        # Fixed seed: the exact Q values asserted below depend on it.
        random.seed(5)
        p1 = axelrod.RiskyQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.Qs, {"": {C: 0, D: 0.9}, "0.0": {C: 0, D: 0}})
        simulate_play(p1, p2)
        self.assertEqual(
            p1.Qs, {"": {C: 0, D: 0.9}, "0.0": {C: 2.7, D: 0}, "C1.0": {C: 0, D: 0}}
        )

    def test_vs_update(self):
        """Test that the q and v values update."""
        random.seed(5)
        p1 = axelrod.RiskyQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {"": 0.9, "0.0": 0})
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {"": 0.9, "0.0": 2.7, "C1.0": 0})

    def test_prev_state_updates(self):
        """Test that the previous state is tracked across plays."""
        random.seed(5)
        p1 = axelrod.RiskyQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, "0.0")
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, "C1.0")

    def test_strategy(self):
        """Tests that it chooses the best strategy."""
        random.seed(5)
        p1 = axelrod.RiskyQLearner()
        # Pre-seed the learner's state and Q table so the expected
        # responses are deterministic (up to the seeded RNG).
        p1.state = "CCDC"
        p1.Qs = {"": {C: 0, D: 0}, "CCDC": {C: 2, D: 6}}
        p2 = axelrod.Cooperator()
        test_responses(self, p1, p2, [], [], [C, D, C, C, D, C, C])

    def test_reset_method(self):
        """Reset clears history, previous state and the learned Q/V tables."""
        P1 = axelrod.RiskyQLearner()
        P1.Qs = {"": {C: 0, D: -0.9}, "0.0": {C: 0, D: 0}}
        P1.Vs = {"": 0, "0.0": 0}
        P1.history = [C, D, D, D]
        P1.prev_state = C
        P1.reset()
        self.assertEqual(P1.prev_state, "")
        self.assertEqual(P1.history, [])
        self.assertEqual(P1.Vs, {"": 0})
        self.assertEqual(P1.Qs, {"": {C: 0, D: 0}})
class TestArrogantQLearner(TestPlayer):
    """Tests for the Arrogant QLearner strategy."""

    name = "Arrogant QLearner"
    player = axelrod.ArrogantQLearner
    expected_classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": True,
        "makes_use_of": set(["game"]),
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def test_qs_update(self):
        """
        Test that the q and v values update
        """
        # Fixed seed: the exact Q values asserted below depend on it.
        random.seed(5)
        p1 = axelrod.ArrogantQLearner()
        p2 = axelrod.Cooperator()
        play_1, play_2 = simulate_play(p1, p2)
        self.assertEqual(p1.Qs, {"": {C: 0, D: 0.9}, "0.0": {C: 0, D: 0}})
        simulate_play(p1, p2)
        self.assertEqual(
            p1.Qs, {"": {C: 0, D: 0.9}, "0.0": {C: 2.7, D: 0}, "C1.0": {C: 0, D: 0}}
        )

    def test_vs_update(self):
        """
        Test that the q and v values update
        """
        random.seed(5)
        p1 = axelrod.ArrogantQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {"": 0.9, "0.0": 0})
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {"": 0.9, "0.0": 2.7, "C1.0": 0})

    def test_prev_state_updates(self):
        """
        Test that the previous state is tracked across plays
        """
        random.seed(5)
        p1 = axelrod.ArrogantQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, "0.0")
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, "C1.0")

    def test_strategy(self):
        """Tests that it chooses the best strategy."""
        random.seed(9)
        p1 = axelrod.ArrogantQLearner()
        # Pre-seed the learner's state and Q table so the expected
        # responses are deterministic (up to the seeded RNG).
        p1.state = "CCDC"
        p1.Qs = {"": {C: 0, D: 0}, "CCDC": {C: 2, D: 6}}
        p2 = axelrod.Cooperator()
        test_responses(self, p1, p2, [], [], [C, C, C, C, C, C, C])

    def test_reset_method(self):
        """Reset clears history, previous state and the learned Q/V tables."""
        P1 = axelrod.ArrogantQLearner()
        P1.Qs = {"": {C: 0, D: -0.9}, "0.0": {C: 0, D: 0}}
        P1.Vs = {"": 0, "0.0": 0}
        P1.history = [C, D, D, D]
        P1.prev_state = C
        P1.reset()
        self.assertEqual(P1.prev_state, "")
        self.assertEqual(P1.history, [])
        self.assertEqual(P1.Vs, {"": 0})
        self.assertEqual(P1.Qs, {"": {C: 0, D: 0}})
class TestHesitantQLearner(TestPlayer):
    """Tests for the Hesitant QLearner strategy."""

    name = "Hesitant QLearner"
    player = axelrod.HesitantQLearner
    expected_classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": True,
        "makes_use_of": set(["game"]),
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def test_qs_update(self):
        """Test that the q and v values update."""
        # Fixed seed: the exact Q values asserted below depend on it.
        random.seed(5)
        p1 = axelrod.HesitantQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.Qs, {"": {C: 0, D: 0.1}, "0.0": {C: 0, D: 0}})
        simulate_play(p1, p2)
        self.assertEqual(
            p1.Qs,
            {
                "": {C: 0, D: 0.1},
                # 0.30000000000000004 is the exact float result of the
                # update arithmetic.
                "0.0": {C: 0.30000000000000004, D: 0},
                "C1.0": {C: 0, D: 0},
            },
        )

    def test_vs_update(self):
        """
        Test that the q and v values update
        """
        random.seed(5)
        p1 = axelrod.HesitantQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {"": 0.1, "0.0": 0})
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {"": 0.1, "0.0": 0.30000000000000004, "C1.0": 0})

    def test_prev_state_updates(self):
        """
        Test that the previous state is tracked across plays
        """
        random.seed(5)
        p1 = axelrod.HesitantQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, "0.0")
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, "C1.0")

    def test_strategy(self):
        """Tests that it chooses the best strategy."""
        random.seed(9)
        p1 = axelrod.HesitantQLearner()
        # Pre-seed the learner's state and Q table so the expected
        # responses are deterministic (up to the seeded RNG).
        p1.state = "CCDC"
        p1.Qs = {"": {C: 0, D: 0}, "CCDC": {C: 2, D: 6}}
        p2 = axelrod.Cooperator()
        test_responses(self, p1, p2, [], [], [C, C, C, C, C, C, C])

    def test_reset_method(self):
        """Reset clears history, previous state and the learned Q/V tables."""
        P1 = axelrod.HesitantQLearner()
        P1.Qs = {"": {C: 0, D: -0.9}, "0.0": {C: 0, D: 0}}
        P1.Vs = {"": 0, "0.0": 0}
        P1.history = [C, D, D, D]
        P1.prev_state = C
        P1.reset()
        self.assertEqual(P1.prev_state, "")
        self.assertEqual(P1.history, [])
        self.assertEqual(P1.Vs, {"": 0})
        self.assertEqual(P1.Qs, {"": {C: 0, D: 0}})
class TestCautiousQLearner(TestPlayer):
    """Tests for the Cautious QLearner strategy."""

    name = "Cautious QLearner"
    player = axelrod.CautiousQLearner
    expected_classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": True,
        "makes_use_of": set(["game"]),
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def test_qs_update(self):
        """Test that the q and v values update."""
        # Fixed seed: the exact Q values asserted below depend on it.
        random.seed(5)
        p1 = axelrod.CautiousQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.Qs, {"": {C: 0, D: 0.1}, "0.0": {C: 0, D: 0}})
        simulate_play(p1, p2)
        self.assertEqual(
            p1.Qs,
            {
                "": {C: 0, D: 0.1},
                # 0.30000000000000004 is the exact float result of the
                # update arithmetic.
                "0.0": {C: 0.30000000000000004, D: 0},
                "C1.0": {C: 0, D: 0.0},
            },
        )

    def test_vs_update(self):
        """Test that the q and v values update."""
        random.seed(5)
        p1 = axelrod.CautiousQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {"": 0.1, "0.0": 0})
        simulate_play(p1, p2)
        self.assertEqual(p1.Vs, {"": 0.1, "0.0": 0.30000000000000004, "C1.0": 0})

    def test_prev_state_updates(self):
        """Test that the previous state is tracked across plays."""
        random.seed(5)
        p1 = axelrod.CautiousQLearner()
        p2 = axelrod.Cooperator()
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, "0.0")
        simulate_play(p1, p2)
        self.assertEqual(p1.prev_state, "C1.0")

    def test_strategy(self):
        """Tests that it chooses the best strategy."""
        random.seed(9)
        p1 = axelrod.CautiousQLearner()
        # Pre-seed the learner's state and Q table so the expected
        # responses are deterministic (up to the seeded RNG).
        p1.state = "CCDC"
        p1.Qs = {"": {C: 0, D: 0}, "CCDC": {C: 2, D: 6}}
        p2 = axelrod.Cooperator()
        test_responses(self, p1, p2, [], [], [C, C, C, C, C, C, C])

    def test_reset_method(self):
        """Reset clears history, previous state and the learned Q/V tables."""
        P1 = axelrod.CautiousQLearner()
        P1.Qs = {"": {C: 0, D: -0.9}, "0.0": {C: 0, D: 0}}
        P1.Vs = {"": 0, "0.0": 0}
        P1.history = [C, D, D, D]
        P1.prev_state = C
        P1.reset()
        self.assertEqual(P1.prev_state, "")
        self.assertEqual(P1.history, [])
        self.assertEqual(P1.Vs, {"": 0})
        self.assertEqual(P1.Qs, {"": {C: 0, D: 0}})
<|endoftext|> |
<|endoftext|># -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License:
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# --------------------------------------------------------------------------
import json
import requests
from azureml.errors import AzureMLConflictHttpError
try:
    from urllib.parse import urljoin
except ImportError:
    # Bug fix: the fallback previously re-imported urllib.parse (identical
    # to the line that just failed); on Python 2 urljoin lives in urlparse.
    from urlparse import urljoin
from azureml.errors import (
AzureMLHttpError,
)
__author__ = "Microsoft Corp. <[email protected]>"
__version__ = "0.2.7"
class _RestClient(object):
    """HTTP client for the Azure ML Studio REST API.

    Wraps ``requests`` calls with the workspace access-token headers and
    translates error responses into ``AzureMLHttpError``s.
    """

    SERVICE_ROOT = "api/"
    INTERMEDIATE_DATASET_URI_FMT = (
        SERVICE_ROOT + "workspaces/{0}/experiments/{1}/outputdata/{2}/{3}"
    )
    EXPERIMENTS_URI_FMT = SERVICE_ROOT + "workspaces/{0}/experiments"
    DATASOURCES_URI_FMT = SERVICE_ROOT + "workspaces/{0}/datasources"
    DATASOURCE_URI_FMT = SERVICE_ROOT + "workspaces/{0}/datasources/{1}"
    UPLOAD_URI_FMI = (
        SERVICE_ROOT + "resourceuploads/workspaces/{0}/?userStorage=true&dataTypeId={1}"
    )
    UPLOAD_CHUNK_URI_FMT = (
        SERVICE_ROOT
        + "blobuploads/workspaces/{0}/?numberOfBlocks={1}&blockId={2}&uploadId={3}&dataTypeId={4}"
    )
    SESSION_ID_HEADER_NAME = "x-ms-client-session-id"
    SESSION_ID_HEADER_VALUE = "DefaultSession"
    ACCESS_TOKEN_HEADER_NAME = "x-ms-metaanalytics-authorizationtoken"
    CONTENT_TYPE_HEADER_NAME = "Content-Type"
    CONTENT_TYPE_HEADER_VALUE_JSON = "application/json;charset=UTF8"
    CHUNK_SIZE = 0x200000  # 2 MiB per uploaded block
    DEFAULT_OWNER = "Python SDK"
    USER_AGENT_HEADER_NAME = "User-Agent"
    USER_AGENT_HEADER_VALUE = "pyazureml/" + __version__

    def __init__(self, service_endpoint, access_token):
        """
        Parameters
        ----------
        service_endpoint : str
            Base URL of the service; API paths are joined onto it.
        access_token : str
            Workspace authorization token sent in every request's headers.
        """
        self._service_endpoint = service_endpoint
        self._access_token = access_token

    def get_experiments(self, workspace_id):
        """Runs HTTP GET request to retrieve the list of experiments."""
        api_path = self.EXPERIMENTS_URI_FMT.format(workspace_id)
        return self._send_get_req(api_path)

    def get_datasets(self, workspace_id):
        """Runs HTTP GET request to retrieve the list of datasets."""
        api_path = self.DATASOURCES_URI_FMT.format(workspace_id)
        return self._send_get_req(api_path)

    def get_dataset(self, workspace_id, dataset_id):
        """Runs HTTP GET request to retrieve a single dataset."""
        api_path = self.DATASOURCE_URI_FMT.format(workspace_id, dataset_id)
        return self._send_get_req(api_path)

    def open_intermediate_dataset_contents(
        self, workspace_id, experiment_id, node_id, port_name
    ):
        """Return a streaming file-like object for an intermediate dataset."""
        return self._get_intermediate_dataset_contents(
            workspace_id, experiment_id, node_id, port_name, stream=True
        ).raw

    def read_intermediate_dataset_contents_binary(
        self, workspace_id, experiment_id, node_id, port_name
    ):
        """Return an intermediate dataset's contents as bytes."""
        return self._get_intermediate_dataset_contents(
            workspace_id, experiment_id, node_id, port_name, stream=False
        ).content

    def read_intermediate_dataset_contents_text(
        self, workspace_id, experiment_id, node_id, port_name
    ):
        """Return an intermediate dataset's contents as text."""
        return self._get_intermediate_dataset_contents(
            workspace_id, experiment_id, node_id, port_name, stream=False
        ).text

    def _get_intermediate_dataset_contents(
        self, workspace_id, experiment_id, node_id, port_name, stream
    ):
        # NOTE(review): unlike _send_get_req, error status codes are not
        # translated to AzureMLHttpError here — confirm that is intended.
        api_path = self.INTERMEDIATE_DATASET_URI_FMT.format(
            workspace_id, experiment_id, node_id, port_name
        )
        response = requests.get(
            url=urljoin(self._service_endpoint, api_path),
            headers=self._get_headers(),
            stream=stream,
        )
        return response

    def open_dataset_contents(self, url):
        """Return a streaming file-like object for the dataset at *url*."""
        response = requests.get(url, stream=True)
        return response.raw

    def read_dataset_contents_binary(self, url):
        """Return the contents of the dataset at *url* as bytes."""
        response = requests.get(url)
        return response.content

    def read_dataset_contents_text(self, url):
        """Return the contents of the dataset at *url* as text."""
        response = requests.get(url)
        return response.text

    def upload_dataset(
        self, workspace_id, name, description, data_type_id, raw_data, family_id
    ):
        """Upload *raw_data* and register it as a new datasource.

        Uploading is a two step process: the raw bytes are first uploaded
        in fixed-size chunks, then a DataSource record pointing at the
        upload is created.  Returns the new datasource id.

        Raises
        ------
        AzureMLConflictHttpError
            If a dataset with the same *name* already exists.
        AzureMLHttpError
            For any other HTTP error returned by the service.
        """
        # uploading data is a two step process. First we upload the raw data
        api_path = self.UPLOAD_URI_FMI.format(workspace_id, data_type_id)
        upload_result = self._send_post_req(api_path, data=b"")
        # now get the id that was generated
        upload_id = upload_result["Id"]
        # Upload the data in chunks...
        total_chunks = int((len(raw_data) + (self.CHUNK_SIZE - 1)) / self.CHUNK_SIZE)
        for chunk in range(total_chunks):
            chunk_url = self.UPLOAD_CHUNK_URI_FMT.format(
                workspace_id,
                total_chunks,  # number of blocks
                chunk,  # block id
                upload_id,
                data_type_id,
            )
            chunk_data = raw_data[
                chunk * self.CHUNK_SIZE : (chunk + 1) * self.CHUNK_SIZE
            ]
            self._send_post_req(chunk_url, data=chunk_data)
        # use that to construct the DataSource metadata
        metadata = {
            "DataSource": {
                "Name": name,
                "DataTypeId": data_type_id,
                "Description": description,
                "FamilyId": family_id,
                "Owner": self.DEFAULT_OWNER,
                "SourceOrigin": "FromResourceUpload",
            },
            "UploadId": upload_id,
            "UploadedFromFileName": "",
            "ClientPoll": True,
        }
        api_path = self.DATASOURCES_URI_FMT.format(workspace_id)
        # Bug fix: the try/except previously wrapped only the .format()
        # call above, which can never raise AzureMLConflictHttpError, so
        # the friendly "already exists" message was unreachable.  It now
        # wraps the POST that actually talks to the service.
        try:
            datasource_id = self._send_post_req(
                api_path, json.dumps(metadata), self.CONTENT_TYPE_HEADER_VALUE_JSON
            )
        except AzureMLConflictHttpError as e:
            raise AzureMLConflictHttpError(
                'A data set named "{}" already exists'.format(name), e.status_code
            )
        return datasource_id

    def _send_get_req(self, api_path):
        """GET *api_path* and return the decoded JSON body.

        Raises AzureMLHttpError for any 4xx/5xx response.
        """
        response = requests.get(
            url=urljoin(self._service_endpoint, api_path), headers=self._get_headers()
        )
        if response.status_code >= 400:
            raise AzureMLHttpError(response.text, response.status_code)
        return response.json()

    def _send_post_req(self, api_path, data, content_type=None):
        """POST *data* to *api_path* and return the decoded JSON body.

        Raises AzureMLHttpError for any 4xx/5xx response.
        """
        response = requests.post(
            url=urljoin(self._service_endpoint, api_path),
            data=data,
            headers=self._get_headers(content_type),
        )
        if response.status_code >= 400:
            raise AzureMLHttpError(response.text, response.status_code)
        return response.json()

    def _get_headers(self, content_type=None):
        """Build the common request headers, optionally overriding the
        Content-Type (defaults to JSON).
        """
        headers = {
            self.USER_AGENT_HEADER_NAME: self.USER_AGENT_HEADER_VALUE,
            self.CONTENT_TYPE_HEADER_NAME: self.CONTENT_TYPE_HEADER_VALUE_JSON,
            self.SESSION_ID_HEADER_NAME: self.SESSION_ID_HEADER_VALUE,
            self.ACCESS_TOKEN_HEADER_NAME: self._access_token,
        }
        if content_type:
            headers[self.CONTENT_TYPE_HEADER_NAME] = content_type
        return headers
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class OSType(Enum):
    """Operating system types."""

    linux = "linux"
    windows = "windows"
    unmapped = "unmapped"
class CertificateState(Enum):
    """Lifecycle states of a certificate."""

    active = "active"
    deleting = "deleting"
    deletefailed = "deletefailed"
class CertificateFormat(Enum):
    """Certificate file formats."""

    pfx = "pfx"
    cer = "cer"
    unmapped = "unmapped"
class ComputeNodeFillType(Enum):
    """Fill type options for compute nodes (spread or pack)."""

    spread = "spread"
    pack = "pack"
    unmapped = "unmapped"
class CertificateStoreLocation(Enum):
    """Certificate store locations."""

    currentuser = "currentuser"
    localmachine = "localmachine"
    unmapped = "unmapped"
class CertificateVisibility(Enum):
    """Certificate visibility options."""

    starttask = "starttask"
    task = "task"
    remoteuser = "remoteuser"
    unmapped = "unmapped"
class PoolLifetimeOption(Enum):
    """Pool lifetime options."""

    jobschedule = "jobschedule"
    job = "job"
    unmapped = "unmapped"
class JobScheduleState(Enum):
    """States of a job schedule."""

    active = "active"
    completed = "completed"
    disabled = "disabled"
    terminating = "terminating"
    deleting = "deleting"
class SchedulingErrorCategory(Enum):
    """Categories of scheduling errors."""

    usererror = "usererror"
    servererror = "servererror"
    unmapped = "unmapped"
class JobState(Enum):
    """States of a job."""

    active = "active"
    disabling = "disabling"
    disabled = "disabled"
    enabling = "enabling"
    terminating = "terminating"
    completed = "completed"
    deleting = "deleting"
class JobPreparationTaskState(Enum):
    """States of a job preparation task."""

    running = "running"
    completed = "completed"
class JobReleaseTaskState(Enum):
    """States of a job release task."""

    running = "running"
    completed = "completed"
class PoolState(Enum):
    """States of a pool."""

    active = "active"
    deleting = "deleting"
    upgrading = "upgrading"
class AllocationState(Enum):
    """Allocation states."""

    steady = "steady"
    resizing = "resizing"
    stopping = "stopping"
class TaskState(Enum):
    """States of a task."""

    active = "active"
    preparing = "preparing"
    running = "running"
    completed = "completed"
class TaskAddStatus(Enum):
    """Status values for adding a task."""

    success = "success"
    clienterror = "clienterror"
    servererror = "servererror"
    unmapped = "unmapped"
class StartTaskState(Enum):
    """States of a start task."""

    running = "running"
    completed = "completed"
class ComputeNodeState(Enum):
    """States of a compute node."""

    idle = "idle"
    rebooting = "rebooting"
    reimaging = "reimaging"
    running = "running"
    unusable = "unusable"
    creating = "creating"
    starting = "starting"
    waitingforstarttask = "waitingforstarttask"
    starttaskfailed = "starttaskfailed"
    unknown = "unknown"
    leavingpool = "leavingpool"
    offline = "offline"
class SchedulingState(Enum):
    """Whether scheduling is enabled or disabled."""
    enabled = "enabled"
    disabled = "disabled"
class DisableJobOption(Enum):
    """What happens to running tasks when a job is disabled (requeue, terminate, or wait)."""
    requeue = "requeue"
    terminate = "terminate"
    wait = "wait"
class ComputeNodeDeallocationOption(Enum):
    """When to deallocate a compute node relative to its running tasks."""
    requeue = "requeue"
    terminate = "terminate"
    taskcompletion = "taskcompletion"
    retaineddata = "retaineddata"
class ComputeNodeRebootOption(Enum):
    """When to reboot a compute node relative to its running tasks."""
    requeue = "requeue"
    terminate = "terminate"
    taskcompletion = "taskcompletion"
    retaineddata = "retaineddata"
class ComputeNodeReimageOption(Enum):
    """When to reimage a compute node relative to its running tasks."""
    requeue = "requeue"
    terminate = "terminate"
    taskcompletion = "taskcompletion"
    retaineddata = "retaineddata"
class DisableComputeNodeSchedulingOption(Enum):
    """What happens to running tasks when scheduling is disabled on a node."""
    requeue = "requeue"
    terminate = "terminate"
    taskcompletion = "taskcompletion"
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComputeNodeGetRemoteLoginSettingsOptions(Model):
    """
    Additional parameters for the GetRemoteLoginSettings operation.

    :param timeout: Maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity as a bare
     GUID (no curly braces), e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should echo the
     client-request-id identifier in the response.
    :type return_client_request_id: bool
    :param ocp_date: Time the request was issued; populated from the
     current system clock when omitted.
    :type ocp_date: datetime
    """

    def __init__(
        self,
        timeout=30,
        client_request_id=None,
        return_client_request_id=None,
        ocp_date=None,
    ):
        # Plain value holder: each keyword maps straight onto an attribute.
        self.ocp_date = ocp_date
        self.return_client_request_id = return_client_request_id
        self.client_request_id = client_request_id
        self.timeout = timeout
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobEnableOptions(Model):
    """
    Additional parameters for the Enable operation.

    :param timeout: Maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity as a bare
     GUID (no curly braces), e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should echo the
     client-request-id identifier in the response.
    :type return_client_request_id: bool
    :param ocp_date: Time the request was issued; populated from the
     current system clock when omitted.
    :type ocp_date: datetime
    :param if_match: Perform the operation only if the resource's ETag is
     an exact match for this value.
    :type if_match: str
    :param if_none_match: Perform the operation only if the resource's
     ETag does not match this value.
    :type if_none_match: str
    :param if_modified_since: Perform the operation only if the resource
     has been modified since this date/time.
    :type if_modified_since: datetime
    :param if_unmodified_since: Perform the operation only if the resource
     has not been modified since this date/time.
    :type if_unmodified_since: datetime
    """

    def __init__(
        self,
        timeout=30,
        client_request_id=None,
        return_client_request_id=None,
        ocp_date=None,
        if_match=None,
        if_none_match=None,
        if_modified_since=None,
        if_unmodified_since=None,
    ):
        # Conditional-access (ETag / timestamp) headers.
        self.if_match = if_match
        self.if_none_match = if_none_match
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
        # Request-tracking options.
        self.ocp_date = ocp_date
        self.return_client_request_id = return_client_request_id
        self.client_request_id = client_request_id
        self.timeout = timeout
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobScheduleTerminateOptions(Model):
    """
    Additional parameters for the Terminate operation.

    :param timeout: Maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity as a bare
     GUID (no curly braces), e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should echo the
     client-request-id identifier in the response.
    :type return_client_request_id: bool
    :param ocp_date: Time the request was issued; populated from the
     current system clock when omitted.
    :type ocp_date: datetime
    :param if_match: Perform the operation only if the resource's ETag is
     an exact match for this value.
    :type if_match: str
    :param if_none_match: Perform the operation only if the resource's
     ETag does not match this value.
    :type if_none_match: str
    :param if_modified_since: Perform the operation only if the resource
     has been modified since this date/time.
    :type if_modified_since: datetime
    :param if_unmodified_since: Perform the operation only if the resource
     has not been modified since this date/time.
    :type if_unmodified_since: datetime
    """

    def __init__(
        self,
        timeout=30,
        client_request_id=None,
        return_client_request_id=None,
        ocp_date=None,
        if_match=None,
        if_none_match=None,
        if_modified_since=None,
        if_unmodified_since=None,
    ):
        # Fold every keyword straight into the matching attribute.
        for _name, _value in (
            ("timeout", timeout),
            ("client_request_id", client_request_id),
            ("return_client_request_id", return_client_request_id),
            ("ocp_date", ocp_date),
            ("if_match", if_match),
            ("if_none_match", if_none_match),
            ("if_modified_since", if_modified_since),
            ("if_unmodified_since", if_unmodified_since),
        ):
            setattr(self, _name, _value)
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolEvaluateAutoScaleOptions(Model):
    """
    Additional parameters for the EvaluateAutoScale operation.

    :param timeout: Maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity as a bare
     GUID (no curly braces), e.g. 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should echo the
     client-request-id identifier in the response.
    :type return_client_request_id: bool
    :param ocp_date: Time the request was issued; populated from the
     current system clock when omitted.
    :type ocp_date: datetime
    """

    def __init__(
        self,
        timeout=30,
        client_request_id=None,
        return_client_request_id=None,
        ocp_date=None,
    ):
        # Plain value holder: each keyword maps straight onto an attribute.
        self.ocp_date = ocp_date
        self.return_client_request_id = return_client_request_id
        self.client_request_id = client_request_id
        self.timeout = timeout
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StartTaskInformation(Model):
    """
    Information about a start task running on a compute node.

    :param state: State of the start task on the compute node. Possible
     values include: 'running', 'completed'
    :type state: str
    :param start_time: Time at which the start task started running.
    :type start_time: datetime
    :param end_time: Time at which the start task stopped running.
    :type end_time: datetime
    :param exit_code: Exit code of the start task.
    :type exit_code: int
    :param scheduling_error: Any error encountered scheduling the start
     task.
    :type scheduling_error: :class:`TaskSchedulingError
     <azure.batch.models.TaskSchedulingError>`
    :param retry_count: Number of times the task has been retried by the
     Batch service.
    :type retry_count: int
    :param last_retry_time: Most recent time at which a retry of the task
     started running.
    :type last_retry_time: datetime
    """

    # Fields the service always populates.
    _validation = {
        "state": {"required": True},
        "start_time": {"required": True},
        "retry_count": {"required": True},
    }

    # Wire-name / msrest type mapping used by the (de)serializer.
    _attribute_map = {
        "state": {"key": "state", "type": "StartTaskState"},
        "start_time": {"key": "startTime", "type": "iso-8601"},
        "end_time": {"key": "endTime", "type": "iso-8601"},
        "exit_code": {"key": "exitCode", "type": "int"},
        "scheduling_error": {"key": "schedulingError", "type": "TaskSchedulingError"},
        "retry_count": {"key": "retryCount", "type": "int"},
        "last_retry_time": {"key": "lastRetryTime", "type": "iso-8601"},
    }

    def __init__(
        self,
        state,
        start_time,
        retry_count,
        end_time=None,
        exit_code=None,
        scheduling_error=None,
        last_retry_time=None,
    ):
        # Required fields first, then the optional ones.
        self.state = state
        self.start_time = start_time
        self.retry_count = retry_count
        self.end_time = end_time
        self.exit_code = exit_code
        self.scheduling_error = scheduling_error
        self.last_retry_time = last_retry_time
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
import uuid
from .. import models
class ApplicationOperations(object):
    """ApplicationOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config

    def list(
        self,
        application_list_options=None,
        custom_headers=None,
        raw=False,
        **operation_config
    ):
        """
        Lists all of the applications available in the specified account.

        :param application_list_options: Additional parameters for the
         operation
        :type application_list_options: :class:`ApplicationListOptions
         <azure.batch.models.ApplicationListOptions>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`ApplicationSummaryPaged
         <azure.batch.models.ApplicationSummaryPaged>`
        """
        # Unpack the optional parameter-group object into locals
        # (each stays None when the group is absent).
        max_results = None
        if application_list_options is not None:
            max_results = application_list_options.max_results
        timeout = None
        if application_list_options is not None:
            timeout = application_list_options.timeout
        client_request_id = None
        if application_list_options is not None:
            client_request_id = application_list_options.client_request_id
        return_client_request_id = None
        if application_list_options is not None:
            return_client_request_id = application_list_options.return_client_request_id
        ocp_date = None
        if application_list_options is not None:
            ocp_date = application_list_options.ocp_date

        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = "/applications"

                # Construct parameters
                query_parameters = {}
                query_parameters["api-version"] = self._serialize.query(
                    "self.config.api_version", self.config.api_version, "str"
                )
                if max_results is not None:
                    query_parameters["maxresults"] = self._serialize.query(
                        "max_results", max_results, "int"
                    )
                if timeout is not None:
                    query_parameters["timeout"] = self._serialize.query(
                        "timeout", timeout, "int"
                    )
            else:
                # Follow-up pages: the service hands back a complete link.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters[
                "Content-Type"
            ] = "application/json; odata=minimalmetadata; charset=utf-8"
            if self.config.generate_client_request_id:
                header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters["accept-language"] = self._serialize.header(
                    "self.config.accept_language", self.config.accept_language, "str"
                )
            if client_request_id is not None:
                header_parameters["client-request-id"] = self._serialize.header(
                    "client_request_id", client_request_id, "str"
                )
            if return_client_request_id is not None:
                header_parameters["return-client-request-id"] = self._serialize.header(
                    "return_client_request_id", return_client_request_id, "bool"
                )
            if ocp_date is not None:
                header_parameters["ocp-date"] = self._serialize.header(
                    "ocp_date", ocp_date, "rfc-1123"
                )

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                raise models.BatchErrorException(self._deserialize, response)

            return response

        # Deserialize response
        deserialized = models.ApplicationSummaryPaged(
            internal_paging, self._deserialize.dependencies
        )

        if raw:
            header_dict = {}
            client_raw_response = models.ApplicationSummaryPaged(
                internal_paging, self._deserialize.dependencies, header_dict
            )
            return client_raw_response

        return deserialized

    def get(
        self,
        application_id,
        application_get_options=None,
        custom_headers=None,
        raw=False,
        **operation_config
    ):
        """
        Gets information about the specified application.

        :param application_id: The id of the application.
        :type application_id: str
        :param application_get_options: Additional parameters for the
         operation
        :type application_get_options: :class:`ApplicationGetOptions
         <azure.batch.models.ApplicationGetOptions>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`ApplicationSummary
         <azure.batch.models.ApplicationSummary>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Unpack the optional parameter-group object into locals.
        timeout = None
        if application_get_options is not None:
            timeout = application_get_options.timeout
        client_request_id = None
        if application_get_options is not None:
            client_request_id = application_get_options.client_request_id
        return_client_request_id = None
        if application_get_options is not None:
            return_client_request_id = application_get_options.return_client_request_id
        ocp_date = None
        if application_get_options is not None:
            ocp_date = application_get_options.ocp_date

        # Construct URL
        url = "/applications/{applicationId}"
        path_format_arguments = {
            "applicationId": self._serialize.url(
                "application_id", application_id, "str"
            )
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters["api-version"] = self._serialize.query(
            "self.config.api_version", self.config.api_version, "str"
        )
        if timeout is not None:
            query_parameters["timeout"] = self._serialize.query(
                "timeout", timeout, "int"
            )

        # Construct headers
        header_parameters = {}
        header_parameters[
            "Content-Type"
        ] = "application/json; odata=minimalmetadata; charset=utf-8"
        if self.config.generate_client_request_id:
            header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters["accept-language"] = self._serialize.header(
                "self.config.accept_language", self.config.accept_language, "str"
            )
        if client_request_id is not None:
            header_parameters["client-request-id"] = self._serialize.header(
                "client_request_id", client_request_id, "str"
            )
        if return_client_request_id is not None:
            header_parameters["return-client-request-id"] = self._serialize.header(
                "return_client_request_id", return_client_request_id, "bool"
            )
        if ocp_date is not None:
            header_parameters["ocp-date"] = self._serialize.header(
                "ocp_date", ocp_date, "rfc-1123"
            )

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            raise models.BatchErrorException(self._deserialize, response)

        deserialized = None
        header_dict = {}

        if response.status_code == 200:
            deserialized = self._deserialize("ApplicationSummary", response)
            header_dict = {
                "client-request-id": "str",
                "request-id": "str",
                "ETag": "str",
                "Last-Modified": "rfc-1123",
            }

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            client_raw_response.add_headers(header_dict)
            return client_raw_response

        return deserialized
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationCreateParameters(Model):
    """
    Request parameters for creating a new application.

    :param available_to_other_tenants: Indicates if the application will be
     available to other tenants.
    :type available_to_other_tenants: bool
    :param display_name: Application display name.
    :type display_name: str
    :param homepage: Application homepage.
    :type homepage: str
    :param identifier_uris: Application Uris.
    :type identifier_uris: list of str
    :param reply_urls: Application reply Urls.
    :type reply_urls: list of str
    :param key_credentials: List of KeyCredential objects.
    :type key_credentials: list of :class:`KeyCredential
     <azure.graphrbac.models.KeyCredential>`
    :param password_credentials: List of PasswordCredential objects.
    :type password_credentials: list of :class:`PasswordCredential
     <azure.graphrbac.models.PasswordCredential>`
    """

    # The first four parameters are mandatory when creating an application.
    _validation = {
        "available_to_other_tenants": {"required": True},
        "display_name": {"required": True},
        "homepage": {"required": True},
        "identifier_uris": {"required": True},
    }

    # Wire-name / msrest type mapping used by the serializer.
    _attribute_map = {
        "available_to_other_tenants": {
            "key": "availableToOtherTenants",
            "type": "bool",
        },
        "display_name": {"key": "displayName", "type": "str"},
        "homepage": {"key": "homepage", "type": "str"},
        "identifier_uris": {"key": "identifierUris", "type": "[str]"},
        "reply_urls": {"key": "replyUrls", "type": "[str]"},
        "key_credentials": {"key": "keyCredentials", "type": "[KeyCredential]"},
        "password_credentials": {
            "key": "passwordCredentials",
            "type": "[PasswordCredential]",
        },
    }

    def __init__(
        self,
        available_to_other_tenants,
        display_name,
        homepage,
        identifier_uris,
        reply_urls=None,
        key_credentials=None,
        password_credentials=None,
    ):
        # Required values first.
        self.available_to_other_tenants = available_to_other_tenants
        self.display_name = display_name
        self.homepage = homepage
        self.identifier_uris = identifier_uris
        # Optional collections default to None rather than empty lists.
        self.password_credentials = password_credentials
        self.key_credentials = key_credentials
        self.reply_urls = reply_urls
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# Re-export the generated client and its configuration at package level.
from .authorization_management_client import (
    AuthorizationManagementClient,
    AuthorizationManagementClientConfiguration,
)
from .version import VERSION

# Explicit public API of this package.
__all__ = [
    "AuthorizationManagementClient",
    "AuthorizationManagementClientConfiguration",
]
# Package version string is maintained in version.py.
__version__ = VERSION
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# Date-based version string for the generated client (Azure api-version style).
VERSION = "2015-07-01"
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class ApplicationOperations(object):
"""ApplicationOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An objec model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def activate_application_package(
self,
resource_group_name,
account_name,
id,
version,
format,
custom_headers={},
raw=False,
**operation_config
):
"""
Activates the specified application package.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param id: The id of the application.
:type id: str
:param version: The version of the application to activate.
:type version: str
:param format: The format of the application package binary file.
:type format: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
parameters = models.ActivateApplicationPackageParameters(format=format)
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{id}/versions/{version}/activate"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str", pattern="^[-\w\._]+$"
),
"accountName": self._serialize.url(
"account_name",
account_name,
"str",
max_length=24,
min_length=3,
pattern="^[-\w\._]+$",
),
"id": self._serialize.url("id", id, "str"),
"version": self._serialize.url("version", version, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct body
body_content = self._serialize.body(
parameters, "ActivateApplicationPackageParameters"
)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config
)
if response.status_code not in [204]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def add_application(
self,
resource_group_name,
account_name,
application_id,
allow_updates=None,
display_name=None,
custom_headers={},
raw=False,
**operation_config
):
"""
Adds an application to the specified Batch account.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_id: The id of the application.
:type application_id: str
:param allow_updates: A value indicating whether packages within the
application may be overwritten using the same version string.
:type allow_updates: bool
:param display_name: The display name for the application.
:type display_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
parameters = None
if allow_updates is not None or display_name is not None:
parameters = models.AddApplicationParameters(
allow_updates=allow_updates, display_name=display_name
)
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str", pattern="^[-\w\._]+$"
),
"accountName": self._serialize.url(
"account_name",
account_name,
"str",
max_length=24,
min_length=3,
pattern="^[-\w\._]+$",
),
"applicationId": self._serialize.url(
"application_id", application_id, "str"
),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct body
if parameters is not None:
body_content = self._serialize.body(parameters, "AddApplicationParameters")
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config
)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete_application(
self,
resource_group_name,
account_name,
application_id,
custom_headers={},
raw=False,
**operation_config
):
"""
Deletes an application.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_id: The id of the application.
:type application_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str", pattern="^[-\w\._]+$"
),
"accountName": self._serialize.url(
"account_name",
account_name,
"str",
max_length=24,
min_length=3,
pattern="^[-\w\._]+$",
),
"applicationId": self._serialize.url(
"application_id", application_id, "str"
),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_application(
self,
resource_group_name,
account_name,
application_id,
custom_headers={},
raw=False,
**operation_config
):
"""
Gets information about the specified application.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_id: The id of the application.
:type application_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Application <azure.mgmt.batch.models.Application>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str", pattern="^[-\w\._]+$"
),
"accountName": self._serialize.url(
"account_name",
account_name,
"str",
max_length=24,
min_length=3,
pattern="^[-\w\._]+$",
),
"applicationId": self._serialize.url(
"application_id", application_id, "str"
),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("Application", response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_application(
self,
resource_group_name,
account_name,
application_id,
parameters,
custom_headers={},
raw=False,
**operation_config
):
"""
Updates settings for the specified application.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_id: The id of the application.
:type application_id: str
:param parameters: The parameters for the request.
:type parameters: :class:`UpdateApplicationParameters
<azure.mgmt.batch.models.UpdateApplicationParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str", pattern="^[-\w\._]+$"
),
"accountName": self._serialize.url(
"account_name",
account_name,
"str",
max_length=24,
min_length=3,
pattern="^[-\w\._]+$",
),
"applicationId": self._serialize.url(
"application_id", application_id, "str"
),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct body
body_content = self._serialize.body(parameters, "UpdateApplicationParameters")
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config
)
if response.status_code not in [204]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def add_application_package(
self,
resource_group_name,
account_name,
application_id,
version,
custom_headers={},
raw=False,
**operation_config
):
"""
Creates an application package record.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_id: The id of the application.
:type application_id: str
:param version: The version of the application.
:type version: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`AddApplicationPackageResult
<azure.mgmt.batch.models.AddApplicationPackageResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}/versions/{version}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str", pattern="^[-\w\._]+$"
),
"accountName": self._serialize.url(
"account_name",
account_name,
"str",
max_length=24,
min_length=3,
pattern="^[-\w\._]+$",
),
"applicationId": self._serialize.url(
"application_id", application_id, "str"
),
"version": self._serialize.url("version", version, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize("AddApplicationPackageResult", response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_application_package(
self,
resource_group_name,
account_name,
application_id,
version,
custom_headers={},
raw=False,
**operation_config
):
"""
Deletes an application package record and the binary file.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_id: The id of the application.
:type application_id: str
:param version: The version of the application to delete.
:type version: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}/versions/{version}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str", pattern="^[-\w\._]+$"
),
"accountName": self._serialize.url(
"account_name",
account_name,
"str",
max_length=24,
min_length=3,
pattern="^[-\w\._]+$",
),
"applicationId": self._serialize.url(
"application_id", application_id, "str"
),
"version": self._serialize.url("version", version, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_application_package(
self,
resource_group_name,
account_name,
application_id,
version,
custom_headers={},
raw=False,
**operation_config
):
"""
Gets information about the specified application package.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_id: The id of the application.
:type application_id: str
:param version: The version of the application.
:type version: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`GetApplicationPackageResult
<azure.mgmt.batch.models.GetApplicationPackageResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}/versions/{version}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str", pattern="^[-\w\._]+$"
),
"accountName": self._serialize.url(
"account_name",
account_name,
"str",
max_length=24,
min_length=3,
pattern="^[-\w\._]+$",
),
"applicationId": self._serialize.url(
"application_id", application_id, "str"
),
"version": self._serialize.url("version", version, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("GetApplicationPackageResult", response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self,
resource_group_name,
account_name,
maxresults=None,
custom_headers={},
raw=False,
**operation_config
):
"""
Lists all of the applications in the specified account.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param maxresults: The maximum number of items to return in the
response.
:type maxresults: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ApplicationPaged
<azure.mgmt.batch.models.ApplicationPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name",
resource_group_name,
"str",
pattern="^[-\w\._]+$",
),
"accountName": self._serialize.url(
"account_name",
account_name,
"str",
max_length=24,
min_length=3,
pattern="^[-\w\._]+$",
),
"subscriptionId": self._serialize.url(
"self.config.subscription_id",
self.config.subscription_id,
"str",
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if maxresults is not None:
query_parameters["maxresults"] = self._serialize.query(
"maxresults", maxresults, "int"
)
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
return response
# Deserialize response
deserialized = models.ApplicationPaged(
internal_paging, self._deserialize.dependencies
)
if raw:
header_dict = {}
client_raw_response = models.ApplicationPaged(
internal_paging, self._deserialize.dependencies, header_dict
)
return client_raw_response
return deserialized
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ProfilePaged(Paged):
    """Paging container for iterating over a collection of
    :class:`Profile` objects returned by the service.
    """

    # Maps Python attribute names onto the wire-format JSON keys.
    _attribute_map = {
        "next_link": {"key": "nextLink", "type": "str"},
        "current_page": {"key": "value", "type": "[Profile]"},
    }

    def __init__(self, *args, **kwargs):
        # Delegate all paging state handling to the msrest base class.
        super(ProfilePaged, self).__init__(*args, **kwargs)
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BootDiagnosticsInstanceView(Model):
    """The instance view of a virtual machine's boot diagnostics.

    :param console_screenshot_blob_uri: Gets or sets the console screenshot
     blob Uri.
    :type console_screenshot_blob_uri: str
    :param serial_console_log_blob_uri: Gets or sets the Linux serial console
     log blob Uri.
    :type serial_console_log_blob_uri: str
    """

    # Maps Python attribute names onto the wire-format JSON keys.
    _attribute_map = {
        "console_screenshot_blob_uri": {"key": "consoleScreenshotBlobUri", "type": "str"},
        "serial_console_log_blob_uri": {"key": "serialConsoleLogBlobUri", "type": "str"},
    }

    def __init__(self, console_screenshot_blob_uri=None, serial_console_log_blob_uri=None):
        self.console_screenshot_blob_uri = console_screenshot_blob_uri
        self.serial_console_log_blob_uri = serial_console_log_blob_uri
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UpgradePolicy(Model):
    """Describes an upgrade policy - automatic or manual.

    :param mode: Gets or sets the upgrade mode. Possible values include:
     'Automatic', 'Manual'
    :type mode: str
    """

    # Maps the Python attribute onto the wire-format JSON key;
    # serialized via the UpgradeMode enum type.
    _attribute_map = {"mode": {"key": "mode", "type": "UpgradeMode"}}

    def __init__(self, mode=None):
        self.mode = mode
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetOSProfile(Model):
    """Describes a virtual machine scale set OS profile.

    :param computer_name_prefix: Gets or sets the computer name prefix.
    :type computer_name_prefix: str
    :param admin_username: Gets or sets the admin user name.
    :type admin_username: str
    :param admin_password: Gets or sets the admin user password.
    :type admin_password: str
    :param custom_data: Gets or sets a base-64 encoded string of custom data.
    :type custom_data: str
    :param windows_configuration: Gets or sets the Windows Configuration of
     the OS profile.
    :type windows_configuration: :class:`WindowsConfiguration
     <azure.mgmt.compute.models.WindowsConfiguration>`
    :param linux_configuration: Gets or sets the Linux Configuration of the
     OS profile.
    :type linux_configuration: :class:`LinuxConfiguration
     <azure.mgmt.compute.models.LinuxConfiguration>`
    :param secrets: Gets or sets the List of certificates for addition to the
     VM.
    :type secrets: list of :class:`VaultSecretGroup
     <azure.mgmt.compute.models.VaultSecretGroup>`
    """

    # Maps Python attribute names onto the wire-format JSON keys.
    _attribute_map = {
        "computer_name_prefix": {"key": "computerNamePrefix", "type": "str"},
        "admin_username": {"key": "adminUsername", "type": "str"},
        "admin_password": {"key": "adminPassword", "type": "str"},
        "custom_data": {"key": "customData", "type": "str"},
        "windows_configuration": {"key": "windowsConfiguration", "type": "WindowsConfiguration"},
        "linux_configuration": {"key": "linuxConfiguration", "type": "LinuxConfiguration"},
        "secrets": {"key": "secrets", "type": "[VaultSecretGroup]"},
    }

    def __init__(
        self,
        computer_name_prefix=None,
        admin_username=None,
        admin_password=None,
        custom_data=None,
        windows_configuration=None,
        linux_configuration=None,
        secrets=None,
    ):
        # Plain attribute assignment; serialization is driven entirely by
        # _attribute_map in the msrest Model base class.
        self.computer_name_prefix = computer_name_prefix
        self.admin_username = admin_username
        self.admin_password = admin_password
        self.custom_data = custom_data
        self.windows_configuration = windows_configuration
        self.linux_configuration = linux_configuration
        self.secrets = secrets
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class VirtualMachinesOperations(object):
"""VirtualMachinesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def capture(
self,
resource_group_name,
vm_name,
parameters,
custom_headers={},
raw=False,
**operation_config
):
"""
Captures the VM by copying virtual hard disks of the VM and outputs a
template that can be used to create similar VMs.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Capture Virtual Machine
operation.
:type parameters: :class:`VirtualMachineCaptureParameters
<azure.mgmt.compute.models.VirtualMachineCaptureParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`VirtualMachineCaptureResult
<azure.mgmt.compute.models.VirtualMachineCaptureResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"vmName": self._serialize.url("vm_name", vm_name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct body
body_content = self._serialize.body(
parameters, "VirtualMachineCaptureParameters"
)
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config
)
def get_long_running_status(status_link, headers={}):
request = self._client.get(status_link)
request.headers.update(headers)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize(
"VirtualMachineCaptureResult", response
)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
"long_running_operation_timeout", self.config.long_running_operation_timeout
)
return AzureOperationPoller(
long_running_send,
get_long_running_output,
get_long_running_status,
long_running_operation_timeout,
)
def create_or_update(
self,
resource_group_name,
vm_name,
parameters,
custom_headers={},
raw=False,
**operation_config
):
"""
The operation to create or update a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param parameters: Parameters supplied to the Create Virtual Machine
operation.
:type parameters: :class:`VirtualMachine
<azure.mgmt.compute.models.VirtualMachine>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`VirtualMachine
<azure.mgmt.compute.models.VirtualMachine>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"vmName": self._serialize.url("vm_name", vm_name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct body
body_content = self._serialize.body(parameters, "VirtualMachine")
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config
)
def get_long_running_status(status_link, headers={}):
request = self._client.get(status_link)
request.headers.update(headers)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("VirtualMachine", response)
if response.status_code == 201:
deserialized = self._deserialize("VirtualMachine", response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
"long_running_operation_timeout", self.config.long_running_operation_timeout
)
return AzureOperationPoller(
long_running_send,
get_long_running_output,
get_long_running_status,
long_running_operation_timeout,
)
def delete(
self,
resource_group_name,
vm_name,
custom_headers={},
raw=False,
**operation_config
):
"""
The operation to delete a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"vmName": self._serialize.url("vm_name", vm_name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers={}):
request = self._client.get(status_link)
request.headers.update(headers)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
"long_running_operation_timeout", self.config.long_running_operation_timeout
)
return AzureOperationPoller(
long_running_send,
get_long_running_output,
get_long_running_status,
long_running_operation_timeout,
)
def get(
self,
resource_group_name,
vm_name,
expand=None,
custom_headers={},
raw=False,
**operation_config
):
"""
The operation to get a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param expand: The expand expression to apply on the operation.
Possible values include: 'instanceView'
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VirtualMachine
<azure.mgmt.compute.models.VirtualMachine>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"vmName": self._serialize.url("vm_name", vm_name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if expand is not None:
query_parameters["$expand"] = self._serialize.query(
"expand", expand, "InstanceViewTypes"
)
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("VirtualMachine", response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def deallocate(
self,
resource_group_name,
vm_name,
custom_headers={},
raw=False,
**operation_config
):
"""
Shuts down the Virtual Machine and releases the compute resources. You
are not billed for the compute resources that this Virtual Machine
uses.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"vmName": self._serialize.url("vm_name", vm_name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers={}):
request = self._client.get(status_link)
request.headers.update(headers)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
"long_running_operation_timeout", self.config.long_running_operation_timeout
)
return AzureOperationPoller(
long_running_send,
get_long_running_output,
get_long_running_status,
long_running_operation_timeout,
)
def generalize(
self,
resource_group_name,
vm_name,
custom_headers={},
raw=False,
**operation_config
):
"""
Sets the state of the VM as Generalized.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"vmName": self._serialize.url("vm_name", vm_name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def list(
self, resource_group_name, custom_headers={}, raw=False, **operation_config
):
"""
The operation to list virtual machines under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VirtualMachinePaged
<azure.mgmt.compute.models.VirtualMachinePaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"subscriptionId": self._serialize.url(
"self.config.subscription_id",
self.config.subscription_id,
"str",
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
return response
# Deserialize response
deserialized = models.VirtualMachinePaged(
internal_paging, self._deserialize.dependencies
)
if raw:
header_dict = {}
client_raw_response = models.VirtualMachinePaged(
internal_paging, self._deserialize.dependencies, header_dict
)
return client_raw_response
return deserialized
def list_all(self, custom_headers={}, raw=False, **operation_config):
"""
Gets the list of Virtual Machines in the subscription. Use nextLink
property in the response to get the next page of Virtual Machines. Do
this till nextLink is not null to fetch all the Virtual Machines.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VirtualMachinePaged
<azure.mgmt.compute.models.VirtualMachinePaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines"
path_format_arguments = {
"subscriptionId": self._serialize.url(
"self.config.subscription_id",
self.config.subscription_id,
"str",
)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
return response
# Deserialize response
deserialized = models.VirtualMachinePaged(
internal_paging, self._deserialize.dependencies
)
if raw:
header_dict = {}
client_raw_response = models.VirtualMachinePaged(
internal_paging, self._deserialize.dependencies, header_dict
)
return client_raw_response
return deserialized
def list_available_sizes(
self,
resource_group_name,
vm_name,
custom_headers={},
raw=False,
**operation_config
):
"""
Lists all available virtual machine sizes it can be resized to for a
virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VirtualMachineSizePaged
<azure.mgmt.compute.models.VirtualMachineSizePaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"vmName": self._serialize.url("vm_name", vm_name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id",
self.config.subscription_id,
"str",
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
return response
# Deserialize response
deserialized = models.VirtualMachineSizePaged(
internal_paging, self._deserialize.dependencies
)
if raw:
header_dict = {}
client_raw_response = models.VirtualMachineSizePaged(
internal_paging, self._deserialize.dependencies, header_dict
)
return client_raw_response
return deserialized
def power_off(
self,
resource_group_name,
vm_name,
custom_headers={},
raw=False,
**operation_config
):
"""
The operation to power off (stop) a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"vmName": self._serialize.url("vm_name", vm_name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers={}):
request = self._client.get(status_link)
request.headers.update(headers)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
"long_running_operation_timeout", self.config.long_running_operation_timeout
)
return AzureOperationPoller(
long_running_send,
get_long_running_output,
get_long_running_status,
long_running_operation_timeout,
)
def restart(
self,
resource_group_name,
vm_name,
custom_headers={},
raw=False,
**operation_config
):
"""
The operation to restart a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"vmName": self._serialize.url("vm_name", vm_name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers={}):
request = self._client.get(status_link)
request.headers.update(headers)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
"long_running_operation_timeout", self.config.long_running_operation_timeout
)
return AzureOperationPoller(
long_running_send,
get_long_running_output,
get_long_running_status,
long_running_operation_timeout,
)
def start(
self,
resource_group_name,
vm_name,
custom_headers={},
raw=False,
**operation_config
):
"""
The operation to start a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"vmName": self._serialize.url("vm_name", vm_name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers={}):
request = self._client.get(status_link)
request.headers.update(headers)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
"long_running_operation_timeout", self.config.long_running_operation_timeout
)
return AzureOperationPoller(
long_running_send,
get_long_running_output,
get_long_running_status,
long_running_operation_timeout,
)
def redeploy(
    self,
    resource_group_name,
    vm_name,
    custom_headers=None,
    raw=False,
    **operation_config
):
    """
    The operation to redeploy a virtual machine.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine.
    :type vm_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns None
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # NOTE: custom_headers defaults to None (not {}) so the default is not a
    # shared mutable object; the falsy check below treats None and {} alike.

    # Construct URL
    url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy"
    path_format_arguments = {
        "resourceGroupName": self._serialize.url(
            "resource_group_name", resource_group_name, "str"
        ),
        "vmName": self._serialize.url("vm_name", vm_name, "str"),
        "subscriptionId": self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, "str"
        ),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters["api-version"] = self._serialize.query(
        "self.config.api_version", self.config.api_version, "str"
    )

    # Construct headers
    header_parameters = {}
    header_parameters["Content-Type"] = "application/json; charset=utf-8"
    if self.config.generate_client_request_id:
        # Correlation id so the request can be traced end-to-end.
        header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters["accept-language"] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, "str"
        )

    # Construct and send request
    def long_running_send():
        # Kick off the long-running operation with a POST.
        request = self._client.post(url, query_parameters)
        return self._client.send(request, header_parameters, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the status link returned by the service.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(request, header_parameters, **operation_config)

    def get_long_running_output(response):
        # 202 Accepted is the only expected status for this operation.
        if response.status_code not in [202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get("x-ms-request-id")
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    if raw:
        # Raw mode bypasses the poller and returns the initial response.
        response = long_running_send()
        return get_long_running_output(response)

    long_running_operation_timeout = operation_config.get(
        "long_running_operation_timeout", self.config.long_running_operation_timeout
    )
    return AzureOperationPoller(
        long_running_send,
        get_long_running_output,
        get_long_running_status,
        long_running_operation_timeout,
    )
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class AccountOperations(object):
"""AccountOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_storage_account(
    self,
    resource_group_name,
    account_name,
    storage_account_name,
    custom_headers=None,
    raw=False,
    **operation_config
):
    """
    Gets the specified Azure Storage account linked to the given Data Lake
    Analytics account.

    :param resource_group_name: The name of the Azure resource group that
     contains the Data Lake Analytics account.
    :type resource_group_name: str
    :param account_name: The name of the Data Lake Analytics account from
     which to retrieve Azure storage account details.
    :type account_name: str
    :param storage_account_name: The name of the Azure Storage account
     for which to retrieve the details.
    :type storage_account_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`StorageAccountInfo
     <azure.mgmt.datalake.analytics.account.models.StorageAccountInfo>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # NOTE: custom_headers defaults to None (not {}) to avoid a shared
    # mutable default argument; behavior is unchanged for callers.

    # Construct URL
    url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/StorageAccounts/{storageAccountName}"
    path_format_arguments = {
        "resourceGroupName": self._serialize.url(
            "resource_group_name", resource_group_name, "str"
        ),
        "accountName": self._serialize.url("account_name", account_name, "str"),
        "storageAccountName": self._serialize.url(
            "storage_account_name", storage_account_name, "str"
        ),
        "subscriptionId": self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, "str"
        ),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters["api-version"] = self._serialize.query(
        "self.config.api_version", self.config.api_version, "str"
    )

    # Construct headers
    header_parameters = {}
    header_parameters["Content-Type"] = "application/json; charset=utf-8"
    if self.config.generate_client_request_id:
        header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters["accept-language"] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, "str"
        )

    # Construct and send request
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get("x-ms-request-id")
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize("StorageAccountInfo", response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def delete_storage_account(
    self,
    resource_group_name,
    account_name,
    storage_account_name,
    custom_headers=None,
    raw=False,
    **operation_config
):
    """
    Updates the specified Data Lake Analytics account to remove an Azure
    Storage account.

    :param resource_group_name: The name of the Azure resource group that
     contains the Data Lake Analytics account.
    :type resource_group_name: str
    :param account_name: The name of the Data Lake Analytics account from
     which to remove the Azure Storage account.
    :type account_name: str
    :param storage_account_name: The name of the Azure Storage account to
     remove
    :type storage_account_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: None
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # NOTE: custom_headers defaults to None (not {}) to avoid a shared
    # mutable default argument; behavior is unchanged for callers.

    # Construct URL
    url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/StorageAccounts/{storageAccountName}"
    path_format_arguments = {
        "resourceGroupName": self._serialize.url(
            "resource_group_name", resource_group_name, "str"
        ),
        "accountName": self._serialize.url("account_name", account_name, "str"),
        "storageAccountName": self._serialize.url(
            "storage_account_name", storage_account_name, "str"
        ),
        "subscriptionId": self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, "str"
        ),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters["api-version"] = self._serialize.query(
        "self.config.api_version", self.config.api_version, "str"
    )

    # Construct headers
    header_parameters = {}
    header_parameters["Content-Type"] = "application/json; charset=utf-8"
    if self.config.generate_client_request_id:
        header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters["accept-language"] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, "str"
        )

    # Construct and send request
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get("x-ms-request-id")
        raise exp

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def update_storage_account(
    self,
    resource_group_name,
    account_name,
    storage_account_name,
    properties,
    custom_headers=None,
    raw=False,
    **operation_config
):
    """
    Updates the Data Lake Analytics account to replace Azure Storage blob
    account details, such as the access key and/or suffix.

    :param resource_group_name: The name of the Azure resource group that
     contains the Data Lake Analytics account.
    :type resource_group_name: str
    :param account_name: The name of the Data Lake Analytics account to
     modify storage accounts in
    :type account_name: str
    :param storage_account_name: The Azure Storage account to modify
    :type storage_account_name: str
    :param properties: Gets or sets the properties for the Azure Storage
     account being added.
    :type properties: :class:`StorageAccountProperties
     <azure.mgmt.datalake.analytics.account.models.StorageAccountProperties>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: None
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # NOTE: custom_headers defaults to None (not {}) to avoid a shared
    # mutable default argument; behavior is unchanged for callers.
    parameters = models.AddStorageAccountParameters(properties=properties)

    # Construct URL
    url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/StorageAccounts/{storageAccountName}"
    path_format_arguments = {
        "resourceGroupName": self._serialize.url(
            "resource_group_name", resource_group_name, "str"
        ),
        "accountName": self._serialize.url("account_name", account_name, "str"),
        "storageAccountName": self._serialize.url(
            "storage_account_name", storage_account_name, "str"
        ),
        "subscriptionId": self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, "str"
        ),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters["api-version"] = self._serialize.query(
        "self.config.api_version", self.config.api_version, "str"
    )

    # Construct headers
    header_parameters = {}
    header_parameters["Content-Type"] = "application/json; charset=utf-8"
    if self.config.generate_client_request_id:
        header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters["accept-language"] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, "str"
        )

    # Construct body
    body_content = self._serialize.body(parameters, "AddStorageAccountParameters")

    # Construct and send request
    request = self._client.patch(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config
    )

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get("x-ms-request-id")
        raise exp

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def add_storage_account(
    self,
    resource_group_name,
    account_name,
    storage_account_name,
    properties,
    custom_headers=None,
    raw=False,
    **operation_config
):
    """
    Updates the specified Data Lake Analytics account to add an Azure
    Storage account.

    :param resource_group_name: The name of the Azure resource group that
     contains the Data Lake Analytics account.
    :type resource_group_name: str
    :param account_name: The name of the Data Lake Analytics account to
     which to add the Azure Storage account.
    :type account_name: str
    :param storage_account_name: The name of the Azure Storage account to
     add
    :type storage_account_name: str
    :param properties: Gets or sets the properties for the Azure Storage
     account being added.
    :type properties: :class:`StorageAccountProperties
     <azure.mgmt.datalake.analytics.account.models.StorageAccountProperties>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: None
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # NOTE: custom_headers defaults to None (not {}) to avoid a shared
    # mutable default argument; behavior is unchanged for callers.
    parameters = models.AddStorageAccountParameters(properties=properties)

    # Construct URL
    url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/StorageAccounts/{storageAccountName}"
    path_format_arguments = {
        "resourceGroupName": self._serialize.url(
            "resource_group_name", resource_group_name, "str"
        ),
        "accountName": self._serialize.url("account_name", account_name, "str"),
        "storageAccountName": self._serialize.url(
            "storage_account_name", storage_account_name, "str"
        ),
        "subscriptionId": self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, "str"
        ),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters["api-version"] = self._serialize.query(
        "self.config.api_version", self.config.api_version, "str"
    )

    # Construct headers
    header_parameters = {}
    header_parameters["Content-Type"] = "application/json; charset=utf-8"
    if self.config.generate_client_request_id:
        header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters["accept-language"] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, "str"
        )

    # Construct body
    body_content = self._serialize.body(parameters, "AddStorageAccountParameters")

    # Construct and send request
    request = self._client.put(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config
    )

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get("x-ms-request-id")
        raise exp

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def get_storage_container(
    self,
    resource_group_name,
    account_name,
    storage_account_name,
    container_name,
    custom_headers=None,
    raw=False,
    **operation_config
):
    """
    Gets the specified Azure Storage container associated with the given
    Data Lake Analytics and Azure Storage accounts.

    :param resource_group_name: The name of the Azure resource group that
     contains the Data Lake Analytics account.
    :type resource_group_name: str
    :param account_name: The name of the Data Lake Analytics account for
     which to retrieve blob container.
    :type account_name: str
    :param storage_account_name: The name of the Azure storage account
     from which to retrieve the blob container.
    :type storage_account_name: str
    :param container_name: The name of the Azure storage container to
     retrieve
    :type container_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`BlobContainer
     <azure.mgmt.datalake.analytics.account.models.BlobContainer>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # NOTE: custom_headers defaults to None (not {}) to avoid a shared
    # mutable default argument; behavior is unchanged for callers.

    # Construct URL
    url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/StorageAccounts/{storageAccountName}/Containers/{containerName}"
    path_format_arguments = {
        "resourceGroupName": self._serialize.url(
            "resource_group_name", resource_group_name, "str"
        ),
        "accountName": self._serialize.url("account_name", account_name, "str"),
        "storageAccountName": self._serialize.url(
            "storage_account_name", storage_account_name, "str"
        ),
        "containerName": self._serialize.url(
            "container_name", container_name, "str"
        ),
        "subscriptionId": self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, "str"
        ),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters["api-version"] = self._serialize.query(
        "self.config.api_version", self.config.api_version, "str"
    )

    # Construct headers
    header_parameters = {}
    header_parameters["Content-Type"] = "application/json; charset=utf-8"
    if self.config.generate_client_request_id:
        header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters["accept-language"] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, "str"
        )

    # Construct and send request
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get("x-ms-request-id")
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize("BlobContainer", response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def list_storage_containers(
    self,
    resource_group_name,
    account_name,
    storage_account_name,
    custom_headers=None,
    raw=False,
    **operation_config
):
    """
    Lists the Azure Storage containers, if any, associated with the
    specified Data Lake Analytics and Azure Storage account combination.
    The response includes a link to the next page of results, if any.

    :param resource_group_name: The name of the Azure resource group that
     contains the Data Lake Analytics account.
    :type resource_group_name: str
    :param account_name: The name of the Data Lake Analytics account for
     which to list Azure Storage blob containers.
    :type account_name: str
    :param storage_account_name: The name of the Azure storage account
     from which to list blob containers.
    :type storage_account_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`BlobContainerPaged
     <azure.mgmt.datalake.analytics.account.models.BlobContainerPaged>`
    """
    # NOTE: custom_headers defaults to None (not {}) to avoid a shared
    # mutable default argument; behavior is unchanged for callers.

    def internal_paging(next_link=None, raw=False):
        # First call builds the full URL; subsequent calls follow next_link.
        if not next_link:
            # Construct URL
            url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/StorageAccounts/{storageAccountName}/Containers"
            path_format_arguments = {
                "resourceGroupName": self._serialize.url(
                    "resource_group_name", resource_group_name, "str"
                ),
                "accountName": self._serialize.url(
                    "account_name", account_name, "str"
                ),
                "storageAccountName": self._serialize.url(
                    "storage_account_name", storage_account_name, "str"
                ),
                "subscriptionId": self._serialize.url(
                    "self.config.subscription_id",
                    self.config.subscription_id,
                    "str",
                ),
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters["api-version"] = self._serialize.query(
                "self.config.api_version", self.config.api_version, "str"
            )
        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters["Content-Type"] = "application/json; charset=utf-8"
        if self.config.generate_client_request_id:
            header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters["accept-language"] = self._serialize.header(
                "self.config.accept_language", self.config.accept_language, "str"
            )

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get("x-ms-request-id")
            raise exp

        return response

    # Deserialize response into a lazily-fetching paged collection.
    deserialized = models.BlobContainerPaged(
        internal_paging, self._deserialize.dependencies
    )

    if raw:
        header_dict = {}
        client_raw_response = models.BlobContainerPaged(
            internal_paging, self._deserialize.dependencies, header_dict
        )
        return client_raw_response

    return deserialized
def storage_containers_list_next(
    self, next_link, custom_headers=None, raw=False, **operation_config
):
    """
    Gets the next page of Azure Storage containers, if any, within the
    specified Azure Storage account. The response includes a link to the
    next page of results, if any.

    :param next_link: The URL to the next Azure Storage Container page.
    :type next_link: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`BlobContainerPaged
     <azure.mgmt.datalake.analytics.account.models.BlobContainerPaged>`
    """
    # NOTE: custom_headers defaults to None (not {}) to avoid a shared
    # mutable default argument; behavior is unchanged for callers.

    def internal_paging(next_link=None, raw=False):
        if not next_link:
            # Construct URL from the caller-supplied page link.
            url = "/{nextLink}"
            path_format_arguments = {
                "nextLink": self._serialize.url(
                    "next_link", next_link, "str", skip_quote=True
                )
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters["Content-Type"] = "application/json; charset=utf-8"
        if self.config.generate_client_request_id:
            header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters["accept-language"] = self._serialize.header(
                "self.config.accept_language", self.config.accept_language, "str"
            )

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get("x-ms-request-id")
            raise exp

        return response

    # Deserialize response into a lazily-fetching paged collection.
    deserialized = models.BlobContainerPaged(
        internal_paging, self._deserialize.dependencies
    )

    if raw:
        header_dict = {}
        client_raw_response = models.BlobContainerPaged(
            internal_paging, self._deserialize.dependencies, header_dict
        )
        return client_raw_response

    return deserialized
def sas_tokens_list_next(
    self, next_link, custom_headers=None, raw=False, **operation_config
):
    """
    Gets the next page of the SAS token objects within the specified Azure
    Storage account and container, if any.

    :param next_link: The URL to the next Azure Storage Container SAS
     token page.
    :type next_link: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`SasTokenInfoPaged
     <azure.mgmt.datalake.analytics.account.models.SasTokenInfoPaged>`
    """
    # NOTE: custom_headers defaults to None (not {}) to avoid a shared
    # mutable default argument; behavior is unchanged for callers.

    def internal_paging(next_link=None, raw=False):
        if not next_link:
            # Construct URL from the caller-supplied page link.
            url = "/{nextLink}"
            path_format_arguments = {
                "nextLink": self._serialize.url(
                    "next_link", next_link, "str", skip_quote=True
                )
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters["Content-Type"] = "application/json; charset=utf-8"
        if self.config.generate_client_request_id:
            header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters["accept-language"] = self._serialize.header(
                "self.config.accept_language", self.config.accept_language, "str"
            )

        # Construct and send request (SAS listing is a POST operation).
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get("x-ms-request-id")
            raise exp

        return response

    # Deserialize response into a lazily-fetching paged collection.
    deserialized = models.SasTokenInfoPaged(
        internal_paging, self._deserialize.dependencies
    )

    if raw:
        header_dict = {}
        client_raw_response = models.SasTokenInfoPaged(
            internal_paging, self._deserialize.dependencies, header_dict
        )
        return client_raw_response

    return deserialized
def list_sas_tokens(
    self,
    resource_group_name,
    account_name,
    storage_account_name,
    container_name,
    custom_headers=None,
    raw=False,
    **operation_config
):
    """
    Gets the SAS token associated with the specified Data Lake Analytics
    and Azure Storage account and container combination.

    :param resource_group_name: The name of the Azure resource group that
     contains the Data Lake Analytics account.
    :type resource_group_name: str
    :param account_name: The name of the Data Lake Analytics account from
     which an Azure Storage account's SAS token is being requested.
    :type account_name: str
    :param storage_account_name: The name of the Azure storage account
     for which the SAS token is being requested.
    :type storage_account_name: str
    :param container_name: The name of the Azure storage container for
     which the SAS token is being requested.
    :type container_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`SasTokenInfoPaged
     <azure.mgmt.datalake.analytics.account.models.SasTokenInfoPaged>`
    """
    # NOTE: custom_headers defaults to None (not {}) to avoid a shared
    # mutable default argument; behavior is unchanged for callers.

    def internal_paging(next_link=None, raw=False):
        # First call builds the full URL; subsequent calls follow next_link.
        if not next_link:
            # Construct URL
            url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/StorageAccounts/{storageAccountName}/Containers/{containerName}/listSasTokens"
            path_format_arguments = {
                "resourceGroupName": self._serialize.url(
                    "resource_group_name", resource_group_name, "str"
                ),
                "accountName": self._serialize.url(
                    "account_name", account_name, "str"
                ),
                "storageAccountName": self._serialize.url(
                    "storage_account_name", storage_account_name, "str"
                ),
                "containerName": self._serialize.url(
                    "container_name", container_name, "str"
                ),
                "subscriptionId": self._serialize.url(
                    "self.config.subscription_id",
                    self.config.subscription_id,
                    "str",
                ),
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters["api-version"] = self._serialize.query(
                "self.config.api_version", self.config.api_version, "str"
            )
        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters["Content-Type"] = "application/json; charset=utf-8"
        if self.config.generate_client_request_id:
            header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters["accept-language"] = self._serialize.header(
                "self.config.accept_language", self.config.accept_language, "str"
            )

        # Construct and send request (SAS listing is a POST operation).
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get("x-ms-request-id")
            raise exp

        return response

    # Deserialize response into a lazily-fetching paged collection.
    deserialized = models.SasTokenInfoPaged(
        internal_paging, self._deserialize.dependencies
    )

    if raw:
        header_dict = {}
        client_raw_response = models.SasTokenInfoPaged(
            internal_paging, self._deserialize.dependencies, header_dict
        )
        return client_raw_response

    return deserialized
def get_data_lake_store_account(
    self,
    resource_group_name,
    account_name,
    data_lake_store_account_name,
    custom_headers=None,
    raw=False,
    **operation_config
):
    """
    Gets the specified Data Lake Store account details in the specified
    Data Lake Analytics account.

    :param resource_group_name: The name of the Azure resource group that
     contains the Data Lake Analytics account.
    :type resource_group_name: str
    :param account_name: The name of the Data Lake Analytics account from
     which to retrieve the Data Lake Store account details.
    :type account_name: str
    :param data_lake_store_account_name: The name of the Data Lake Store
     account to retrieve
    :type data_lake_store_account_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`DataLakeStoreAccountInfo
     <azure.mgmt.datalake.analytics.account.models.DataLakeStoreAccountInfo>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # NOTE: custom_headers defaults to None (not {}) to avoid a shared
    # mutable default argument; behavior is unchanged for callers.

    # Construct URL
    url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/DataLakeStoreAccounts/{dataLakeStoreAccountName}"
    path_format_arguments = {
        "resourceGroupName": self._serialize.url(
            "resource_group_name", resource_group_name, "str"
        ),
        "accountName": self._serialize.url("account_name", account_name, "str"),
        "dataLakeStoreAccountName": self._serialize.url(
            "data_lake_store_account_name", data_lake_store_account_name, "str"
        ),
        "subscriptionId": self._serialize.url(
            "self.config.subscription_id", self.config.subscription_id, "str"
        ),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters["api-version"] = self._serialize.query(
        "self.config.api_version", self.config.api_version, "str"
    )

    # Construct headers
    header_parameters = {}
    header_parameters["Content-Type"] = "application/json; charset=utf-8"
    if self.config.generate_client_request_id:
        header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters["accept-language"] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, "str"
        )

    # Construct and send request
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get("x-ms-request-id")
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize("DataLakeStoreAccountInfo", response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def delete_data_lake_store_account(
self,
resource_group_name,
account_name,
data_lake_store_account_name,
custom_headers={},
raw=False,
**operation_config
):
"""
Updates the Data Lake Analytics account specified to remove the
specified Data Lake Store account.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Analytics account.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account from
which to remove the Data Lake Store account.
:type account_name: str
:param data_lake_store_account_name: The name of the Data Lake Store
account to remove
:type data_lake_store_account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/DataLakeStoreAccounts/{dataLakeStoreAccountName}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"accountName": self._serialize.url("account_name", account_name, "str"),
"dataLakeStoreAccountName": self._serialize.url(
"data_lake_store_account_name", data_lake_store_account_name, "str"
),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def add_data_lake_store_account(
self,
resource_group_name,
account_name,
data_lake_store_account_name,
properties,
custom_headers={},
raw=False,
**operation_config
):
"""
Updates the specified Data Lake Analytics account to include the
additional Data Lake Store account.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Analytics account.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account to
which to add the Data Lake Store account.
:type account_name: str
:param data_lake_store_account_name: The name of the Data Lake Store
account to add.
:type data_lake_store_account_name: str
:param properties: Gets or sets the properties for the Data Lake
Store account being added.
:type properties: :class:`DataLakeStoreAccountInfoProperties
<azure.mgmt.datalake.analytics.account.models.DataLakeStoreAccountInfoProperties>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
parameters = models.AddDataLakeStoreParameters(properties=properties)
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/DataLakeStoreAccounts/{dataLakeStoreAccountName}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"accountName": self._serialize.url("account_name", account_name, "str"),
"dataLakeStoreAccountName": self._serialize.url(
"data_lake_store_account_name", data_lake_store_account_name, "str"
),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct body
body_content = self._serialize.body(parameters, "AddDataLakeStoreParameters")
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config
)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def list_storage_accounts(
self,
resource_group_name,
account_name,
filter=None,
top=None,
skip=None,
expand=None,
select=None,
orderby=None,
count=None,
search=None,
format=None,
custom_headers={},
raw=False,
**operation_config
):
"""
Gets the first page of Azure Storage accounts, if any, linked to the
specified Data Lake Analytics account. The response includes a link
to the next page, if any.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Analytics account.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account for
which to list Azure Storage accounts.
:type account_name: str
:param filter: The OData filter. Optional.
:type filter: str
:param top: The number of items to return. Optional.
:type top: int
:param skip: The number of items to skip over before returning
elements. Optional.
:type skip: int
:param expand: OData expansion. Expand related resources in line with
the retrieved resources, e.g. Categories/$expand=Products would
expand Product data in line with each Category entry. Optional.
:type expand: str
:param select: OData Select statement. Limits the properties on each
entry to just those requested, e.g.
Categories?$select=CategoryName,Description. Optional.
:type select: str
:param orderby: OrderBy clause. One or more comma-separated
expressions with an optional "asc" (the default) or "desc" depending
on the order you'd like the values sorted, e.g.
Categories?$orderby=CategoryName desc. Optional.
:type orderby: str
:param count: The Boolean value of true or false to request a count
of the matching resources included with the resources in the
response, e.g. Categories?$count=true. Optional.
:type count: bool
:param search: A free form search. A free-text search expression to
match for whether a particular entry should be included in the feed,
e.g. Categories?$search=blue OR green. Optional.
:type search: str
:param format: The desired return format. Return the response in
particular formatxii without access to request headers for standard
content-type negotiation (e.g Orders?$format=json). Optional.
:type format: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountInfoPaged
<azure.mgmt.datalake.analytics.account.models.StorageAccountInfoPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/StorageAccounts/"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"accountName": self._serialize.url(
"account_name", account_name, "str"
),
"subscriptionId": self._serialize.url(
"self.config.subscription_id",
self.config.subscription_id,
"str",
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters["$filter"] = self._serialize.query(
"filter", filter, "str"
)
if top is not None:
query_parameters["$top"] = self._serialize.query("top", top, "int")
if skip is not None:
query_parameters["$skip"] = self._serialize.query(
"skip", skip, "int"
)
if expand is not None:
query_parameters["$expand"] = self._serialize.query(
"expand", expand, "str"
)
if select is not None:
query_parameters["$select"] = self._serialize.query(
"select", select, "str"
)
if orderby is not None:
query_parameters["$orderby"] = self._serialize.query(
"orderby", orderby, "str"
)
if count is not None:
query_parameters["$count"] = self._serialize.query(
"count", count, "bool"
)
if search is not None:
query_parameters["$search"] = self._serialize.query(
"search", search, "str"
)
if format is not None:
query_parameters["$format"] = self._serialize.query(
"format", format, "str"
)
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
return response
# Deserialize response
deserialized = models.StorageAccountInfoPaged(
internal_paging, self._deserialize.dependencies
)
if raw:
header_dict = {}
client_raw_response = models.StorageAccountInfoPaged(
internal_paging, self._deserialize.dependencies, header_dict
)
return client_raw_response
return deserialized
def list_data_lake_store_accounts(
self,
resource_group_name,
account_name,
filter=None,
top=None,
skip=None,
expand=None,
select=None,
orderby=None,
count=None,
search=None,
format=None,
custom_headers={},
raw=False,
**operation_config
):
"""
Gets the first page of Data Lake Store accounts linked to the
specified Data Lake Analytics account. The response includes a link
to the next page, if any.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Analytics account.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account for
which to list Data Lake Store accounts.
:type account_name: str
:param filter: OData filter. Optional.
:type filter: str
:param top: The number of items to return. Optional.
:type top: int
:param skip: The number of items to skip over before returning
elements. Optional.
:type skip: int
:param expand: OData expansion. Expand related resources in line with
the retrieved resources, e.g. Categories/$expand=Products would
expand Product data in line with each Category entry. Optional.
:type expand: str
:param select: OData Select statement. Limits the properties on each
entry to just those requested, e.g.
Categories?$select=CategoryName,Description. Optional.
:type select: str
:param orderby: OrderBy clause. One or more comma-separated
expressions with an optional "asc" (the default) or "desc" depending
on the order you'd like the values sorted, e.g.
Categories?$orderby=CategoryName desc. Optional.
:type orderby: str
:param count: The Boolean value of true or false to request a count
of the matching resources included with the resources in the
response, e.g. Categories?$count=true. Optional.
:type count: bool
:param search: A free form search. A free-text search expression to
match for whether a particular entry should be included in the feed,
e.g. Categories?$search=blue OR green. Optional.
:type search: str
:param format: The desired return format. Return the response in
particular formatxii without access to request headers for standard
content-type negotiation (e.g Orders?$format=json). Optional.
:type format: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DataLakeStoreAccountInfoPaged
<azure.mgmt.datalake.analytics.account.models.DataLakeStoreAccountInfoPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/DataLakeStoreAccounts/"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"accountName": self._serialize.url(
"account_name", account_name, "str"
),
"subscriptionId": self._serialize.url(
"self.config.subscription_id",
self.config.subscription_id,
"str",
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters["$filter"] = self._serialize.query(
"filter", filter, "str"
)
if top is not None:
query_parameters["$top"] = self._serialize.query("top", top, "int")
if skip is not None:
query_parameters["$skip"] = self._serialize.query(
"skip", skip, "int"
)
if expand is not None:
query_parameters["$expand"] = self._serialize.query(
"expand", expand, "str"
)
if select is not None:
query_parameters["$select"] = self._serialize.query(
"select", select, "str"
)
if orderby is not None:
query_parameters["$orderby"] = self._serialize.query(
"orderby", orderby, "str"
)
if count is not None:
query_parameters["$count"] = self._serialize.query(
"count", count, "bool"
)
if search is not None:
query_parameters["$search"] = self._serialize.query(
"search", search, "str"
)
if format is not None:
query_parameters["$format"] = self._serialize.query(
"format", format, "str"
)
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
return response
# Deserialize response
deserialized = models.DataLakeStoreAccountInfoPaged(
internal_paging, self._deserialize.dependencies
)
if raw:
header_dict = {}
client_raw_response = models.DataLakeStoreAccountInfoPaged(
internal_paging, self._deserialize.dependencies, header_dict
)
return client_raw_response
return deserialized
def list_by_resource_group(
self,
resource_group_name,
filter=None,
top=None,
skip=None,
expand=None,
select=None,
orderby=None,
count=None,
search=None,
format=None,
custom_headers={},
raw=False,
**operation_config
):
"""
Gets the first page of Data Lake Analytics accounts, if any, within a
specific resource group. This includes a link to the next page, if
any.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Analytics account.
:type resource_group_name: str
:param filter: OData filter. Optional.
:type filter: str
:param top: The number of items to return. Optional.
:type top: int
:param skip: The number of items to skip over before returning
elements. Optional.
:type skip: int
:param expand: OData expansion. Expand related resources in line with
the retrieved resources, e.g. Categories/$expand=Products would
expand Product data in line with each Category entry. Optional.
:type expand: str
:param select: OData Select statement. Limits the properties on each
entry to just those requested, e.g.
Categories?$select=CategoryName,Description. Optional.
:type select: str
:param orderby: OrderBy clause. One or more comma-separated
expressions with an optional "asc" (the default) or "desc" depending
on the order you'd like the values sorted, e.g.
Categories?$orderby=CategoryName desc. Optional.
:type orderby: str
:param count: The Boolean value of true or false to request a count
of the matching resources included with the resources in the
response, e.g. Categories?$count=true. Optional.
:type count: bool
:param search: A free form search. A free-text search expression to
match for whether a particular entry should be included in the feed,
e.g. Categories?$search=blue OR green. Optional.
:type search: str
:param format: The return format. Return the response in particular
formatxii without access to request headers for standard
content-type negotiation (e.g Orders?$format=json). Optional.
:type format: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DataLakeAnalyticsAccountPaged
<azure.mgmt.datalake.analytics.account.models.DataLakeAnalyticsAccountPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"subscriptionId": self._serialize.url(
"self.config.subscription_id",
self.config.subscription_id,
"str",
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters["$filter"] = self._serialize.query(
"filter", filter, "str"
)
if top is not None:
query_parameters["$top"] = self._serialize.query("top", top, "int")
if skip is not None:
query_parameters["$skip"] = self._serialize.query(
"skip", skip, "int"
)
if expand is not None:
query_parameters["$expand"] = self._serialize.query(
"expand", expand, "str"
)
if select is not None:
query_parameters["$select"] = self._serialize.query(
"select", select, "str"
)
if orderby is not None:
query_parameters["$orderby"] = self._serialize.query(
"orderby", orderby, "str"
)
if count is not None:
query_parameters["$count"] = self._serialize.query(
"count", count, "bool"
)
if search is not None:
query_parameters["$search"] = self._serialize.query(
"search", search, "str"
)
if format is not None:
query_parameters["$format"] = self._serialize.query(
"format", format, "str"
)
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
return response
# Deserialize response
deserialized = models.DataLakeAnalyticsAccountPaged(
internal_paging, self._deserialize.dependencies
)
if raw:
header_dict = {}
client_raw_response = models.DataLakeAnalyticsAccountPaged(
internal_paging, self._deserialize.dependencies, header_dict
)
return client_raw_response
return deserialized
def list(
self,
filter=None,
top=None,
skip=None,
expand=None,
select=None,
orderby=None,
count=None,
search=None,
format=None,
custom_headers={},
raw=False,
**operation_config
):
"""
Gets the first page of Data Lake Analytics accounts, if any, within
the current subscription. This includes a link to the next page, if
any.
:param filter: OData filter. Optional.
:type filter: str
:param top: The number of items to return. Optional.
:type top: int
:param skip: The number of items to skip over before returning
elements. Optional.
:type skip: int
:param expand: OData expansion. Expand related resources in line with
the retrieved resources, e.g. Categories/$expand=Products would
expand Product data in line with each Category entry. Optional.
:type expand: str
:param select: OData Select statement. Limits the properties on each
entry to just those requested, e.g.
Categories?$select=CategoryName,Description. Optional.
:type select: str
:param orderby: OrderBy clause. One or more comma-separated
expressions with an optional "asc" (the default) or "desc" depending
on the order you'd like the values sorted, e.g.
Categories?$orderby=CategoryName desc. Optional.
:type orderby: str
:param count: The Boolean value of true or false to request a count
of the matching resources included with the resources in the
response, e.g. Categories?$count=true. Optional.
:type count: bool
:param search: A free form search. A free-text search expression to
match for whether a particular entry should be included in the feed,
e.g. Categories?$search=blue OR green. Optional.
:type search: str
:param format: The desired return format. Return the response in
particular formatxii without access to request headers for standard
content-type negotiation (e.g Orders?$format=json). Optional.
:type format: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DataLakeAnalyticsAccountPaged
<azure.mgmt.datalake.analytics.account.models.DataLakeAnalyticsAccountPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = "/subscriptions/{subscriptionId}/providers/Microsoft.DataLakeAnalytics/accounts"
path_format_arguments = {
"subscriptionId": self._serialize.url(
"self.config.subscription_id",
self.config.subscription_id,
"str",
)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters["$filter"] = self._serialize.query(
"filter", filter, "str"
)
if top is not None:
query_parameters["$top"] = self._serialize.query("top", top, "int")
if skip is not None:
query_parameters["$skip"] = self._serialize.query(
"skip", skip, "int"
)
if expand is not None:
query_parameters["$expand"] = self._serialize.query(
"expand", expand, "str"
)
if select is not None:
query_parameters["$select"] = self._serialize.query(
"select", select, "str"
)
if orderby is not None:
query_parameters["$orderby"] = self._serialize.query(
"orderby", orderby, "str"
)
if count is not None:
query_parameters["$count"] = self._serialize.query(
"count", count, "bool"
)
if search is not None:
query_parameters["$search"] = self._serialize.query(
"search", search, "str"
)
if format is not None:
query_parameters["$format"] = self._serialize.query(
"format", format, "str"
)
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
return response
# Deserialize response
deserialized = models.DataLakeAnalyticsAccountPaged(
internal_paging, self._deserialize.dependencies
)
if raw:
header_dict = {}
client_raw_response = models.DataLakeAnalyticsAccountPaged(
internal_paging, self._deserialize.dependencies, header_dict
)
return client_raw_response
return deserialized
def get(
self,
resource_group_name,
account_name,
custom_headers={},
raw=False,
**operation_config
):
"""
Gets details of the specified Data Lake Analytics account.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Analytics account.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account to
retrieve.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`DataLakeAnalyticsAccount
<azure.mgmt.datalake.analytics.account.models.DataLakeAnalyticsAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"accountName": self._serialize.url("account_name", account_name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("DataLakeAnalyticsAccount", response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self,
resource_group_name,
account_name,
custom_headers={},
raw=False,
**operation_config
):
"""
Begins the delete delete process for the Data Lake Analytics account
object specified by the account name.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Analytics account.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account to
delete
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"accountName": self._serialize.url("account_name", account_name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers={}):
request = self._client.get(status_link)
request.headers.update(headers)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 404, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
"long_running_operation_timeout", self.config.long_running_operation_timeout
)
return AzureOperationPoller(
long_running_send,
get_long_running_output,
get_long_running_status,
long_running_operation_timeout,
)
def create(
self,
resource_group_name,
name,
parameters,
custom_headers={},
raw=False,
**operation_config
):
"""
Creates the specified Data Lake Analytics account. This supplies the
user with computation services for Data Lake Analytics workloads
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Analytics account.the account will be
associated with.
:type resource_group_name: str
:param name: The name of the Data Lake Analytics account to create.
:type name: str
:param parameters: Parameters supplied to the create Data Lake
Analytics account operation.
:type parameters: :class:`DataLakeAnalyticsAccount
<azure.mgmt.datalake.analytics.account.models.DataLakeAnalyticsAccount>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DataLakeAnalyticsAccount
<azure.mgmt.datalake.analytics.account.models.DataLakeAnalyticsAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{name}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"name": self._serialize.url("name", name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct body
body_content = self._serialize.body(parameters, "DataLakeAnalyticsAccount")
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config
)
def get_long_running_status(status_link, headers={}):
request = self._client.get(status_link)
request.headers.update(headers)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [201, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize("DataLakeAnalyticsAccount", response)
if response.status_code == 200:
deserialized = self._deserialize("DataLakeAnalyticsAccount", response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
"long_running_operation_timeout", self.config.long_running_operation_timeout
)
return AzureOperationPoller(
long_running_send,
get_long_running_output,
get_long_running_status,
long_running_operation_timeout,
)
def update(
self,
resource_group_name,
name,
parameters,
custom_headers={},
raw=False,
**operation_config
):
"""
Updates the Data Lake Analytics account object specified by the
accountName with the contents of the account object.
:param resource_group_name: The name of the Azure resource group that
contains the Data Lake Analytics account.
:type resource_group_name: str
:param name: The name of the Data Lake Analytics account to update.
:type name: str
:param parameters: Parameters supplied to the update Data Lake
Analytics account operation.
:type parameters: :class:`DataLakeAnalyticsAccount
<azure.mgmt.datalake.analytics.account.models.DataLakeAnalyticsAccount>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DataLakeAnalyticsAccount
<azure.mgmt.datalake.analytics.account.models.DataLakeAnalyticsAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{name}"
path_format_arguments = {
"resourceGroupName": self._serialize.url(
"resource_group_name", resource_group_name, "str"
),
"name": self._serialize.url("name", name, "str"),
"subscriptionId": self._serialize.url(
"self.config.subscription_id", self.config.subscription_id, "str"
),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters["api-version"] = self._serialize.query(
"self.config.api_version", self.config.api_version, "str"
)
# Construct headers
header_parameters = {}
header_parameters["Content-Type"] = "application/json; charset=utf-8"
if self.config.generate_client_request_id:
header_parameters["x-ms-client-request-id"] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters["accept-language"] = self._serialize.header(
"self.config.accept_language", self.config.accept_language, "str"
)
# Construct body
body_content = self._serialize.body(parameters, "DataLakeAnalyticsAccount")
# Construct and send request
def long_running_send():
request = self._client.patch(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config
)
def get_long_running_status(status_link, headers={}):
request = self._client.get(status_link)
request.headers.update(headers)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get("x-ms-request-id")
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("DataLakeAnalyticsAccount", response)
if response.status_code == 201:
deserialized = self._deserialize("DataLakeAnalyticsAccount", response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
"long_running_operation_timeout", self.config.long_running_operation_timeout
)
return AzureOperationPoller(
long_running_send,
get_long_running_output,
get_long_running_status,
long_running_operation_timeout,
)
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .catalog_item import CatalogItem
class USqlSchema(CatalogItem):
    """Catalog item describing a U-SQL schema in a Data Lake Analytics
    database.

    :param compute_account_name: Name of the Data Lake Analytics account
     that owns the catalog.
    :type compute_account_name: str
    :param version: Version of the catalog item.
    :type version: str
    :param database_name: Name of the database containing the schema.
    :type database_name: str
    :param name: Name of the schema.
    :type name: str
    """

    # Maps Python attribute names to the wire-format JSON keys.
    _attribute_map = {
        "compute_account_name": {"key": "computeAccountName", "type": "str"},
        "version": {"key": "version", "type": "str"},
        "database_name": {"key": "databaseName", "type": "str"},
        "name": {"key": "schemaName", "type": "str"},
    }

    def __init__(
        self, compute_account_name=None, version=None, database_name=None, name=None
    ):
        # Account name and version are handled by the CatalogItem base class.
        super(USqlSchema, self).__init__(
            compute_account_name=compute_account_name, version=version
        )
        self.database_name = database_name
        self.name = name
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobResource(Model):
    """A resource attached to a Data Lake Analytics U-SQL job.

    :param name: Name of the resource.
    :type name: str
    :param resource_path: Path to the resource.
    :type resource_path: str
    :param type: Job resource type. Possible values include:
     'VertexResource', 'StatisticsResource'
    :type type: str
    """

    # Maps Python attribute names to the wire-format JSON keys.
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "resource_path": {"key": "resourcePath", "type": "str"},
        "type": {"key": "type", "type": "JobResourceType"},
    }

    def __init__(self, name=None, resource_path=None, type=None):
        self.name = name
        self.resource_path = resource_path
        self.type = type
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .file_operation_result import FileOperationResult
from .acl_status import AclStatus
from .acl_status_result import AclStatusResult
from .content_summary import ContentSummary
from .content_summary_result import ContentSummaryResult
from .file_status_properties import FileStatusProperties
from .file_statuses import FileStatuses
from .file_statuses_result import FileStatusesResult
from .file_status_result import FileStatusResult
from .data_lake_store_file_system_management_client_enums import (
FileType,
AppendModeType,
)
# Public surface of this models package, re-exported from the submodules above.
__all__ = [
    "FileOperationResult",
    "AclStatus",
    "AclStatusResult",
    "ContentSummary",
    "ContentSummaryResult",
    "FileStatusProperties",
    "FileStatuses",
    "FileStatusesResult",
    "FileStatusResult",
    "FileType",
    "AppendModeType",
]
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class WorkflowAccessKeyPaged(Paged):
    """Pageable container for iterating over a list of WorkflowAccessKey
    objects, following the service-provided ``nextLink`` between pages.
    """

    # Maps the paging envelope fields to the wire-format JSON keys.
    _attribute_map = {
        "next_link": {"key": "nextLink", "type": "str"},
        "current_page": {"key": "value", "type": "[WorkflowAccessKey]"},
    }

    def __init__(self, *args, **kwargs):
        # All paging behavior comes from the Paged base class.
        super(WorkflowAccessKeyPaged, self).__init__(*args, **kwargs)
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# Date-based Azure API version targeted by this generated client package.
VERSION = "2015-02-01-preview"
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Error(Model):
    """Error payload returned by the service.

    :param code:
    :type code: str
    :param message:
    :type message: str
    :param target:
    :type target: str
    :param details:
    :type details: list of :class:`ErrorDetails
     <azure.mgmt.network.models.ErrorDetails>`
    :param inner_error:
    :type inner_error: str
    """

    # Maps Python attribute names to the wire-format JSON keys.
    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "target": {"key": "target", "type": "str"},
        "details": {"key": "details", "type": "[ErrorDetails]"},
        "inner_error": {"key": "innerError", "type": "str"},
    }

    def __init__(
        self, code=None, message=None, target=None, details=None, inner_error=None
    ):
        self.code = code
        self.message = message
        self.target = target
        self.details = details
        self.inner_error = inner_error
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class LocalNetworkGateway(Resource):
    """Represents a local network gateway resource.

    Variables are only populated by the server and are ignored when sending
    a request.

    :param id: Resource Id
    :type id: str
    :ivar name: Resource name
    :vartype name: str
    :ivar type: Resource type
    :vartype type: str
    :param location: Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict
    :param local_network_address_space: Local network site address space
    :type local_network_address_space: :class:`AddressSpace
     <azure.mgmt.network.models.AddressSpace>`
    :param gateway_ip_address: IP address of the local network gateway.
    :type gateway_ip_address: str
    :param bgp_settings: The local network gateway's BGP speaker settings
    :type bgp_settings: :class:`BgpSettings
     <azure.mgmt.network.models.BgpSettings>`
    :param resource_guid: Resource guid property of the LocalNetworkGateway
     resource
    :type resource_guid: str
    :param provisioning_state: Provisioning state of the
     LocalNetworkGateway resource (Updating/Deleting/Failed)
    :type provisioning_state: str
    :param etag: Unique read-only string that changes whenever the resource
     is updated
    :type etag: str
    """

    # Server-populated fields that must never be sent in a request body.
    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    # Maps Python attribute names to the wire-format JSON keys; nested
    # "properties.*" keys live inside the resource's properties envelope.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "local_network_address_space": {
            "key": "properties.localNetworkAddressSpace",
            "type": "AddressSpace",
        },
        "gateway_ip_address": {"key": "properties.gatewayIpAddress", "type": "str"},
        "bgp_settings": {"key": "properties.bgpSettings", "type": "BgpSettings"},
        "resource_guid": {"key": "properties.resourceGuid", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
    }

    def __init__(
        self,
        id=None,
        location=None,
        tags=None,
        local_network_address_space=None,
        gateway_ip_address=None,
        bgp_settings=None,
        resource_guid=None,
        provisioning_state=None,
        etag=None,
    ):
        # Common resource fields (id/location/tags) are handled by the base.
        super(LocalNetworkGateway, self).__init__(id=id, location=location, tags=tags)
        self.local_network_address_space = local_network_address_space
        self.gateway_ip_address = gateway_ip_address
        self.bgp_settings = bgp_settings
        self.resource_guid = resource_guid
        self.provisioning_state = provisioning_state
        self.etag = etag
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualNetworkGatewayIPConfiguration(SubResource):
    """IP configuration for a virtual network gateway.

    :param id: Resource Id
    :type id: str
    :param private_ip_address: The privateIPAddress of the IP configuration
    :type private_ip_address: str
    :param private_ip_allocation_method: The private IP allocation method
     (Static/Dynamic). Possible values include: 'Static', 'Dynamic'
    :type private_ip_allocation_method: str
    :param subnet: Reference to the subnet resource
    :type subnet: :class:`SubResource <azure.mgmt.network.models.SubResource>`
    :param public_ip_address: Reference to the public IP resource
    :type public_ip_address: :class:`SubResource
     <azure.mgmt.network.models.SubResource>`
    :param provisioning_state: Provisioning state of the public IP resource
     (Updating/Deleting/Failed)
    :type provisioning_state: str
    :param name: Name of the resource, unique within a resource group; can
     be used to access the resource
    :type name: str
    :param etag: Unique read-only string that changes whenever the resource
     is updated
    :type etag: str
    """

    # Maps Python attribute names to the wire-format JSON keys; nested
    # "properties.*" keys live inside the resource's properties envelope.
    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "private_ip_address": {"key": "properties.privateIPAddress", "type": "str"},
        "private_ip_allocation_method": {
            "key": "properties.privateIPAllocationMethod",
            "type": "IPAllocationMethod",
        },
        "subnet": {"key": "properties.subnet", "type": "SubResource"},
        "public_ip_address": {
            "key": "properties.publicIPAddress",
            "type": "SubResource",
        },
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
    }

    def __init__(
        self,
        id=None,
        private_ip_address=None,
        private_ip_allocation_method=None,
        subnet=None,
        public_ip_address=None,
        provisioning_state=None,
        name=None,
        etag=None,
    ):
        # The resource id is handled by the SubResource base class.
        super(VirtualNetworkGatewayIPConfiguration, self).__init__(id=id)
        self.private_ip_address = private_ip_address
        self.private_ip_allocation_method = private_ip_allocation_method
        self.subnet = subnet
        self.public_ip_address = public_ip_address
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
from setuptools import setup

# azure v0.x is not compatible with this package.
# azure v0.x used to have a __version__ attribute (newer versions don't).
try:
    import azure

    try:
        ver = azure.__version__
        raise Exception(
            "This package is incompatible with azure=={}. ".format(ver)
            + 'Uninstall it with "pip uninstall azure".'
        )
    except AttributeError:
        # No __version__ means a modern namespace package: compatible.
        pass
except ImportError:
    pass

# Read the long description with a context manager so the file handle is
# closed promptly (the original passed open(...).read() and leaked it).
with open("README.rst", "r") as readme:
    long_description = readme.read()

setup(
    name="azure-mgmt-network",
    version="0.30.0rc3",
    description="Microsoft Azure Network Resource Management Client Library for Python",
    long_description=long_description,
    license="Apache License 2.0",
    author="Microsoft Corporation",
    author_email="[email protected]",
    url="https://github.com/Azure/azure-sdk-for-python",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "License :: OSI Approved :: Apache Software License",
    ],
    zip_safe=False,
    packages=[
        "azure",
        "azure.mgmt",
        "azure.mgmt.network",
        "azure.mgmt.network.models",
        "azure.mgmt.network.operations",
    ],
    install_requires=[
        "azure-mgmt-nspkg",
        "azure-common[autorest]==1.1.3",
    ],
)
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class SharedAccessAuthorizationRuleResourcePaged(Paged):
    """Pageable container for iterating over a list of
    SharedAccessAuthorizationRuleResource objects, following the
    service-provided ``nextLink`` between pages.
    """

    # Maps the paging envelope fields to the wire-format JSON keys.
    _attribute_map = {
        "next_link": {"key": "nextLink", "type": "str"},
        "current_page": {
            "key": "value",
            "type": "[SharedAccessAuthorizationRuleResource]",
        },
    }

    def __init__(self, *args, **kwargs):
        # All paging behavior comes from the Paged base class.
        super(SharedAccessAuthorizationRuleResourcePaged, self).__init__(
            *args, **kwargs
        )
<|endoftext|> |
<|endoftext|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .feature_client import FeatureClient, FeatureClientConfiguration
from .version import VERSION

# Public surface of the package: the client and its configuration class.
__all__ = ["FeatureClient", "FeatureClientConfiguration"]

# Expose the generated package version as the conventional dunder.
__version__ = VERSION
<|endoftext|> |
Subsets and Splits