| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | content_cleaned | language | language_score | comments | edu_score | edu_int_score |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 4-286 | stringlengths 5-119 | int64 0-191k | stringlengths 1-7 | stringlengths 6-1.03M | stringlengths 6-1.03M | stringclasses 111 values | float64 0.03-1 | stringlengths 0-556k | float64 0.32-5.03 | int64 0-5 |
red_panda/aws/templates/redshift.py | jucyai/red-panda | 12 | 6632851 |
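# SQL templates for inspecting an Amazon Redshift cluster via its system tables:
#   SQL_NUM_SLICES            -- number of data slices in the cluster
#   SQL_LOAD_ERRORS           -- most recent COPY/load errors, newest first
#   SQL_TABLE_INFO            -- per-table design report: distribution style and skew,
#                                sort keys, size, column encoding, share of total storage,
#                                and staleness of planner statistics / sort order
#   SQL_TABLE_INFO_SIMPLIFIED -- lightweight listing from information_schema.tables
#   SQL_RUNNING_INFO          -- queries currently tracked by WLM, with timings,
#                                disk spill and alert-event flags
#   SQL_LOCK_INFO             -- current table locks
#   SQL_TRANSACT_INFO         -- transactions that currently hold locks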
SQL_NUM_SLICES = "select count(1) from stv_slices"
SQL_LOAD_ERRORS = "select * from stl_load_errors order by starttime desc"
SQL_TABLE_INFO = """\
SELECT TRIM(pgn.nspname) AS SCHEMA,
TRIM(a.name) AS TABLE,
id AS TableId,
decode(pgc.reldiststyle, 0, 'EVEN', 1, det.distkey, 8, 'ALL') AS DistKey,
decode(pgc.reldiststyle,8, NULL, dist_ratio.ratio::DECIMAL(20,4)) AS Skew,
det.head_sort AS "SortKey",
det.n_sortkeys AS "#SKs",
CASE WHEN pgc.reldiststyle = 8 THEN a.rows_all_dist ELSE a.rows END AS rows,
b.mbytes,
decode(det.max_enc, 0, 'N', 'Y') AS Enc,
det.pct_enc,
decode(b.mbytes, 0, 0,((b.mbytes/part.total::DECIMAL)*100)::DECIMAL(20,2)) AS pct_of_total,
CASE
WHEN a.rows = 0
THEN NULL
ELSE
CASE
WHEN pgc.reldiststyle = 8
THEN ((a.rows_all_dist - pgc.reltuples)::DECIMAL(20,3) / a.rows_all_dist::DECIMAL(20,3)*100)::DECIMAL(20,2)
ELSE ((a.rows - pgc.reltuples)::DECIMAL(20,3) / a.rows::DECIMAL(20,3)*100)::DECIMAL(20,2)
END
END AS pct_stats_off,
CASE
WHEN pgc.reldiststyle = 8
THEN decode(det.n_sortkeys, 0, NULL, DECODE(a.rows_all_dist, 0, 0, (a.unsorted_rows_all_dist::DECIMAL(32)/a.rows_all_dist)*100))::DECIMAL(20,2)
ELSE decode(det.n_sortkeys, 0, NULL, DECODE(a.rows, 0, 0, (a.unsorted_rows::DECIMAL(32)/a.rows)*100))::DECIMAL(20,2)
END AS pct_unsorted
FROM (
SELECT db_id,
id,
name,
SUM(ROWS) AS ROWS,
MAX(ROWS) AS rows_all_dist,
SUM(ROWS) - SUM(sorted_rows) AS unsorted_rows,
MAX(ROWS) - MAX(sorted_rows) AS unsorted_rows_all_dist
FROM stv_tbl_perm a
GROUP BY db_id,
id,
name
) AS a
JOIN pg_class AS pgc ON pgc.oid = a.id
JOIN pg_namespace AS pgn ON pgn.oid = pgc.relnamespace
LEFT OUTER JOIN (
SELECT tbl, COUNT(*) AS mbytes FROM stv_blocklist GROUP BY tbl
) b ON a.id = b.tbl
INNER JOIN (
SELECT attrelid,
MIN(CASE attisdistkey WHEN 't' THEN attname ELSE NULL END) AS "distkey",
MIN(CASE attsortkeyord WHEN 1 THEN attname ELSE NULL END) AS head_sort,
MAX(attsortkeyord) AS n_sortkeys,
MAX(attencodingtype) AS max_enc,
SUM(case when attencodingtype <> 0 then 1 else 0 end)::DECIMAL(20,3)/COUNT(attencodingtype)::DECIMAL(20,3) * 100.00 as pct_enc
FROM pg_attribute
GROUP BY 1
) AS det ON det.attrelid = a.id
INNER JOIN (
SELECT tbl,
MAX(Mbytes)::DECIMAL(32) /MIN(Mbytes) AS ratio
FROM (
SELECT tbl,
TRIM(name) AS name,
slice,
COUNT(*) AS Mbytes
FROM svv_diskusage
GROUP BY tbl,
name,
slice
)
GROUP BY tbl,
name
) AS dist_ratio ON a.id = dist_ratio.tbl
JOIN (
SELECT SUM(capacity) AS total
FROM stv_partitions
WHERE part_begin = 0
) AS part ON 1 = 1
WHERE mbytes IS NOT NULL
AND pgc.relowner > 1
ORDER BY mbytes DESC
"""
SQL_TABLE_INFO_SIMPLIFIED = """\
select table_catalog, table_schema, table_name, table_type
from information_schema.tables
where table_schema not in ('pg_catalog', 'information_schema')
"""
SQL_RUNNING_INFO = """\
select trim(u.usename) as user,
s.pid,
q.xid,
q.query,
q.service_class as "q",
q.slot_count as slt,
date_trunc('second',q.wlm_start_time) as start,
decode(trim(q.state), 'Running', 'Run', 'QueuedWaiting', 'Queue', 'Returning', 'Return', trim(q.state)) as state,
q.queue_Time/1000000 as q_sec,
q.exec_time/1000000 as exe_sec,
m.cpu_time/1000000 cpu_sec,
m.blocks_read read_mb,
decode(m.blocks_to_disk,-1,null,m.blocks_to_disk) spill_mb,
m2.rows as ret_rows, m3.rows as NL_rows,
substring(replace(nvl(qrytext_cur.text,trim(translate(s.text,chr(10)||chr(13)||chr(9) ,''))),'\\n',' '), 1, 90) as sql,
trim(decode(event&1,1,'SK ','') || decode(event&2,2,'Del ','') || decode(event&4,4,'NL ','') || decode(event&8,8,'Dist ','') || decode(event&16,16,'Bcast ','') || decode(event&32,32,'Stats ','')) as Alert
from stv_wlm_query_state q
left outer join stl_querytext s on (s.query=q.query and sequence = 0)
left outer join stv_query_metrics m on ( q.query = m.query and m.segment=-1 and m.step=-1 )
left outer join stv_query_metrics m2 on ( q.query = m2.query and m2.step_type = 38 )
left outer join ( select query, sum(rows) as rows from stv_query_metrics m3 where step_type = 15 group by 1) as m3 on ( q.query = m3.query )
left outer join pg_user u on ( s.userid = u.usesysid )
LEFT OUTER JOIN (
SELECT ut.xid,'CURSOR ' || TRIM( substring ( TEXT from strpos(upper(TEXT),'SELECT') )) as TEXT
FROM stl_utilitytext ut
WHERE sequence = 0
AND upper(TEXT) like 'DECLARE%'
GROUP BY text, ut.xid) qrytext_cur ON
(q.xid = qrytext_cur.xid)
left outer join (
select query,
sum(decode(
trim(split_part(event,':',1)), 'Very selective query filter',
1, 'Scanned a large number of deleted rows',
2, 'Nested Loop Join in the query plan',
4,'Distributed a large number of rows across the network',
8,'Broadcasted a large number of rows across the network',
16,'Missing query planner statistics',32,0)
) as event
from STL_ALERT_EVENT_LOG
where event_time >= dateadd(hour, -8, current_Date)
group by query
) as alrt
on alrt.query = q.query
order by q.service_class,q.exec_time desc, q.wlm_start_time
"""
SQL_LOCK_INFO = """\
select table_id,
last_update,
last_commit,
lock_owner_pid,
lock_status
from stv_locks
order by last_update asc
"""
SQL_TRANSACT_INFO = """\
select * from svv_transactions
"""
| language: en | language_score: 0.485333 | edu_score: 1.865958 | edu_int_score: 2 |
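The templates above are plain SQL strings; a minimal sketch of running two of them over a regular DB-API connection with psycopg2 (the driver choice and every connection detail are assumptions, not part of red-panda):

```python
# Sketch only: psycopg2 and all connection parameters below are assumptions.
import psycopg2

from red_panda.aws.templates.redshift import SQL_LOAD_ERRORS, SQL_NUM_SLICES

conn = psycopg2.connect(
    host="my-cluster.example.us-east-1.redshift.amazonaws.com",  # placeholder
    port=5439,
    dbname="dev",
    user="awsuser",
    password="...",  # placeholder
)
with conn, conn.cursor() as cur:
    cur.execute(SQL_NUM_SLICES)
    print("slices:", cur.fetchone()[0])
    cur.execute(SQL_LOAD_ERRORS)
    for row in cur.fetchmany(5):  # five most recent load errors
        print(row)
```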
src/unicms_calendar/migrations/0004_alter_calendarevent_unique_together.py | UniversitaDellaCalabria/unicms-calendar | 0 | 6632852 | # Generated by Django 3.2.5 on 2022-01-27 11:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('unicms_calendar', '0003_alter_calendarcontext_unique_together'),
]
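    # Clear the unique_together constraint previously defined on CalendarEvent.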
operations = [
migrations.AlterUniqueTogether(
name='calendarevent',
unique_together=set(),
),
]
| language: en | language_score: 0.856655 | edu_score: 1.43371 | edu_int_score: 1 |
iris_sdk/models/cities.py | NumberAI/python-bandwidth-iris | 2 | 6632853 | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from future.builtins import super
from iris_sdk.models.base_resource import BaseResource
from iris_sdk.models.data.cities import CitiesData
XML_NAME_CITIES = "CityResponse"
XPATH_RATE_CITIES = "/cities"
class Cities(BaseResource, CitiesData):
"""Covered cities directory"""
_node_name = XML_NAME_CITIES
_xpath = XPATH_RATE_CITIES
def __init__(self, parent=None, client=None):
super().__init__(parent, client)
CitiesData.__init__(self)
def list(self, params):
        return self._get_data(params=params).cities.city
| language: en | language_score: 0.372921 | edu_score: 2.295004 | edu_int_score: 2 |
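A hypothetical usage sketch for the class above; the configured `client` object and the query parameters are assumptions, since neither is defined in this file:

```python
# Hypothetical sketch: the client object and the query parameters are
# assumptions, not part of this module.
from iris_sdk.models.cities import Cities

def covered_cities(client, params=None):
    # Cities.list() forwards the query parameters and returns the city
    # entries parsed out of the CityResponse payload.
    return Cities(client=client).list(params or {})
```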
app/auth/forms.py | tgalvinjr/blog-ip | 0 | 6632854 |
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, PasswordField, BooleanField
from wtforms.validators import DataRequired, Email, EqualTo, ValidationError
from ..models import User
class RegistrationForm(FlaskForm):
email = StringField('Your Email Address',validators=[DataRequired(),Email()])
username = StringField('Enter your username',validators = [DataRequired()])
password = PasswordField('Password',validators = [DataRequired(), EqualTo('password_confirm',message = 'Passwords must match')])
password_confirm = PasswordField('<PASSWORD>',validators = [DataRequired()])
submit = SubmitField('Sign Up')
    def validate_email(self, data_field):
        if User.query.filter_by(email=data_field.data).first():
            raise ValidationError('That email is taken. Please choose a different one.')

    def validate_username(self, data_field):
        if User.query.filter_by(username=data_field.data).first():
            raise ValidationError('That username is taken. Please choose a different one.')
class LoginForm(FlaskForm):
email = StringField('Email',validators=[DataRequired()])
password = PasswordField('Password',validators=[DataRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
| language: none | language_score: 1 | edu_score: 3.091697 | edu_int_score: 3 |
PythonNetwork/venv/Lib/site-packages/scipy/optimize/_linprog.py | Moldovandreii/RepetitionCount | 6,989 | 6632855 | """
A top-level linear programming interface. Currently this interface solves
linear programming problems via the Simplex and Interior-Point methods.
.. versionadded:: 0.15.0
Functions
---------
.. autosummary::
:toctree: generated/
linprog
linprog_verbose_callback
linprog_terse_callback
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .optimize import OptimizeResult
from ._linprog_ip import _linprog_ip
from ._linprog_simplex import _linprog_simplex
from ._linprog_util import (
_parse_linprog, _presolve, _get_Abc, _postprocess
)
__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
__docformat__ = "restructuredtext en"
def linprog_verbose_callback(res):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces detailed output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1D array
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
con : 1D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
"""
x = res['x']
fun = res['fun']
success = res['success']
phase = res['phase']
status = res['status']
nit = res['nit']
message = res['message']
complete = res['complete']
saved_printoptions = np.get_printoptions()
np.set_printoptions(linewidth=500,
formatter={'float': lambda x: "{0: 12.4f}".format(x)})
if status:
print('--------- Simplex Early Exit -------\n'.format(nit))
print('The simplex method exited early with status {0:d}'.format(status))
print(message)
elif complete:
print('--------- Simplex Complete --------\n')
print('Iterations required: {}'.format(nit))
else:
print('--------- Iteration {0:d} ---------\n'.format(nit))
if nit > 0:
if phase == 1:
print('Current Pseudo-Objective Value:')
else:
print('Current Objective Value:')
print('f = ', fun)
print()
print('Current Solution Vector:')
print('x = ', x)
print()
np.set_printoptions(**saved_printoptions)
def linprog_terse_callback(res):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces brief output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1D array
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
con : 1D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
"""
nit = res['nit']
x = res['x']
if nit == 0:
print("Iter: X:")
print("{0: <5d} ".format(nit), end="")
print(x)
def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='simplex', callback=None,
options=None):
"""
Minimize a linear objective function subject to linear
equality and inequality constraints. Linear Programming is intended to
solve the following problem form:
Minimize::
c @ x
Subject to::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
Parameters
----------
c : 1D array
Coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
2D array such that ``A_ub @ x`` gives the values of the upper-bound
inequality constraints at ``x``.
b_ub : 1D array, optional
1D array of values representing the upper-bound of each inequality
constraint (row) in ``A_ub``.
A_eq : 2D, optional
2D array such that ``A_eq @ x`` gives the values of the equality
constraints at ``x``.
b_eq : 1D array, optional
1D array of values representing the RHS of each equality constraint
(row) in ``A_eq``.
bounds : sequence, optional
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter. Use None for one of ``min`` or
``max`` when there is no bound in that direction. By default
bounds are ``(0, None)`` (non-negative).
If a sequence containing a single tuple is provided, then ``min`` and
``max`` will be applied to all variables in the problem.
method : str, optional
Type of solver. :ref:`'simplex' <optimize.linprog-simplex>`
and :ref:`'interior-point' <optimize.linprog-interior-point>`
are supported.
callback : callable, optional (simplex only)
If a callback function is provided, it will be called within each
iteration of the simplex algorithm. The callback must require a
`scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1D array
The values of the slack variables. Each slack variable
corresponds to an inequality constraint. If the slack is zero,
the corresponding constraint is active.
con : 1D array
The (nominally zero) residuals of the equality constraints
that is, ``b - A_eq @ x``
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options('linprog')`.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
slack : 1D array
The values of the slack variables. Each slack variable
corresponds to an inequality constraint. If the slack is zero,
then the corresponding constraint is active.
con : 1D array
The (nominally zero) residuals of the equality constraints,
that is, ``b - A_eq @ x``
success : bool
Returns True if the algorithm succeeded in finding an optimal
solution.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
See Also
--------
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method
is :ref:`Simplex <optimize.linprog-simplex>`.
:ref:`Interior point <optimize.linprog-interior-point>` is also available.
Method *simplex* uses the simplex algorithm (as it relates to linear
programming, NOT the Nelder-Mead simplex) [1]_, [2]_. This algorithm
should be reasonably reliable and fast for small problems.
.. versionadded:: 0.15.0
Method *interior-point* uses the primal-dual path following algorithm
as outlined in [4]_. This algorithm is intended to provide a faster
and more reliable alternative to *simplex*, especially for large,
sparse problems. Note, however, that the solution returned may be slightly
less accurate than that of the simplex method and may not correspond with a
vertex of the polytope defined by the constraints.
Before applying either method a presolve procedure based on [8]_ attempts to
identify trivial infeasibilities, trivial unboundedness, and potential
problem simplifications. Specifically, it checks for:
- rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
- columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
variables;
- column singletons in ``A_eq``, representing fixed variables; and
- column singletons in ``A_ub``, representing simple bounds.
If presolve reveals that the problem is unbounded (e.g. an unconstrained
and unbounded variable has negative cost) or infeasible (e.g. a row of
zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
terminates with the appropriate status code. Note that presolve terminates
as soon as any sign of unboundedness is detected; consequently, a problem
may be reported as unbounded when in reality the problem is infeasible
(but infeasibility has not been detected yet). Therefore, if the output
message states that unboundedness is detected in presolve and it is
necessary to know whether the problem is actually infeasible, set option
``presolve=False``.
If neither infeasibility nor unboundedness are detected in a single pass
of the presolve check, bounds are tightened where possible and fixed
variables are removed from the problem. Then, linearly dependent rows
of the ``A_eq`` matrix are removed, (unless they represent an
infeasibility) to avoid numerical difficulties in the primary solve
routine. Note that rows that are nearly linearly dependent (within a
prescribed tolerance) may also be removed, which can change the optimal
solution in rare cases. If this is a concern, eliminate redundancy from
your problem formulation and run with option ``rr=False`` or
``presolve=False``.
Several potential improvements can be made here: additional presolve
checks outlined in [8]_ should be implemented, the presolve routine should
be run multiple times (until no further simplifications can be made), and
more of the efficiency improvements from [5]_ should be implemented in the
redundancy removal routines.
After presolve, the problem is transformed to standard form by converting
the (tightened) simple bounds to upper bound constraints, introducing
non-negative slack variables for inequality constraints, and expressing
unbounded variables as the difference between two non-negative variables.
References
----------
.. [1] Dantzig, <NAME>., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1963
.. [2] <NAME>. and <NAME>. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
.. [3] <NAME>. New finite pivoting rules for the simplex method.
Mathematics of Operations Research (2), 1977: pp. 103-107.
.. [4] Andersen, <NAME>., and <NAME>. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
.. [5] <NAME>. "Finding all linearly dependent rows in
large-scale linear programming." Optimization Methods and Software
6.3 (1995): 219-227.
.. [6] <NAME>. "Primal-Dual Interior-Point Methods for Linear
Programming based on Newton's Method." Unpublished Course Notes,
March 2004. Available 2/25/2017 at
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
.. [7] <NAME>. "Solving Linear Programs by Interior-Point Methods."
Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
.. [8] Andersen, <NAME>., and <NAME>. "Presolving in linear
programming." Mathematical Programming 71.2 (1995): 221-245.
.. [9] <NAME>, and <NAME>. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [10] Andersen, <NAME>., et al. Implementation of interior point
methods for large scale linear programming. HEC/Universite de
Geneve, 1996.
Examples
--------
Consider the following problem:
Minimize::
f = -1x[0] + 4x[1]
Subject to::
-3x[0] + 1x[1] <= 6
1x[0] + 2x[1] <= 4
x[1] >= -3
-inf <= x[0] <= inf
This problem deviates from the standard linear programming problem.
In standard form, linear programming problems assume the variables x are
non-negative. Since the problem variables don't have the standard bounds of
``(0, None)``, the variable bounds must be set using ``bounds`` explicitly.
There are two upper-bound constraints, which can be expressed as
dot(A_ub, x) <= b_ub
The input for this problem is as follows:
>>> c = [-1, 4]
>>> A = [[-3, 1], [1, 2]]
>>> b = [6, 4]
>>> x0_bounds = (None, None)
>>> x1_bounds = (-3, None)
>>> from scipy.optimize import linprog
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds),
... options={"disp": True})
Optimization terminated successfully.
Current function value: -22.000000
Iterations: 5 # may vary
>>> print(res)
con: array([], dtype=float64)
fun: -22.0
message: 'Optimization terminated successfully.'
nit: 5 # may vary
slack: array([39., 0.]) # may vary
status: 0
success: True
x: array([10., -3.])
"""
meth = method.lower()
default_tol = 1e-12 if meth == 'simplex' else 1e-9
c, A_ub, b_ub, A_eq, b_eq, bounds, solver_options = _parse_linprog(
c, A_ub, b_ub, A_eq, b_eq, bounds, options)
tol = solver_options.get('tol', default_tol)
iteration = 0
complete = False # will become True if solved in presolve
undo = []
# Keep the original arrays to calculate slack/residuals for original
# problem.
c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o = c.copy(
), A_ub.copy(), b_ub.copy(), A_eq.copy(), b_eq.copy()
# Solve trivial problem, eliminate variables, tighten bounds, etc...
c0 = 0 # we might get a constant term in the objective
if solver_options.pop('presolve', True):
rr = solver_options.pop('rr', True)
(c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, undo, complete, status,
message) = _presolve(c, A_ub, b_ub, A_eq, b_eq, bounds, rr, tol)
if not complete:
A, b, c, c0 = _get_Abc(c, c0, A_ub, b_ub, A_eq, b_eq, bounds, undo)
T_o = (c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o, bounds, undo)
if meth == 'simplex':
x, status, message, iteration = _linprog_simplex(
c, c0=c0, A=A, b=b, callback=callback, _T_o=T_o, **solver_options)
elif meth == 'interior-point':
x, status, message, iteration = _linprog_ip(
c, c0=c0, A=A, b=b, callback=callback, **solver_options)
else:
raise ValueError('Unknown solver %s' % method)
# Eliminate artificial variables, re-introduce presolved variables, etc...
# need modified bounds here to translate variables appropriately
disp = solver_options.get('disp', False)
x, fun, slack, con, status, message = _postprocess(
x, c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o, bounds,
complete, undo, status, message, tol, iteration, disp)
sol = {
'x': x,
'fun': fun,
'slack': slack,
'con': con,
'status': status,
'message': message,
'nit': iteration,
'success': status == 0}
return OptimizeResult(sol)
| """
A top-level linear programming interface. Currently this interface solves
linear programming problems via the Simplex and Interior-Point methods.
.. versionadded:: 0.15.0
Functions
---------
.. autosummary::
:toctree: generated/
linprog
linprog_verbose_callback
linprog_terse_callback
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .optimize import OptimizeResult
from ._linprog_ip import _linprog_ip
from ._linprog_simplex import _linprog_simplex
from ._linprog_util import (
_parse_linprog, _presolve, _get_Abc, _postprocess
)
__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']
__docformat__ = "restructuredtext en"
def linprog_verbose_callback(res):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces detailed output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1D array
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
con : 1D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
"""
x = res['x']
fun = res['fun']
success = res['success']
phase = res['phase']
status = res['status']
nit = res['nit']
message = res['message']
complete = res['complete']
saved_printoptions = np.get_printoptions()
np.set_printoptions(linewidth=500,
formatter={'float': lambda x: "{0: 12.4f}".format(x)})
if status:
print('--------- Simplex Early Exit -------\n'.format(nit))
print('The simplex method exited early with status {0:d}'.format(status))
print(message)
elif complete:
print('--------- Simplex Complete --------\n')
print('Iterations required: {}'.format(nit))
else:
print('--------- Iteration {0:d} ---------\n'.format(nit))
if nit > 0:
if phase == 1:
print('Current Pseudo-Objective Value:')
else:
print('Current Objective Value:')
print('f = ', fun)
print()
print('Current Solution Vector:')
print('x = ', x)
print()
np.set_printoptions(**saved_printoptions)
def linprog_terse_callback(res):
"""
A sample callback function demonstrating the linprog callback interface.
This callback produces brief output to sys.stdout before each iteration
and after the final iteration of the simplex algorithm.
Parameters
----------
res : A `scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1D array
The values of the slack variables. Each slack variable corresponds
to an inequality constraint. If the slack is zero, then the
corresponding constraint is active.
con : 1D array
The (nominally zero) residuals of the equality constraints, that is,
``b - A_eq @ x``
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
"""
nit = res['nit']
x = res['x']
if nit == 0:
print("Iter: X:")
print("{0: <5d} ".format(nit), end="")
print(x)
def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='simplex', callback=None,
options=None):
"""
Minimize a linear objective function subject to linear
equality and inequality constraints. Linear Programming is intended to
solve the following problem form:
Minimize::
c @ x
Subject to::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
Parameters
----------
c : 1D array
Coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
2D array such that ``A_ub @ x`` gives the values of the upper-bound
inequality constraints at ``x``.
b_ub : 1D array, optional
1D array of values representing the upper-bound of each inequality
constraint (row) in ``A_ub``.
A_eq : 2D, optional
2D array such that ``A_eq @ x`` gives the values of the equality
constraints at ``x``.
b_eq : 1D array, optional
1D array of values representing the RHS of each equality constraint
(row) in ``A_eq``.
bounds : sequence, optional
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter. Use None for one of ``min`` or
``max`` when there is no bound in that direction. By default
bounds are ``(0, None)`` (non-negative).
If a sequence containing a single tuple is provided, then ``min`` and
``max`` will be applied to all variables in the problem.
method : str, optional
Type of solver. :ref:`'simplex' <optimize.linprog-simplex>`
and :ref:`'interior-point' <optimize.linprog-interior-point>`
are supported.
callback : callable, optional (simplex only)
If a callback function is provided, it will be called within each
iteration of the simplex algorithm. The callback must require a
`scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
success : bool
True if the algorithm succeeded in finding an optimal solution.
slack : 1D array
The values of the slack variables. Each slack variable
corresponds to an inequality constraint. If the slack is zero,
the corresponding constraint is active.
con : 1D array
The (nominally zero) residuals of the equality constraints
that is, ``b - A_eq @ x``
phase : int
The phase of the optimization being executed. In phase 1 a basic
feasible solution is sought and the T has an additional row
representing an alternate objective function.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options('linprog')`.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1D array
The independent variable vector which optimizes the linear
programming problem.
fun : float
Value of the objective function.
slack : 1D array
The values of the slack variables. Each slack variable
corresponds to an inequality constraint. If the slack is zero,
then the corresponding constraint is active.
con : 1D array
The (nominally zero) residuals of the equality constraints,
that is, ``b - A_eq @ x``
success : bool
Returns True if the algorithm succeeded in finding an optimal
solution.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
See Also
--------
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method
is :ref:`Simplex <optimize.linprog-simplex>`.
:ref:`Interior point <optimize.linprog-interior-point>` is also available.
Method *simplex* uses the simplex algorithm (as it relates to linear
programming, NOT the Nelder-Mead simplex) [1]_, [2]_. This algorithm
should be reasonably reliable and fast for small problems.
.. versionadded:: 0.15.0
Method *interior-point* uses the primal-dual path following algorithm
as outlined in [4]_. This algorithm is intended to provide a faster
and more reliable alternative to *simplex*, especially for large,
sparse problems. Note, however, that the solution returned may be slightly
less accurate than that of the simplex method and may not correspond with a
vertex of the polytope defined by the constraints.
Before applying either method a presolve procedure based on [8]_ attempts to
identify trivial infeasibilities, trivial unboundedness, and potential
problem simplifications. Specifically, it checks for:
- rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
- columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
variables;
- column singletons in ``A_eq``, representing fixed variables; and
- column singletons in ``A_ub``, representing simple bounds.
If presolve reveals that the problem is unbounded (e.g. an unconstrained
and unbounded variable has negative cost) or infeasible (e.g. a row of
zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
terminates with the appropriate status code. Note that presolve terminates
as soon as any sign of unboundedness is detected; consequently, a problem
may be reported as unbounded when in reality the problem is infeasible
(but infeasibility has not been detected yet). Therefore, if the output
message states that unboundedness is detected in presolve and it is
necessary to know whether the problem is actually infeasible, set option
``presolve=False``.
If neither infeasibility nor unboundedness are detected in a single pass
of the presolve check, bounds are tightened where possible and fixed
variables are removed from the problem. Then, linearly dependent rows
of the ``A_eq`` matrix are removed, (unless they represent an
infeasibility) to avoid numerical difficulties in the primary solve
routine. Note that rows that are nearly linearly dependent (within a
prescribed tolerance) may also be removed, which can change the optimal
solution in rare cases. If this is a concern, eliminate redundancy from
your problem formulation and run with option ``rr=False`` or
``presolve=False``.
Several potential improvements can be made here: additional presolve
checks outlined in [8]_ should be implemented, the presolve routine should
be run multiple times (until no further simplifications can be made), and
more of the efficiency improvements from [5]_ should be implemented in the
redundancy removal routines.
After presolve, the problem is transformed to standard form by converting
the (tightened) simple bounds to upper bound constraints, introducing
non-negative slack variables for inequality constraints, and expressing
unbounded variables as the difference between two non-negative variables.
References
----------
.. [1] Dantzig, <NAME>., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1963
.. [2] <NAME>. and <NAME>. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
.. [3] <NAME>. New finite pivoting rules for the simplex method.
Mathematics of Operations Research (2), 1977: pp. 103-107.
.. [4] Andersen, <NAME>., and <NAME>. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
.. [5] <NAME>. "Finding all linearly dependent rows in
large-scale linear programming." Optimization Methods and Software
6.3 (1995): 219-227.
.. [6] <NAME>. "Primal-Dual Interior-Point Methods for Linear
Programming based on Newton's Method." Unpublished Course Notes,
March 2004. Available 2/25/2017 at
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
.. [7] <NAME>. "Solving Linear Programs by Interior-Point Methods."
Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
.. [8] Andersen, <NAME>., and <NAME>. "Presolving in linear
programming." Mathematical Programming 71.2 (1995): 221-245.
.. [9] <NAME>, and <NAME>. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [10] Andersen, <NAME>., et al. Implementation of interior point
methods for large scale linear programming. HEC/Universite de
Geneve, 1996.
Examples
--------
Consider the following problem:
Minimize::
f = -1x[0] + 4x[1]
Subject to::
-3x[0] + 1x[1] <= 6
1x[0] + 2x[1] <= 4
x[1] >= -3
-inf <= x[0] <= inf
This problem deviates from the standard linear programming problem.
In standard form, linear programming problems assume the variables x are
non-negative. Since the problem variables don't have the standard bounds of
``(0, None)``, the variable bounds must be set using ``bounds`` explicitly.
There are two upper-bound constraints, which can be expressed as
dot(A_ub, x) <= b_ub
The input for this problem is as follows:
>>> c = [-1, 4]
>>> A = [[-3, 1], [1, 2]]
>>> b = [6, 4]
>>> x0_bounds = (None, None)
>>> x1_bounds = (-3, None)
>>> from scipy.optimize import linprog
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds),
... options={"disp": True})
Optimization terminated successfully.
Current function value: -22.000000
Iterations: 5 # may vary
>>> print(res)
con: array([], dtype=float64)
fun: -22.0
message: 'Optimization terminated successfully.'
nit: 5 # may vary
slack: array([39., 0.]) # may vary
status: 0
success: True
x: array([10., -3.])
"""
meth = method.lower()
default_tol = 1e-12 if meth == 'simplex' else 1e-9
c, A_ub, b_ub, A_eq, b_eq, bounds, solver_options = _parse_linprog(
c, A_ub, b_ub, A_eq, b_eq, bounds, options)
tol = solver_options.get('tol', default_tol)
iteration = 0
complete = False # will become True if solved in presolve
undo = []
# Keep the original arrays to calculate slack/residuals for original
# problem.
c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o = c.copy(
), A_ub.copy(), b_ub.copy(), A_eq.copy(), b_eq.copy()
# Solve trivial problem, eliminate variables, tighten bounds, etc...
c0 = 0 # we might get a constant term in the objective
if solver_options.pop('presolve', True):
rr = solver_options.pop('rr', True)
(c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, undo, complete, status,
message) = _presolve(c, A_ub, b_ub, A_eq, b_eq, bounds, rr, tol)
if not complete:
A, b, c, c0 = _get_Abc(c, c0, A_ub, b_ub, A_eq, b_eq, bounds, undo)
T_o = (c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o, bounds, undo)
if meth == 'simplex':
x, status, message, iteration = _linprog_simplex(
c, c0=c0, A=A, b=b, callback=callback, _T_o=T_o, **solver_options)
elif meth == 'interior-point':
x, status, message, iteration = _linprog_ip(
c, c0=c0, A=A, b=b, callback=callback, **solver_options)
else:
raise ValueError('Unknown solver %s' % method)
# Eliminate artificial variables, re-introduce presolved variables, etc...
# need modified bounds here to translate variables appropriately
disp = solver_options.get('disp', False)
x, fun, slack, con, status, message = _postprocess(
x, c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o, bounds,
complete, undo, status, message, tol, iteration, disp)
sol = {
'x': x,
'fun': fun,
'slack': slack,
'con': con,
'status': status,
'message': message,
'nit': iteration,
'success': status == 0}
return OptimizeResult(sol)
| en | 0.783569 | A top-level linear programming interface. Currently this interface solves linear programming problems via the Simplex and Interior-Point methods. .. versionadded:: 0.15.0 Functions --------- .. autosummary:: :toctree: generated/ linprog linprog_verbose_callback linprog_terse_callback A sample callback function demonstrating the linprog callback interface. This callback produces detailed output to sys.stdout before each iteration and after the final iteration of the simplex algorithm. Parameters ---------- res : A `scipy.optimize.OptimizeResult` consisting of the following fields: x : 1D array The independent variable vector which optimizes the linear programming problem. fun : float Value of the objective function. success : bool True if the algorithm succeeded in finding an optimal solution. slack : 1D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, then the corresponding constraint is active. con : 1D array The (nominally zero) residuals of the equality constraints, that is, ``b - A_eq @ x`` phase : int The phase of the optimization being executed. In phase 1 a basic feasible solution is sought and the T has an additional row representing an alternate objective function. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization. A sample callback function demonstrating the linprog callback interface. This callback produces brief output to sys.stdout before each iteration and after the final iteration of the simplex algorithm. Parameters ---------- res : A `scipy.optimize.OptimizeResult` consisting of the following fields: x : 1D array The independent variable vector which optimizes the linear programming problem. fun : float Value of the objective function. success : bool True if the algorithm succeeded in finding an optimal solution. slack : 1D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, then the corresponding constraint is active. con : 1D array The (nominally zero) residuals of the equality constraints, that is, ``b - A_eq @ x`` phase : int The phase of the optimization being executed. In phase 1 a basic feasible solution is sought and the T has an additional row representing an alternate objective function. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization. Minimize a linear objective function subject to linear equality and inequality constraints. Linear Programming is intended to solve the following problem form: Minimize:: c @ x Subject to:: A_ub @ x <= b_ub A_eq @ x == b_eq lb <= x <= ub where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. Parameters ---------- c : 1D array Coefficients of the linear objective function to be minimized. 
A_ub : 2D array, optional 2D array such that ``A_ub @ x`` gives the values of the upper-bound inequality constraints at ``x``. b_ub : 1D array, optional 1D array of values representing the upper-bound of each inequality constraint (row) in ``A_ub``. A_eq : 2D, optional 2D array such that ``A_eq @ x`` gives the values of the equality constraints at ``x``. b_eq : 1D array, optional 1D array of values representing the RHS of each equality constraint (row) in ``A_eq``. bounds : sequence, optional ``(min, max)`` pairs for each element in ``x``, defining the bounds on that parameter. Use None for one of ``min`` or ``max`` when there is no bound in that direction. By default bounds are ``(0, None)`` (non-negative). If a sequence containing a single tuple is provided, then ``min`` and ``max`` will be applied to all variables in the problem. method : str, optional Type of solver. :ref:`'simplex' <optimize.linprog-simplex>` and :ref:`'interior-point' <optimize.linprog-interior-point>` are supported. callback : callable, optional (simplex only) If a callback function is provided, it will be called within each iteration of the simplex algorithm. The callback must require a `scipy.optimize.OptimizeResult` consisting of the following fields: x : 1D array The independent variable vector which optimizes the linear programming problem. fun : float Value of the objective function. success : bool True if the algorithm succeeded in finding an optimal solution. slack : 1D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, the corresponding constraint is active. con : 1D array The (nominally zero) residuals of the equality constraints that is, ``b - A_eq @ x`` phase : int The phase of the optimization being executed. In phase 1 a basic feasible solution is sought and the T has an additional row representing an alternate objective function. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered nit : int The number of iterations performed. message : str A string descriptor of the exit status of the optimization. options : dict, optional A dictionary of solver options. All methods accept the following generic options: maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. For method-specific options, see :func:`show_options('linprog')`. Returns ------- res : OptimizeResult A :class:`scipy.optimize.OptimizeResult` consisting of the fields: x : 1D array The independent variable vector which optimizes the linear programming problem. fun : float Value of the objective function. slack : 1D array The values of the slack variables. Each slack variable corresponds to an inequality constraint. If the slack is zero, then the corresponding constraint is active. con : 1D array The (nominally zero) residuals of the equality constraints, that is, ``b - A_eq @ x`` success : bool Returns True if the algorithm succeeded in finding an optimal solution. status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered nit : int The number of iterations performed. 
message : str A string descriptor of the exit status of the optimization. See Also -------- show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is :ref:`Simplex <optimize.linprog-simplex>`. :ref:`Interior point <optimize.linprog-interior-point>` is also available. Method *simplex* uses the simplex algorithm (as it relates to linear programming, NOT the Nelder-Mead simplex) [1]_, [2]_. This algorithm should be reasonably reliable and fast for small problems. .. versionadded:: 0.15.0 Method *interior-point* uses the primal-dual path following algorithm as outlined in [4]_. This algorithm is intended to provide a faster and more reliable alternative to *simplex*, especially for large, sparse problems. Note, however, that the solution returned may be slightly less accurate than that of the simplex method and may not correspond with a vertex of the polytope defined by the constraints. Before applying either method a presolve procedure based on [8]_ attempts to identify trivial infeasibilities, trivial unboundedness, and potential problem simplifications. Specifically, it checks for: - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints; - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained variables; - column singletons in ``A_eq``, representing fixed variables; and - column singletons in ``A_ub``, representing simple bounds. If presolve reveals that the problem is unbounded (e.g. an unconstrained and unbounded variable has negative cost) or infeasible (e.g. a row of zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver terminates with the appropriate status code. Note that presolve terminates as soon as any sign of unboundedness is detected; consequently, a problem may be reported as unbounded when in reality the problem is infeasible (but infeasibility has not been detected yet). Therefore, if the output message states that unboundedness is detected in presolve and it is necessary to know whether the problem is actually infeasible, set option ``presolve=False``. If neither infeasibility nor unboundedness are detected in a single pass of the presolve check, bounds are tightened where possible and fixed variables are removed from the problem. Then, linearly dependent rows of the ``A_eq`` matrix are removed, (unless they represent an infeasibility) to avoid numerical difficulties in the primary solve routine. Note that rows that are nearly linearly dependent (within a prescribed tolerance) may also be removed, which can change the optimal solution in rare cases. If this is a concern, eliminate redundancy from your problem formulation and run with option ``rr=False`` or ``presolve=False``. Several potential improvements can be made here: additional presolve checks outlined in [8]_ should be implemented, the presolve routine should be run multiple times (until no further simplifications can be made), and more of the efficiency improvements from [5]_ should be implemented in the redundancy removal routines. After presolve, the problem is transformed to standard form by converting the (tightened) simple bounds to upper bound constraints, introducing non-negative slack variables for inequality constraints, and expressing unbounded variables as the difference between two non-negative variables. References ---------- .. [1] Dantzig, <NAME>., Linear programming and extensions. 
Rand Corporation Research Study Princeton Univ. Press, Princeton, NJ, 1963 .. [2] <NAME>. and <NAME>. (1995), "Introduction to Mathematical Programming", McGraw-Hill, Chapter 4. .. [3] <NAME>. New finite pivoting rules for the simplex method. Mathematics of Operations Research (2), 1977: pp. 103-107. .. [4] Andersen, <NAME>., and <NAME>. "The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm." High performance optimization. Springer US, 2000. 197-232. .. [5] <NAME>. "Finding all linearly dependent rows in large-scale linear programming." Optimization Methods and Software 6.3 (1995): 219-227. .. [6] <NAME>. "Primal-Dual Interior-Point Methods for Linear Programming based on Newton's Method." Unpublished Course Notes, March 2004. Available 2/25/2017 at https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf .. [7] <NAME>. "Solving Linear Programs by Interior-Point Methods." Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at http://www.4er.org/CourseNotes/Book%20B/B-III.pdf .. [8] Andersen, <NAME>., and <NAME>. "Presolving in linear programming." Mathematical Programming 71.2 (1995): 221-245. .. [9] <NAME>, and <NAME>. "Introduction to linear programming." Athena Scientific 1 (1997): 997. .. [10] Andersen, <NAME>., et al. Implementation of interior point methods for large scale linear programming. HEC/Universite de Geneve, 1996. Examples -------- Consider the following problem: Minimize:: f = -1x[0] + 4x[1] Subject to:: -3x[0] + 1x[1] <= 6 1x[0] + 2x[1] <= 4 x[1] >= -3 -inf <= x[0] <= inf This problem deviates from the standard linear programming problem. In standard form, linear programming problems assume the variables x are non-negative. Since the problem variables don't have the standard bounds of ``(0, None)``, the variable bounds must be set using ``bounds`` explicitly. There are two upper-bound constraints, which can be expressed as dot(A_ub, x) <= b_ub The input for this problem is as follows: >>> c = [-1, 4] >>> A = [[-3, 1], [1, 2]] >>> b = [6, 4] >>> x0_bounds = (None, None) >>> x1_bounds = (-3, None) >>> from scipy.optimize import linprog >>> res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds), ... options={"disp": True}) Optimization terminated successfully. Current function value: -22.000000 Iterations: 5 # may vary >>> print(res) con: array([], dtype=float64) fun: -22.0 message: 'Optimization terminated successfully.' nit: 5 # may vary slack: array([39., 0.]) # may vary status: 0 success: True x: array([10., -3.]) # will become True if solved in presolve # Keep the original arrays to calculate slack/residuals for original # problem. # Solve trivial problem, eliminate variables, tighten bounds, etc... # we might get a constant term in the objective # Eliminate artificial variables, re-introduce presolved variables, etc... # need modified bounds here to translate variables appropriately | 2.959492 | 3 |
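The docstring material above already walks through a small bounded two-variable problem in prose; the sketch below is a minimal, self-contained version of that same call for readers who want to run it. It is a reader's illustration, not part of the original row: the exact default method (and whether 'simplex' is still available) depends on the SciPy version.

from scipy.optimize import linprog

# Minimize f = -1*x0 + 4*x1 subject to the two upper-bound rows from the example.
c = [-1, 4]
A_ub = [[-3, 1], [1, 2]]
b_ub = [6, 4]
bounds = [(None, None), (-3, None)]  # x0 free, x1 >= -3

res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds)  # default solver varies by SciPy version
print(res.status, res.fun, res.x)  # expect status 0, fun close to -22, x close to [10, -3]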
camcalib/gazeNets.py | ostapstephan/SeniorProject | 1 | 6632856 | #!/usr/bin/env python
import cv2
import os
import subprocess as sp
import sys
import numpy as np
import time
import pickle as pk
# import datetime
from matrix import get_pupil_transformation_matrix
from threading import Thread
sys.path.append(os.path.abspath('../../TEST'))
sys.path.append(os.path.abspath('../../TEST/shared_modules'))
from pupil_detectors import Detector_3D
from methods import Roi
sys.path.append(os.path.abspath('../'))
# from calibrateHaar import calibrate
# from pbcvt import findPupilEllipse
# from params import pupil_tracker_params
from cameras import cam0mat as cameraMatrix0
from cameras import cam0dcoef as distCoeffs0
from cameras import cam1mat as cameraMatrix1
from cameras import cam1dcoef as distCoeffs1
cameraMatrix0 = np.array(cameraMatrix0)
distCoeffs0 = np.array(distCoeffs0)
cameraMatrix1 = np.array(cameraMatrix1)
distCoeffs1 = np.array(distCoeffs1)
# from cameras import cam1mat as cameraMatrix1
# from cameras import cam1dcoef as distCoeffs1
TIMEOUT = 10000
FFMPEG_BIN = "ffmpeg"
from sklearn.model_selection import train_test_split
import sklearn.linear_model
import sklearn.utils
from sklearn.preprocessing import PolynomialFeatures
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.multioutput import MultiOutputRegressor
import xgboost as xgb
'''
This code opens fast, low-latency video streams and captures and saves
photos from webcams and networked Raspberry Pis.
The Readme.txt in this directory will help with debugging.
'''
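# Note on the streaming approach used below (reader's summary, not original code):
# the r0.sh/r1.sh helpers create named pipes ("fifo0"/"fifo1") that are fed by the
# Raspberry Pi streams started over ssh, and ffmpeg re-emits whatever arrives as raw
# bgr24 frames on stdout. Because bgr24 uses 3 bytes per pixel, one frame is exactly
# height * width * 3 bytes, which is why the reader thread below pulls that many
# bytes per read() before reshaping into a (height, width, 3) numpy array.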
class WebcamVideoStream:
def __init__(self, src=None, fifo=None):
# initialize the video camera stream and read the first frame
# from the stream
# self.stream = cv2.VideoCapture(src)
# (self.grabbed, self.frame) = self.stream.read()
###
if fifo == 'fifo0':
self.height = 640
self.width = 480
elif fifo == 'fifo1':
self.height = 480
self.width = 640
else:
            print('error: please specify the camera type (fifo0 or fifo1)')
            raise ValueError('unknown fifo name: %r' % fifo)
if not fifo:
fifo = 'fifo0'
print("no input using fifo0")
print("about to init command")
command = [
FFMPEG_BIN,
'-i',
fifo,
'-pix_fmt',
'bgr24', # opencv requires bgr24 pixel format.
'-vcodec',
'rawvideo',
'-an',
'-sn',
'-f',
'image2pipe',
'-'
] # '-framerate', '100',
print("about to sp.popen")
self.pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=1024)
print("about read first frame")
try:
raw_image = self.pipe.stdout.read(self.height * self.width * 3)
            # np.fromstring is deprecated for binary input; np.frombuffer is the drop-in replacement
            self.image = np.frombuffer(
                raw_image, dtype='uint8'
            ).reshape((self.height, self.width, 3))
except Exception:
self.image = np.zeros((self.height, self.width, 3))
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
print("starting thread")
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
print("starting while true loop")
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
self.pipe.kill()
return
raw_image = self.pipe.stdout.read(self.height * self.width * 3)
            self.image = np.frombuffer(
                raw_image, dtype='uint8'
            ).reshape((self.height, self.width, 3))
self.pipe.stdout.flush()
# otherwise, read the next frame from the stream
# (self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.image
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
markdict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
arucoParams = cv2.aruco.DetectorParameters_create()
arucoParams.adaptiveThreshConstant = 10
def getNewArucoImg():
markerSize = 93
outimg = cv2.aruco.drawMarker(markdict, 5, markerSize)
height = 1050
width = 1680
bigPic = np.ones((height,width))
#random offset
yo = np.random.randint(0,width-markerSize)
xo = np.random.randint(0,height-markerSize)
bigPic[xo:xo+markerSize,yo:yo+markerSize] = outimg
return bigPic, (xo+markerSize/2,yo+markerSize/2)
def drawArucoImg(xo,yo):
markerSize = 93
outimg = cv2.aruco.drawMarker(markdict, 5, markerSize)
height = 1050
width = 1680
bigPic = np.ones((height,width))
xo = int(xo)
yo = int(yo)
bigPic[xo:xo+markerSize,yo:yo+markerSize] = outimg
return bigPic
def draw_ellipse(
img,
center,
axes,
angle,
startAngle,
endAngle,
color,
thickness=3,
lineType=cv2.LINE_AA,
shift=10):
center = (int(round(center[0] * 2**shift)), int(round(center[1] * 2**shift)))
axes = (int(round(axes[0] * 2**shift)), int(round(axes[1] * 2**shift)))
cv2.ellipse(
img,
center,
axes,
angle,
startAngle,
endAngle,
color,
thickness,
lineType,
shift,
)
class Frame(object):
def __init__(self, camType):
if camType == 0:
self.height = 640
self.width = 480
elif camType == 1:
self.height = 480
self.width = 640
self.gray = np.zeros((self.height, self.width))
self.img = np.zeros((self.height, self.width, 3))
self.timestamp = time.time()
def solveperp(objectPoints, imagePoints, cameraMatrix, distCoeffs, method):
if method == 1:
return cv2.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs)
elif method == 2:
return cv2.solvePnPRansac(
objectPoints, imagePoints, cameraMatrix, distCoeffs
)
else:
return cv2.solveP3P(objectPoints, imagePoints, cameraMatrix, distCoeffs)
def draw_gaze(img, start, end, H, K, dist):
# unit is mm
try:
rvec, _ = cv2.Rodrigues(H[:3,:3])
tvec = H[:3,3]
points = np.float32([
start,
end,
]).reshape(-1, 3)
axisPoints, _ = cv2.projectPoints(points, rvec, tvec, K, dist)
img = cv2.arrowedLine(
img, tuple(axisPoints[0].ravel()), tuple(axisPoints[1].ravel()),
(0, 255, 0), 3
)
except OverflowError:
pass
return img
def draw_plane(img, corners, H, K, dist):
# unit is mm
try:
rvec, _ = cv2.Rodrigues(H[:3,:3])
tvec = H[:3,3]
points = np.float32(corners).reshape(-1, 3)
axisPoints, _ = cv2.projectPoints(points, rvec, tvec, K, dist)
img = cv2.arrowedLine(
img, tuple(axisPoints[0].ravel()), tuple(axisPoints[1].ravel()),
(0, 0, 255), 3
)
img = cv2.arrowedLine(
img, tuple(axisPoints[1].ravel()), tuple(axisPoints[2].ravel()),
(255, 0, 0), 3
)
img = cv2.arrowedLine(
img, tuple(axisPoints[2].ravel()), tuple(axisPoints[3].ravel()),
(255, 0, 0), 3
)
img = cv2.arrowedLine(
img, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()),
(255, 0, 0), 3
)
except OverflowError:
pass
return img
def useHomog(plane,pupil,sphere,R,t):
H = np.eye(4)
H[:3,:3] = cv2.Rodrigues(R)[0]
H[:3,3]= t
sphere2 = H[:3,:3] @ sphere + H[:3,3] # THESE TWO LINES
pupil2 = H[:3,:3] @ pupil + H[:3,3] # THATS THIS ONE TOO
gaze = pupil2-sphere2
gazeEnd = lineIntersection(plane[0],np.cross(plane[1]-plane[0],plane[2]-plane[1]), pupil2, gaze)
    return gazeEnd  # predicted gaze point: intersection of the gaze ray with the screen plane
def lineIntersection(planePoint, planeNormal, linePoint, lineDirection): #THIS FUNCTION
if np.dot(planeNormal,lineDirection) == 0:
return planePoint
t = (np.dot(planeNormal,planePoint) - np.dot(planeNormal,linePoint)) / np.dot(planeNormal,lineDirection)
    return linePoint + t*lineDirection
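# lineIntersection() above solves the standard parametric line/plane problem:
# points on the line are l(t) = linePoint + t * lineDirection, and the plane
# satisfies dot(planeNormal, x - planePoint) = 0. Substituting gives
#     t = dot(planeNormal, planePoint - linePoint) / dot(planeNormal, lineDirection)
# which is exactly the expression computed above; when the denominator is zero
# the line is parallel to the plane and the function falls back to returning
# planePoint instead of raising.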
# class Roi(object):
# """this is a simple 2D Region of Interest class
# it is applied on numpy arrays for convenient slicing
# like this:
# roi_array_slice = full_array[r.view]
# # do something with roi_array_slice
# this creates a view, no data copying done
# """
# def __init__(self, array_shape):
# self.array_shape = array_shape
# self.lX = 0
# self.lY = 0
# self.uX = array_shape[1]
# self.uY = array_shape[0]
# self.nX = 0
# self.nY = 0
# open a named pipe for each pi and start listening
pipeinit0 = sp.Popen(['./r0.sh'], stdout=sp.PIPE)
pipeinit1 = sp.Popen(['./r1.sh'], stdout=sp.PIPE)
# start streaming from the pi to this computer
sshPi0 = sp.Popen(['ssh', 'pi@10.0.0.3', '-p', '6622', '~/stream.sh'], stdout=sp.PIPE)
vs0 = WebcamVideoStream(fifo="fifo0").start()
print()
print()
print('Fifo 0 started')
print()
print()
sshPi1 = sp.Popen(['ssh', 'pi@10.0.0.5', '~/stream.sh'], stdout=sp.PIPE)
vs1 = WebcamVideoStream(fifo="fifo1").start()
print()
print()
print('Fifo 1 started')
print()
print()
# i = 0
# j = 0
frame = Frame(0)
roi = Roi(frame.img.shape)
cv2.namedWindow('Video0')
cv2.namedWindow('Video1')
cv2.namedWindow('aruco')
vout0 = None
vout1 = None
if len(sys.argv) > 2:  # need two output name prefixes, one per camera
fourcc = cv2.VideoWriter_fourcc(*'x264')
vout0 = cv2.VideoWriter(sys.argv[1]+'0.mp4', fourcc, 24.0, (frame.img.shape[1], frame.img.shape[0]))
vout1 = cv2.VideoWriter(sys.argv[2]+'1.mp4', fourcc, 24.0, (frame.img.shape[0], frame.img.shape[1]))
## ACTUAL STUFF BELOW
pupil_detector = Detector_3D()
pupil_detector.set_2d_detector_property('pupil_size_max', 150)
# pupil_detector.set_2d_detector_property('pupil_size_min', 10)
# pupil_detector.set_2d_detector_property('ellipse_roundness_ratio', 0.1)
# pupil_detector.set_2d_detector_property('coarse_filter_max', 240)
# pupil_detector.set_2d_detector_property('intensity_range', 30)
# pupil_detector.set_2d_detector_property('canny_treshold', 200)
# pupil_detector.set_2d_detector_property('canny_ration', 3)
# pupil_detector.set_2d_detector_property('support_pixel_ratio_exponent', 3.0)
# pupil_detector.set_2d_detector_property('initial_ellipse_fit_treshhold', 1.5)
'''
'coarse_detection': True,
'coarse_filter_min': 128,
'coarse_filter_max': 280,
'intensity_range': 23,
'blur_size': 5,
'canny_treshold': 160,
'canny_ration': 2,
'canny_aperture': 5,
'pupil_size_max': 100,
'pupil_size_min': 10,
'strong_perimeter_ratio_range_min': 0.8,
'strong_perimeter_ratio_range_max': 1.1,
'strong_area_ratio_range_min': 0.6,
'strong_area_ratio_range_max': 1.1,
'contour_size_min': 5,
'ellipse_roundness_ratio': 0.1,
'initial_ellipse_fit_treshhold': 1.8,
'final_perimeter_ratio_range_min': 0.6,
'final_perimeter_ratio_range_max': 1.2,
'ellipse_true_support_min_dist': 2.5,
'support_pixel_ratio_exponent': 2.0
'''
objPoints = np.array(
[(0, 0, 0), (536.575, 0, 0), (536.575, -361.95, 0), (0, -361.95, 0)]
)
UNITS_E = 1 # mm per box
UNITS_W = 14 # mm per box
# Hoff = np.eye(4)
# Hoff[:3,3] = np.array([-0.64, -1.28, 0.0])
# HoffW = np.eye(4)
# HoffW[:3,3] = np.array([0.0,0.0,0.0])
Hoff = np.eye(4)
Hoff[:3, 3] = np.array([-1.06, -1.28, 0.0])
HoffW = np.eye(4)
HoffW[:3, 3] = np.array([-168.0, -100.0, -235.0])
HEW = np.eye(4)
# R = np.array([78.69,90.0,180+39.67])
R = np.array([-14.0,40.0,143]) # ********** DONT DELETE
HEW[:3,:3] = cv2.Rodrigues(R)[0]
HEW[:3,3] = np.array([-58.58,-18.19,32.47])
# H90 = np.eye(4)
# H90[:3,:3] = cv2.Rodrigues(np.array([0.0,0.0,0.0]))[0]
# Z = 1000
# HEATMAP
def gkern(kernlen, sigma):
# First a 1-D Gaussian
lim = kernlen // 2 + (kernlen % 2) / 2
t = np.linspace(-lim, lim, kernlen)
bump = np.exp(-0.25 * (t / sigma)**2)
bump /= np.trapz(bump) # normalize the integral to 1
# make a 2-D kernel out of it
return bump[:, np.newaxis] * bump[np.newaxis, :]
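# gkern() builds a separable 2-D Gaussian: a 1-D bump is normalised to unit
# integral and the outer product bump[:, None] * bump[None, :] gives the square
# kernel. In the heatmap code further down, this kernel (scaled by `gain`) is
# added around each predicted gaze point and the whole buffer is divided by
# `decay` on every update, so old fixations fade out exponentially.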
def convertFeat(f):
# print(f)
feat = np.zeros((1, 26 ))
feat[0][:12] = f['corners'].flatten()
feat[0][12:15] = np.array(f['eyeCenter']['center'])
feat[0][15] = f['eyeCenter']['radius']
feat[0][16:19] = np.array(f['pupilCenter']['center'])
feat[0][19:22] = np.array(f['pupilCenter']['normal'])
feat[0][22] = f['pupilCenter']['radius']
plane = f['corners']
pupil = f['pupilCenter']['center']
sphere= f['eyeCenter']['center']
t = np.array([-168.0, -100.0, -235.0])
R = np.array([-14.0,40.0,143])
feat[0][23:] = useHomog(plane,pupil,sphere,R,t)
return feat
radius = 200
sigma = 30
gain = 500
decay = 1.007
mask = gkern(2 * radius + 1, sigma) * gain
img0 = np.zeros((1050, 1680, 3))
img1 = np.zeros((1050, 1680, 3))
cv2.namedWindow('heatmap')
curpos = [int(img1.shape[0] / 2), int(img1.shape[1] / 2)]
# aruco
rvecM = [0.0,0.0,0.0]
tvecM = [0.0,0.0,0.0]
plane = None
aflag = False
########################################
# open and unpickle
with open ("training/databig.pickle", 'rb') as handle:
Data = pk.load(handle)
feat = np.zeros((len(Data), 26 ))
lab = np.zeros((len(Data), 2 ))
ones = np.ones((len(Data),1))
for i in range(len(Data)):
feat[i][:12] = Data[i][0]['corners'].flatten()
feat[i][12:15] = np.array(Data[i][0]['eyeCenter']['center'])
feat[i][15] = Data[i][0]['eyeCenter']['radius']
feat[i][16:19] = np.array(Data[i][0]['pupilCenter']['center'])
feat[i][19:22] = np.array(Data[i][0]['pupilCenter']['normal'])
feat[i][22] = Data[i][0]['pupilCenter']['radius']
plane = Data[i][0]['corners']
pupil = Data[i][0]['pupilCenter']['center']
sphere= Data[i][0]['eyeCenter']['center']
t = np.array([-0.64, -1.28, 0.0])
R = np.array([-14.0,40.0,143])
feat[i][23:] = useHomog(plane,pupil,sphere,R,t)
lab[i][:] =np.array( Data[i][1]['2dpoint'] )
if i ==1:
print(feat[1],lab[1])
def mae(a,b): # mean absolute error
return np.mean(abs(a.flatten()-b.flatten()))
def mse(a,b): # mean-squared error, input: Nx2
#return np.sqrt(np.mean(abs(a.flatten()-b.flatten())**2))
#return np.linalg.norm(a-b)
return np.sqrt(np.mean(np.linalg.norm(a-b,axis=1)**2))
# shuffle & split
feat,lab = sklearn.utils.shuffle(feat,lab)
X_train, X_test, y_train, y_test = train_test_split(feat, lab, test_size=0.1, random_state=0)
# XGBoost
# '''
dtrainx = xgb.DMatrix(X_train,y_train[:,0])
dtest = xgb.DMatrix(X_test)
paramsx = {'eta': 0.1, 'gamma': 1.0,
'min_child_weight': 0.1, 'max_depth': 6}
xgb_modelx = xgb.train(paramsx, dtrainx, num_boost_round=100)
dtrainy = xgb.DMatrix(X_train,y_train[:,1])
paramsy = {'eta': 0.1, 'gamma': 1.0,
'min_child_weight': 0.1, 'max_depth': 6}
xgb_modely = xgb.train(paramsy, dtrainy, num_boost_round=100)
predx = xgb_modelx.predict(dtest)
predy = xgb_modely.predict(dtest)
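# Reader's note: two independent boosted-tree regressors are trained above, one per
# screen axis. Each maps the 26-element feature vector (12 marker-corner coordinates,
# eyeball centre and radius, pupil centre, normal and radius, plus the 3-D gaze/plane
# intersection estimate from useHomog) to a pixel coordinate on the display. The
# triple-quoted block that follows is a disabled polynomial-regression baseline kept
# for comparison.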
'''
# polynomial regression
poly = PolynomialFeatures(degree=2,include_bias=False)
X_poly = poly.fit_transform(X_train)
poly_reg = sklearn.linear_model.LinearRegression()
poly_reg.fit(X_poly,y_train)
y_poly_est = poly_reg.predict(poly.transform(X_test))
#print(np.hstack((y_poly_est,y_test)))
print('poly MAE:',[ mae(y_poly_est[:,0],y_test[:,0]), mae(y_poly_est[:,1],y_test[:,1])])
print('poly MSE: ',mse(y_poly_est,y_test))
'''
########################################
xoff = 1680/2
yoff = 1050/2
factorx = 4
factory = 4
## MAIN LOOP
outData = []
count = 0
gazepoint= None
while True:
features = {}
labels = {}
image0 = vs0.read()
image1 = vs1.read()
if image0 is not None:
# image0 = cv2.rotate(image0, cv2.ROTATE_90_CLOCKWISE)
frame.gray = cv2.cvtColor(image0, cv2.COLOR_BGR2GRAY)
frame.img = image0.copy()
prevImage = image0.copy()
frame.timestamp = time.time()
else:
frame.img = prevImage.copy()
frame.gray = cv2.cvtColor(prevImage, cv2.COLOR_BGR2GRAY)
frame.timestamp = time.time()
if image1 is not None:
image1 = cv2.rotate(image1, cv2.ROTATE_180)
prevImage1 = image1.copy()
else:
image1 = prevImage1
corners, ids, rejected = cv2.aruco.detectMarkers(image1, markdict, cameraMatrix=cameraMatrix1, distCoeff=distCoeffs1)
# print(corners)
# print('ids:',ids)
image1 = cv2.aruco.drawDetectedMarkers(image1, corners, ids, (255,0,255))
rvecsC, tvecsC, _ = cv2.aruco.estimatePoseSingleMarkers(corners, 50, cameraMatrix1, distCoeffs1)
# rvecsCs, tvecsCs, _ = cv2.aruco.estimatePoseSingleMarkers(corners, 20, cameraMatrix1, distCoeffs1)
# print(rvecsC)
# print("individual t vecs: ",tvecsC)
if ids is not None and len(corners) == len(ids) == 5:
imgPoints = np.array([corners[x] for x in ids.T[0].argsort()])
plane = np.array([tvecsC[x][0][:] for x in ids.T[0].argsort()])
gazepoint = plane[4]
plane = plane[:4]
# print("Monitor: ",plane)
features['corners'] = plane
# print("3d gaze point: ",gazepoint)
labels['3dpoint'] = gazepoint
result = pupil_detector.detect(frame, roi, True)
draw_ellipse(
frame.img, result['ellipse']['center'],
[x / 2 for x in result['ellipse']['axes']], result['ellipse']['angle'], 0, 360,
(255, 255, 0), 2
)
sphere = np.array(result['sphere']['center'])
pupil = np.array(result['circle_3d']['center'])
# print("sphere: ", sphere)
# print("pupil: ", pupil)
features['eyeCenter'] = result['sphere']
features['pupilCenter'] = result['circle_3d']
if 'corners' in features and 'eyeCenter' in features and 'pupilCenter'in features:
feat = convertFeat(features)
dtest = xgb.DMatrix(feat)
predx = xgb_modelx.predict(dtest)
predy = xgb_modely.predict(dtest)
curpos = int(predx),int(predy)
# y_poly_est = poly_reg.predict(poly.transform(feat))
# print(y_poly_est)
# curpos =[int(y_poly_est[0][0]), int(y_poly_est[0][1])]
# print('curpos',curpos)
else:
print('corners' in features)
curpos = None
if curpos is not None:
#heatmap
xn = max(curpos[0] - radius, 0)
yn = max(curpos[1] - radius, 0)
xm = min(curpos[0] + radius + 1, img1.shape[0])
ym = min(curpos[1] + radius + 1, img1.shape[1])
kxn = radius - (curpos[0] - xn)
kyn = radius - (curpos[1] - yn)
kxm = radius + xm - curpos[0]
kym = radius + ym - curpos[1]
# print(curpos)
# print((xn, yn), ' ', (xm, ym))
# print((kxn, kyn), ' ', (kxm, kym))
img1[xn:xm, yn:ym, 0] += mask[kxn:kxm, kyn:kym]
img1[xn:xm, yn:ym, 1] -= mask[kxn:kxm, kyn:kym] / 4
img1[xn:xm, yn:ym, 2] -= mask[kxn:kxm, kyn:kym] / 2
img1[:, :, :] /= decay
cv2.imshow('heatmap', img0 + img1)
draw_gaze(
frame.img, sphere, pupil, Hoff,
cameraMatrix0, distCoeffs0
)
HEW[:3,:3] = cv2.Rodrigues(R)[0]
H_all = Hoff @ HEW @ HoffW
# print(H_all)
sphere2 = H_all[:3,:3] @ sphere + H_all[:3,3] # THESE TWO LINES
pupil2 = H_all[:3,:3] @ pupil + H_all[:3,3] # THATS THIS ONE TOO
pupil2[0] *= -1
sphere2[0] *= -1
# pupil2 *= UNITS_E/UNITS_W
# sphere2 *= UNITS_E/UNITS_W
# print("sphere2: ", sphere2)
# print("pupil2: ", pupil2)
gaze = pupil2-sphere2
if plane is None:
plane = objPoints.copy()
plane[:,0] -= 536.575/2
plane[:,1] += 361.95/2
plane /= UNITS_W
plane[:,2] = 10000
# print("Plane: ",plane)
draw_plane(image1, plane[0:4], np.eye(4), cameraMatrix1, distCoeffs1)
gazeEnd = lineIntersection(plane[0],np.cross(plane[1]-plane[0],plane[2]-plane[1]), pupil2, gaze) #TODO fix the thing to be either pupil 0 k
draw_gaze(
image1, pupil, gazeEnd, np.eye(4),
cameraMatrix1, distCoeffs1
)
image1 = cv2.aruco.drawAxis(image1, cameraMatrix1, distCoeffs1,
cv2.Rodrigues(H_all[:3,:3])[0], plane[0], 100)
# gazepoint2d = np.abs(plane[1] - gazeEnd)[:2] * 2
# features["2dpoint"] = gazepoint2d
if image0 is not None:
cv2.imshow('Video0', frame.img)
if vout0:
vout0.write(frame.img)
if image1 is not None:
cv2.imshow('Video1', image1)
if vout1:
vout1.write(image1)
if aflag == True:
if xoff >= 1680-96 or xoff<=1:
factorx*=-1
if yoff >= 1050-96 or yoff<=1:
factory*=-1
xoff += factorx
yoff += factory
# print(xoff,yoff)
aimg = drawArucoImg(yoff,xoff)
cv2.imshow("aruco", aimg)
# cv2.imwrite('training/img0-'+str(count)+'.png', frame.img)
# cv2.imwrite('training/img1-'+str(count)+'.png', image1)
# count += 1
labels["2dpoint"]=(xoff+(93/2),yoff +(93/2))
if 'corners' in features and 'eyeCenter' in features and 'pupilCenter'in features and '3dpoint' in labels and '2dpoint' in labels:
outData.append((features,labels))
print("appended")
print((features,labels))
else:
print("Didn't quite catch that")
# if aflag == True:
# aimg,(xxx,yyy)= getNewArucoImg()
# cv2.imshow("aruco", aimg)
# # print('the x and y of the center aruco img',xxx ,' ',yyy)
# aflag = False
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
elif key & 0xFF == ord('h'):
Hoff[:3,3][0] += 0.02
elif key & 0xFF == ord('j'):
Hoff[:3,3][0] -= 0.02
elif key & 0xFF == ord('k'):
Hoff[:3,3][1] += 0.02
elif key & 0xFF == ord('l'):
Hoff[:3,3][1] -= 0.02
elif key & 0xFF == ord('t'):
R[0] += 1.0
elif key & 0xFF == ord('y'):
R[0] -= 1.0
elif key & 0xFF == ord('u'):
R[1] += 1.0
elif key & 0xFF == ord('i'):
R[1] -= 1.0
elif key & 0xFF == ord('o'):
R[2] += 1.0
elif key & 0xFF == ord('p'):
R[2] -= 1.0
elif key & 0xFF == ord('x'):
HoffW[:3,3][0] += 1.02
elif key & 0xFF == ord('c'):
HoffW[:3,3][0] -= 1.02
elif key & 0xFF == ord('v'):
HoffW[:3,3][1] += 1.02
elif key & 0xFF == ord('b'):
HoffW[:3,3][1] -= 1.02
elif key & 0xFF == ord('n'):
HoffW[:3,3][2] += 1.02
elif key & 0xFF == ord('m'):
HoffW[:3,3][2] -= 1.02
elif key & 0xFF == ord('a'):
aflag = not aflag
# elif key & 0xFF == ord('z'):
# Z += 1
# elif key & 0xFF == ord('x'):
# Z -= 1
# elif key == 32: # spacebar will save the following images
# pass
with open('./training/data'+str(time.time())+'.pickle', 'wb') as handle:
pk.dump(outData, handle, protocol=pk.HIGHEST_PROTOCOL)
print("Saved")
if vout0:
vout0.release()
if vout1:
vout1.release()
cv2.destroyAllWindows()
time.sleep(0.5)
vs0.stop()
vs1.stop()
| #!/usr/bin/env python
import cv2
import os
import subprocess as sp
import sys
import numpy as np
import time
import pickle as pk
# import datetime
from matrix import get_pupil_transformation_matrix
from threading import Thread
sys.path.append(os.path.abspath('../../TEST'))
sys.path.append(os.path.abspath('../../TEST/shared_modules'))
from pupil_detectors import Detector_3D
from methods import Roi
sys.path.append(os.path.abspath('../'))
# from calibrateHaar import calibrate
# from pbcvt import findPupilEllipse
# from params import pupil_tracker_params
from cameras import cam0mat as cameraMatrix0
from cameras import cam0dcoef as distCoeffs0
from cameras import cam1mat as cameraMatrix1
from cameras import cam1dcoef as distCoeffs1
cameraMatrix0 = np.array(cameraMatrix0)
distCoeffs0 = np.array(distCoeffs0)
cameraMatrix1 = np.array(cameraMatrix1)
distCoeffs1 = np.array(distCoeffs1)
# from cameras import cam1mat as cameraMatrix1
# from cameras import cam1dcoef as distCoeffs1
TIMEOUT = 10000
FFMPEG_BIN = "ffmpeg"
from sklearn.model_selection import train_test_split
import sklearn.linear_model
import sklearn.utils
from sklearn.preprocessing import PolynomialFeatures
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.multioutput import MultiOutputRegressor
import xgboost as xgb
'''
This code opens fast, low-latency video streams and captures and saves
photos from webcams and networked Raspberry Pis.
The Readme.txt in this directory will help with debugging.
'''
class WebcamVideoStream:
def __init__(self, src=None, fifo=None):
# initialize the video camera stream and read the first frame
# from the stream
# self.stream = cv2.VideoCapture(src)
# (self.grabbed, self.frame) = self.stream.read()
###
if fifo == 'fifo0':
self.height = 640
self.width = 480
elif fifo == 'fifo1':
self.height = 480
self.width = 640
else:
            print('error: please specify the camera type (fifo0 or fifo1)')
            raise ValueError('unknown fifo name: %r' % fifo)
if not fifo:
fifo = 'fifo0'
print("no input using fifo0")
print("about to init command")
command = [
FFMPEG_BIN,
'-i',
fifo,
'-pix_fmt',
'bgr24', # opencv requires bgr24 pixel format.
'-vcodec',
'rawvideo',
'-an',
'-sn',
'-f',
'image2pipe',
'-'
] # '-framerate', '100',
print("about to sp.popen")
self.pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=1024)
print("about read first frame")
try:
raw_image = self.pipe.stdout.read(self.height * self.width * 3)
            self.image = np.frombuffer(
                raw_image, dtype='uint8'
            ).reshape((self.height, self.width, 3))
except Exception:
self.image = np.zeros((self.height, self.width, 3))
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
print("starting thread")
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
print("starting while true loop")
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
self.pipe.kill()
return
raw_image = self.pipe.stdout.read(self.height * self.width * 3)
            self.image = np.frombuffer(
                raw_image, dtype='uint8'
            ).reshape((self.height, self.width, 3))
self.pipe.stdout.flush()
# otherwise, read the next frame from the stream
# (self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.image
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
markdict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
arucoParams = cv2.aruco.DetectorParameters_create()
arucoParams.adaptiveThreshConstant = 10
def getNewArucoImg():
markerSize = 93
outimg = cv2.aruco.drawMarker(markdict, 5, markerSize)
height = 1050
width = 1680
bigPic = np.ones((height,width))
#random offset
yo = np.random.randint(0,width-markerSize)
xo = np.random.randint(0,height-markerSize)
bigPic[xo:xo+markerSize,yo:yo+markerSize] = outimg
return bigPic, (xo+markerSize/2,yo+markerSize/2)
def drawArucoImg(xo,yo):
markerSize = 93
outimg = cv2.aruco.drawMarker(markdict, 5, markerSize)
height = 1050
width = 1680
bigPic = np.ones((height,width))
xo = int(xo)
yo = int(yo)
bigPic[xo:xo+markerSize,yo:yo+markerSize] = outimg
return bigPic
def draw_ellipse(
img,
center,
axes,
angle,
startAngle,
endAngle,
color,
thickness=3,
lineType=cv2.LINE_AA,
shift=10):
center = (int(round(center[0] * 2**shift)), int(round(center[1] * 2**shift)))
axes = (int(round(axes[0] * 2**shift)), int(round(axes[1] * 2**shift)))
cv2.ellipse(
img,
center,
axes,
angle,
startAngle,
endAngle,
color,
thickness,
lineType,
shift,
)
class Frame(object):
def __init__(self, camType):
if camType == 0:
self.height = 640
self.width = 480
elif camType == 1:
self.height = 480
self.width = 640
self.gray = np.zeros((self.height, self.width))
self.img = np.zeros((self.height, self.width, 3))
self.timestamp = time.time()
def solveperp(objectPoints, imagePoints, cameraMatrix, distCoeffs, method):
if method == 1:
return cv2.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs)
elif method == 2:
return cv2.solvePnPRansac(
objectPoints, imagePoints, cameraMatrix, distCoeffs
)
else:
return cv2.solveP3P(objectPoints, imagePoints, cameraMatrix, distCoeffs)
def draw_gaze(img, start, end, H, K, dist):
# unit is mm
try:
rvec, _ = cv2.Rodrigues(H[:3,:3])
tvec = H[:3,3]
points = np.float32([
start,
end,
]).reshape(-1, 3)
axisPoints, _ = cv2.projectPoints(points, rvec, tvec, K, dist)
img = cv2.arrowedLine(
img, tuple(axisPoints[0].ravel()), tuple(axisPoints[1].ravel()),
(0, 255, 0), 3
)
except OverflowError:
pass
return img
def draw_plane(img, corners, H, K, dist):
# unit is mm
try:
rvec, _ = cv2.Rodrigues(H[:3,:3])
tvec = H[:3,3]
points = np.float32(corners).reshape(-1, 3)
axisPoints, _ = cv2.projectPoints(points, rvec, tvec, K, dist)
img = cv2.arrowedLine(
img, tuple(axisPoints[0].ravel()), tuple(axisPoints[1].ravel()),
(0, 0, 255), 3
)
img = cv2.arrowedLine(
img, tuple(axisPoints[1].ravel()), tuple(axisPoints[2].ravel()),
(255, 0, 0), 3
)
img = cv2.arrowedLine(
img, tuple(axisPoints[2].ravel()), tuple(axisPoints[3].ravel()),
(255, 0, 0), 3
)
img = cv2.arrowedLine(
img, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()),
(255, 0, 0), 3
)
except OverflowError:
pass
return img
def useHomog(plane,pupil,sphere,R,t):
H = np.eye(4)
H[:3,:3] = cv2.Rodrigues(R)[0]
H[:3,3]= t
sphere2 = H[:3,:3] @ sphere + H[:3,3] # THESE TWO LINES
pupil2 = H[:3,:3] @ pupil + H[:3,3] # THATS THIS ONE TOO
gaze = pupil2-sphere2
gazeEnd = lineIntersection(plane[0],np.cross(plane[1]-plane[0],plane[2]-plane[1]), pupil2, gaze)
    return gazeEnd  # predicted gaze point: intersection of the gaze ray with the screen plane
def lineIntersection(planePoint, planeNormal, linePoint, lineDirection): #THIS FUNCTION
if np.dot(planeNormal,lineDirection) == 0:
return planePoint
t = (np.dot(planeNormal,planePoint) - np.dot(planeNormal,linePoint)) / np.dot(planeNormal,lineDirection)
    return linePoint + t*lineDirection
# class Roi(object):
# """this is a simple 2D Region of Interest class
# it is applied on numpy arrays for convenient slicing
# like this:
# roi_array_slice = full_array[r.view]
# # do something with roi_array_slice
# this creates a view, no data copying done
# """
# def __init__(self, array_shape):
# self.array_shape = array_shape
# self.lX = 0
# self.lY = 0
# self.uX = array_shape[1]
# self.uY = array_shape[0]
# self.nX = 0
# self.nY = 0
# open a named pipe for each pi and start listening
pipeinit0 = sp.Popen(['./r0.sh'], stdout=sp.PIPE)
pipeinit1 = sp.Popen(['./r1.sh'], stdout=sp.PIPE)
# start streaming from the pi to this computer
sshPi0 = sp.Popen(['ssh', 'pi@10.0.0.3', '-p', '6622', '~/stream.sh'], stdout=sp.PIPE)
vs0 = WebcamVideoStream(fifo="fifo0").start()
print()
print()
print('Fifo 0 started')
print()
print()
sshPi1 = sp.Popen(['ssh', 'pi@10.0.0.5', '~/stream.sh'], stdout=sp.PIPE)
vs1 = WebcamVideoStream(fifo="fifo1").start()
print()
print()
print('Fifo 1 started')
print()
print()
# i = 0
# j = 0
frame = Frame(0)
roi = Roi(frame.img.shape)
cv2.namedWindow('Video0')
cv2.namedWindow('Video1')
cv2.namedWindow('aruco')
vout0 = None
vout1 = None
if len(sys.argv) > 2:  # need two output name prefixes, one per camera
fourcc = cv2.VideoWriter_fourcc(*'x264')
vout0 = cv2.VideoWriter(sys.argv[1]+'0.mp4', fourcc, 24.0, (frame.img.shape[1], frame.img.shape[0]))
vout1 = cv2.VideoWriter(sys.argv[2]+'1.mp4', fourcc, 24.0, (frame.img.shape[0], frame.img.shape[1]))
## ACTUAL STUFF BELOW
pupil_detector = Detector_3D()
pupil_detector.set_2d_detector_property('pupil_size_max', 150)
# pupil_detector.set_2d_detector_property('pupil_size_min', 10)
# pupil_detector.set_2d_detector_property('ellipse_roundness_ratio', 0.1)
# pupil_detector.set_2d_detector_property('coarse_filter_max', 240)
# pupil_detector.set_2d_detector_property('intensity_range', 30)
# pupil_detector.set_2d_detector_property('canny_treshold', 200)
# pupil_detector.set_2d_detector_property('canny_ration', 3)
# pupil_detector.set_2d_detector_property('support_pixel_ratio_exponent', 3.0)
# pupil_detector.set_2d_detector_property('initial_ellipse_fit_treshhold', 1.5)
'''
'coarse_detection': True,
'coarse_filter_min': 128,
'coarse_filter_max': 280,
'intensity_range': 23,
'blur_size': 5,
'canny_treshold': 160,
'canny_ration': 2,
'canny_aperture': 5,
'pupil_size_max': 100,
'pupil_size_min': 10,
'strong_perimeter_ratio_range_min': 0.8,
'strong_perimeter_ratio_range_max': 1.1,
'strong_area_ratio_range_min': 0.6,
'strong_area_ratio_range_max': 1.1,
'contour_size_min': 5,
'ellipse_roundness_ratio': 0.1,
'initial_ellipse_fit_treshhold': 1.8,
'final_perimeter_ratio_range_min': 0.6,
'final_perimeter_ratio_range_max': 1.2,
'ellipse_true_support_min_dist': 2.5,
'support_pixel_ratio_exponent': 2.0
'''
objPoints = np.array(
[(0, 0, 0), (536.575, 0, 0), (536.575, -361.95, 0), (0, -361.95, 0)]
)
UNITS_E = 1 # mm per box
UNITS_W = 14 # mm per box
# Hoff = np.eye(4)
# Hoff[:3,3] = np.array([-0.64, -1.28, 0.0])
# HoffW = np.eye(4)
# HoffW[:3,3] = np.array([0.0,0.0,0.0])
Hoff = np.eye(4)
Hoff[:3, 3] = np.array([-1.06, -1.28, 0.0])
HoffW = np.eye(4)
HoffW[:3, 3] = np.array([-168.0, -100.0, -235.0])
HEW = np.eye(4)
# R = np.array([78.69,90.0,180+39.67])
R = np.array([-14.0,40.0,143]) # ********** DONT DELETE
HEW[:3,:3] = cv2.Rodrigues(R)[0]
HEW[:3,3] = np.array([-58.58,-18.19,32.47])
# H90 = np.eye(4)
# H90[:3,:3] = cv2.Rodrigues(np.array([0.0,0.0,0.0]))[0]
# Z = 1000
# HEATMAP
def gkern(kernlen, sigma):
# First a 1-D Gaussian
lim = kernlen // 2 + (kernlen % 2) / 2
t = np.linspace(-lim, lim, kernlen)
bump = np.exp(-0.25 * (t / sigma)**2)
bump /= np.trapz(bump) # normalize the integral to 1
# make a 2-D kernel out of it
return bump[:, np.newaxis] * bump[np.newaxis, :]
def convertFeat(f):
# print(f)
feat = np.zeros((1, 26 ))
feat[0][:12] = f['corners'].flatten()
feat[0][12:15] = np.array(f['eyeCenter']['center'])
feat[0][15] = f['eyeCenter']['radius']
feat[0][16:19] = np.array(f['pupilCenter']['center'])
feat[0][19:22] = np.array(f['pupilCenter']['normal'])
feat[0][22] = f['pupilCenter']['radius']
plane = f['corners']
pupil = f['pupilCenter']['center']
sphere= f['eyeCenter']['center']
t = np.array([-168.0, -100.0, -235.0])
R = np.array([-14.0,40.0,143])
feat[0][23:] = useHomog(plane,pupil,sphere,R,t)
return feat
radius = 200
sigma = 30
gain = 500
decay = 1.007
mask = gkern(2 * radius + 1, sigma) * gain
img0 = np.zeros((1050, 1680, 3))
img1 = np.zeros((1050, 1680, 3))
cv2.namedWindow('heatmap')
curpos = [int(img1.shape[0] / 2), int(img1.shape[1] / 2)]
# aruco
rvecM = [0.0,0.0,0.0]
tvecM = [0.0,0.0,0.0]
plane = None
aflag = False
########################################
# open and unpickle
with open ("training/databig.pickle", 'rb') as handle:
Data = pk.load(handle)
feat = np.zeros((len(Data), 26 ))
lab = np.zeros((len(Data), 2 ))
ones = np.ones((len(Data),1))
for i in range(len(Data)):
feat[i][:12] = Data[i][0]['corners'].flatten()
feat[i][12:15] = np.array(Data[i][0]['eyeCenter']['center'])
feat[i][15] = Data[i][0]['eyeCenter']['radius']
feat[i][16:19] = np.array(Data[i][0]['pupilCenter']['center'])
feat[i][19:22] = np.array(Data[i][0]['pupilCenter']['normal'])
feat[i][22] = Data[i][0]['pupilCenter']['radius']
plane = Data[i][0]['corners']
pupil = Data[i][0]['pupilCenter']['center']
sphere= Data[i][0]['eyeCenter']['center']
t = np.array([-0.64, -1.28, 0.0])
R = np.array([-14.0,40.0,143])
feat[i][23:] = useHomog(plane,pupil,sphere,R,t)
lab[i][:] =np.array( Data[i][1]['2dpoint'] )
if i ==1:
print(feat[1],lab[1])
def mae(a,b): # mean absolute error
return np.mean(abs(a.flatten()-b.flatten()))
def mse(a,b): # mean-squared error, input: Nx2
#return np.sqrt(np.mean(abs(a.flatten()-b.flatten())**2))
#return np.linalg.norm(a-b)
return np.sqrt(np.mean(np.linalg.norm(a-b,axis=1)**2))
# shuffle & split
feat,lab = sklearn.utils.shuffle(feat,lab)
X_train, X_test, y_train, y_test = train_test_split(feat, lab, test_size=0.1, random_state=0)
# XGBoost
# '''
dtrainx = xgb.DMatrix(X_train,y_train[:,0])
dtest = xgb.DMatrix(X_test)
paramsx = {'eta': 0.1, 'gamma': 1.0,
'min_child_weight': 0.1, 'max_depth': 6}
xgb_modelx = xgb.train(paramsx, dtrainx, num_boost_round=100)
dtrainy = xgb.DMatrix(X_train,y_train[:,1])
paramsy = {'eta': 0.1, 'gamma': 1.0,
'min_child_weight': 0.1, 'max_depth': 6}
xgb_modely = xgb.train(paramsy, dtrainy, num_boost_round=100)
predx = xgb_modelx.predict(dtest)
predy = xgb_modely.predict(dtest)
'''
# polynomial regression
poly = PolynomialFeatures(degree=2,include_bias=False)
X_poly = poly.fit_transform(X_train)
poly_reg = sklearn.linear_model.LinearRegression()
poly_reg.fit(X_poly,y_train)
y_poly_est = poly_reg.predict(poly.transform(X_test))
#print(np.hstack((y_poly_est,y_test)))
print('poly MAE:',[ mae(y_poly_est[:,0],y_test[:,0]), mae(y_poly_est[:,1],y_test[:,1])])
print('poly MSE: ',mse(y_poly_est,y_test))
'''
########################################
xoff = 1680/2
yoff = 1050/2
factorx = 4
factory = 4
## MAIN LOOP
outData = []
count = 0
gazepoint= None
while True:
features = {}
labels = {}
image0 = vs0.read()
image1 = vs1.read()
if image0 is not None:
# image0 = cv2.rotate(image0, cv2.ROTATE_90_CLOCKWISE)
frame.gray = cv2.cvtColor(image0, cv2.COLOR_BGR2GRAY)
frame.img = image0.copy()
prevImage = image0.copy()
frame.timestamp = time.time()
else:
frame.img = prevImage.copy()
frame.gray = cv2.cvtColor(prevImage, cv2.COLOR_BGR2GRAY)
frame.timestamp = time.time()
if image1 is not None:
image1 = cv2.rotate(image1, cv2.ROTATE_180)
prevImage1 = image1.copy()
else:
image1 = prevImage1
corners, ids, rejected = cv2.aruco.detectMarkers(image1, markdict, cameraMatrix=cameraMatrix1, distCoeff=distCoeffs1)
# print(corners)
# print('ids:',ids)
image1 = cv2.aruco.drawDetectedMarkers(image1, corners, ids, (255,0,255))
rvecsC, tvecsC, _ = cv2.aruco.estimatePoseSingleMarkers(corners, 50, cameraMatrix1, distCoeffs1)
# rvecsCs, tvecsCs, _ = cv2.aruco.estimatePoseSingleMarkers(corners, 20, cameraMatrix1, distCoeffs1)
# print(rvecsC)
# print("individual t vecs: ",tvecsC)
if ids is not None and len(corners) == len(ids) == 5:
imgPoints = np.array([corners[x] for x in ids.T[0].argsort()])
plane = np.array([tvecsC[x][0][:] for x in ids.T[0].argsort()])
gazepoint = plane[4]
plane = plane[:4]
# print("Monitor: ",plane)
features['corners'] = plane
# print("3d gaze point: ",gazepoint)
labels['3dpoint'] = gazepoint
result = pupil_detector.detect(frame, roi, True)
draw_ellipse(
frame.img, result['ellipse']['center'],
[x / 2 for x in result['ellipse']['axes']], result['ellipse']['angle'], 0, 360,
(255, 255, 0), 2
)
sphere = np.array(result['sphere']['center'])
pupil = np.array(result['circle_3d']['center'])
# print("sphere: ", sphere)
# print("pupil: ", pupil)
features['eyeCenter'] = result['sphere']
features['pupilCenter'] = result['circle_3d']
if 'corners' in features and 'eyeCenter' in features and 'pupilCenter'in features:
feat = convertFeat(features)
dtest = xgb.DMatrix(feat)
predx = xgb_modelx.predict(dtest)
predy = xgb_modely.predict(dtest)
curpos = int(predx),int(predy)
# y_poly_est = poly_reg.predict(poly.transform(feat))
# print(y_poly_est)
# curpos =[int(y_poly_est[0][0]), int(y_poly_est[0][1])]
# print('curpos',curpos)
else:
print('corners' in features)
curpos = None
if curpos is not None:
#heatmap
xn = max(curpos[0] - radius, 0)
yn = max(curpos[1] - radius, 0)
xm = min(curpos[0] + radius + 1, img1.shape[0])
ym = min(curpos[1] + radius + 1, img1.shape[1])
kxn = radius - (curpos[0] - xn)
kyn = radius - (curpos[1] - yn)
kxm = radius + xm - curpos[0]
kym = radius + ym - curpos[1]
# print(curpos)
# print((xn, yn), ' ', (xm, ym))
# print((kxn, kyn), ' ', (kxm, kym))
img1[xn:xm, yn:ym, 0] += mask[kxn:kxm, kyn:kym]
img1[xn:xm, yn:ym, 1] -= mask[kxn:kxm, kyn:kym] / 4
img1[xn:xm, yn:ym, 2] -= mask[kxn:kxm, kyn:kym] / 2
img1[:, :, :] /= decay
cv2.imshow('heatmap', img0 + img1)
draw_gaze(
frame.img, sphere, pupil, Hoff,
cameraMatrix0, distCoeffs0
)
HEW[:3,:3] = cv2.Rodrigues(R)[0]
H_all = Hoff @ HEW @ HoffW
# print(H_all)
sphere2 = H_all[:3,:3] @ sphere + H_all[:3,3] # THESE TWO LINES
pupil2 = H_all[:3,:3] @ pupil + H_all[:3,3] # THATS THIS ONE TOO
pupil2[0] *= -1
sphere2[0] *= -1
# pupil2 *= UNITS_E/UNITS_W
# sphere2 *= UNITS_E/UNITS_W
# print("sphere2: ", sphere2)
# print("pupil2: ", pupil2)
gaze = pupil2-sphere2
if plane is None:
plane = objPoints.copy()
plane[:,0] -= 536.575/2
plane[:,1] += 361.95/2
plane /= UNITS_W
plane[:,2] = 10000
# print("Plane: ",plane)
draw_plane(image1, plane[0:4], np.eye(4), cameraMatrix1, distCoeffs1)
gazeEnd = lineIntersection(plane[0],np.cross(plane[1]-plane[0],plane[2]-plane[1]), pupil2, gaze) #TODO fix the thing to be either pupil 0 k
draw_gaze(
image1, pupil, gazeEnd, np.eye(4),
cameraMatrix1, distCoeffs1
)
image1 = cv2.aruco.drawAxis(image1, cameraMatrix1, distCoeffs1,
cv2.Rodrigues(H_all[:3,:3])[0], plane[0], 100)
# gazepoint2d = np.abs(plane[1] - gazeEnd)[:2] * 2
# features["2dpoint"] = gazepoint2d
if image0 is not None:
cv2.imshow('Video0', frame.img)
if vout0:
vout0.write(frame.img)
if image1 is not None:
cv2.imshow('Video1', image1)
if vout1:
vout1.write(image1)
if aflag == True:
if xoff >= 1680-96 or xoff<=1:
factorx*=-1
if yoff >= 1050-96 or yoff<=1:
factory*=-1
xoff += factorx
yoff += factory
# print(xoff,yoff)
aimg = drawArucoImg(yoff,xoff)
cv2.imshow("aruco", aimg)
# cv2.imwrite('training/img0-'+str(count)+'.png', frame.img)
# cv2.imwrite('training/img1-'+str(count)+'.png', image1)
# count += 1
labels["2dpoint"]=(xoff+(93/2),yoff +(93/2))
if 'corners' in features and 'eyeCenter' in features and 'pupilCenter'in features and '3dpoint' in labels and '2dpoint' in labels:
outData.append((features,labels))
print("appended")
print((features,labels))
else:
print("Didn't quite catch that")
# if aflag == True:
# aimg,(xxx,yyy)= getNewArucoImg()
# cv2.imshow("aruco", aimg)
# # print('the x and y of the center aruco img',xxx ,' ',yyy)
# aflag = False
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
elif key & 0xFF == ord('h'):
Hoff[:3,3][0] += 0.02
elif key & 0xFF == ord('j'):
Hoff[:3,3][0] -= 0.02
elif key & 0xFF == ord('k'):
Hoff[:3,3][1] += 0.02
elif key & 0xFF == ord('l'):
Hoff[:3,3][1] -= 0.02
elif key & 0xFF == ord('t'):
R[0] += 1.0
elif key & 0xFF == ord('y'):
R[0] -= 1.0
elif key & 0xFF == ord('u'):
R[1] += 1.0
elif key & 0xFF == ord('i'):
R[1] -= 1.0
elif key & 0xFF == ord('o'):
R[2] += 1.0
elif key & 0xFF == ord('p'):
R[2] -= 1.0
elif key & 0xFF == ord('x'):
HoffW[:3,3][0] += 1.02
elif key & 0xFF == ord('c'):
HoffW[:3,3][0] -= 1.02
elif key & 0xFF == ord('v'):
HoffW[:3,3][1] += 1.02
elif key & 0xFF == ord('b'):
HoffW[:3,3][1] -= 1.02
elif key & 0xFF == ord('n'):
HoffW[:3,3][2] += 1.02
elif key & 0xFF == ord('m'):
HoffW[:3,3][2] -= 1.02
elif key & 0xFF == ord('a'):
aflag = not aflag
# elif key & 0xFF == ord('z'):
# Z += 1
# elif key & 0xFF == ord('x'):
# Z -= 1
# elif key == 32: # spacebar will save the following images
# pass
with open('./training/data'+str(time.time())+'.pickle', 'wb') as handle:
pk.dump(outData, handle, protocol=pk.HIGHEST_PROTOCOL)
print("Saved")
if vout0:
vout0.release()
if vout1:
vout1.release()
cv2.destroyAllWindows()
time.sleep(0.5)
vs0.stop()
vs1.stop()
| en | 0.461544 | #!/usr/bin/env python # import datetime # from calibrateHaar import calibrate # from pbcvt import findPupilEllipse # from params import pupil_tracker_params # from cameras import cam1mat as cameraMatrix1 # from cameras import cam1dcoef as distCoeffs1 This code will be able to open fast and low latency streams and capture and save photos from webcams and network raspberry pi's The Readme.txt in this dir will help with debugging # initialize the video camera stream and read the first frame # from the stream # self.stream = cv2.VideoCapture(src) # (self.grabbed, self.frame) = self.stream.read() ### # opencv requires bgr24 pixel format. # '-framerate', '100', # initialize the variable used to indicate if the thread should # be stopped # start the thread to read frames from the video stream # keep looping infinitely until the thread is stopped # if the thread indicator variable is set, stop the thread # otherwise, read the next frame from the stream # (self.grabbed, self.frame) = self.stream.read() # return the frame most recently read # indicate that the thread should be stopped #random offset # unit is mm # unit is mm # THESE TWO LINES # THATS THIS ONE TOO #predi #THIS FUNCTION # class Roi(object): # """this is a simple 2D Region of Interest class # it is applied on numpy arrays for convenient slicing # like this: # roi_array_slice = full_array[r.view] # # do something with roi_array_slice # this creates a view, no data copying done # """ # def __init__(self, array_shape): # self.array_shape = array_shape # self.lX = 0 # self.lY = 0 # self.uX = array_shape[1] # self.uY = array_shape[0] # self.nX = 0 # self.nY = 0 # open a named pipe for each pi and start listening # start streaming from the pi to this computer # i = 0 # j = 0 ## ACTUAL STUFF BELOW # pupil_detector.set_2d_detector_property('pupil_size_min', 10) # pupil_detector.set_2d_detector_property('ellipse_roundness_ratio', 0.1) # pupil_detector.set_2d_detector_property('coarse_filter_max', 240) # pupil_detector.set_2d_detector_property('intensity_range', 30) # pupil_detector.set_2d_detector_property('canny_treshold', 200) # pupil_detector.set_2d_detector_property('canny_ration', 3) # pupil_detector.set_2d_detector_property('support_pixel_ratio_exponent', 3.0) # pupil_detector.set_2d_detector_property('initial_ellipse_fit_treshhold', 1.5) 'coarse_detection': True, 'coarse_filter_min': 128, 'coarse_filter_max': 280, 'intensity_range': 23, 'blur_size': 5, 'canny_treshold': 160, 'canny_ration': 2, 'canny_aperture': 5, 'pupil_size_max': 100, 'pupil_size_min': 10, 'strong_perimeter_ratio_range_min': 0.8, 'strong_perimeter_ratio_range_max': 1.1, 'strong_area_ratio_range_min': 0.6, 'strong_area_ratio_range_max': 1.1, 'contour_size_min': 5, 'ellipse_roundness_ratio': 0.1, 'initial_ellipse_fit_treshhold': 1.8, 'final_perimeter_ratio_range_min': 0.6, 'final_perimeter_ratio_range_max': 1.2, 'ellipse_true_support_min_dist': 2.5, 'support_pixel_ratio_exponent': 2.0 # mm per box # mm per box # Hoff = np.eye(4) # Hoff[:3,3] = np.array([-0.64, -1.28, 0.0]) # HoffW = np.eye(4) # HoffW[:3,3] = np.array([0.0,0.0,0.0]) # R = np.array([78.69,90.0,180+39.67]) # ********** DONT DELETE # H90 = np.eye(4) # H90[:3,:3] = cv2.Rodrigues(np.array([0.0,0.0,0.0]))[0] # Z = 1000 # HEATMAP # First a 1-D Gaussian # normalize the integral to 1 # make a 2-D kernel out of it # print(f) # aruco ######################################## # open and unpickle # mean absolute error # mean-squared error, input: Nx2 #return 
np.sqrt(np.mean(abs(a.flatten()-b.flatten())**2)) #return np.linalg.norm(a-b) # shuffle & split # XGBoost # ''' # polynomial regression poly = PolynomialFeatures(degree=2,include_bias=False) X_poly = poly.fit_transform(X_train) poly_reg = sklearn.linear_model.LinearRegression() poly_reg.fit(X_poly,y_train) y_poly_est = poly_reg.predict(poly.transform(X_test)) #print(np.hstack((y_poly_est,y_test))) print('poly MAE:',[ mae(y_poly_est[:,0],y_test[:,0]), mae(y_poly_est[:,1],y_test[:,1])]) print('poly MSE: ',mse(y_poly_est,y_test)) ######################################## ## MAIN LOOP # image0 = cv2.rotate(image0, cv2.ROTATE_90_CLOCKWISE) # print(corners) # print('ids:',ids) # rvecsCs, tvecsCs, _ = cv2.aruco.estimatePoseSingleMarkers(corners, 20, cameraMatrix1, distCoeffs1) # print(rvecsC) # print("individual t vecs: ",tvecsC) # print("Monitor: ",plane) # print("3d gaze point: ",gazepoint) # print("sphere: ", sphere) # print("pupil: ", pupil) # y_poly_est = poly_reg.predict(poly.transform(feat)) # print(y_poly_est) # curpos =[int(y_poly_est[0][0]), int(y_poly_est[0][1])] # print('curpos',curpos) #heatmap # print(curpos) # print((xn, yn), ' ', (xm, ym)) # print((kxn, kyn), ' ', (kxm, kym)) # print(H_all) # THESE TWO LINES # THATS THIS ONE TOO # pupil2 *= UNITS_E/UNITS_W # sphere2 *= UNITS_E/UNITS_W # print("sphere2: ", sphere2) # print("pupil2: ", pupil2) # print("Plane: ",plane) #TODO fix the thing to be either pupil 0 k # gazepoint2d = np.abs(plane[1] - gazeEnd)[:2] * 2 # features["2dpoint"] = gazepoint2d # print(xoff,yoff) # cv2.imwrite('training/img0-'+str(count)+'.png', frame.img) # cv2.imwrite('training/img1-'+str(count)+'.png', image1) # count += 1 # if aflag == True: # aimg,(xxx,yyy)= getNewArucoImg() # cv2.imshow("aruco", aimg) # # print('the x and y of the center aruco img',xxx ,' ',yyy) # aflag = False # elif key & 0xFF == ord('z'): # Z += 1 # elif key & 0xFF == ord('x'): # Z -= 1 # elif key == 32: # spacebar will save the following images # pass | 1.904953 | 2 |
bin/model_funcs/fortran_versions/turb2_wrap.py | AlexT-L/RANS | 0 | 6632857 | # updates eddy viscosity (ev/rev)
# append to path so we can access Field class
import sys
sys.path.append("../../../")
# class dependencies
import numpy as np
from bin.Field import Field, max, abs, isfinite
# fortran module
from bin.model_funcs.fortran_versions import turb2_fort
def turb_BL(model,ws,w,ncyc=0):
# grid parameters
[nx, ny] = ws.field_size()
[il, jl] = [nx+1, ny+1]
[ie, je] = [nx+2, ny+2]
[ib, jb] = [nx+3, ny+3]
dims = ws.get_dims()
itl = dims['itl']
itu = dims['itu']
    # flow related variables
def get(varName):
return ws.get_field(varName, model.className)
p = get('p') # pressure
ev = get('ev') # eddy viscosity
# mesh vars
vol = get('vol')
x = ws.get_field('x')
# flow params
gamma = model.params['gamma']
mach = model.params['rm']
Re = model.params['re']
xtran = model.params['xtran']
# call turb
turb2_fort.turb2(ie,je,itl+1,itu+1, w,p,ev, x,vol, \
gamma,mach,Re,xtran, ncyc, [il,jl,ib,jb])
| # updates eddy viscosity (ev/rev)
# append to path so we can access Field class
import sys
sys.path.append("../../../")
# class dependencies
import numpy as np
from bin.Field import Field, max, abs, isfinite
# fortran module
from bin.model_funcs.fortran_versions import turb2_fort
def turb_BL(model,ws,w,ncyc=0):
# grid parameters
[nx, ny] = ws.field_size()
[il, jl] = [nx+1, ny+1]
[ie, je] = [nx+2, ny+2]
[ib, jb] = [nx+3, ny+3]
dims = ws.get_dims()
itl = dims['itl']
itu = dims['itu']
    # flow related variables
def get(varName):
return ws.get_field(varName, model.className)
p = get('p') # pressure
ev = get('ev') # eddy viscosity
# mesh vars
vol = get('vol')
x = ws.get_field('x')
# flow params
gamma = model.params['gamma']
mach = model.params['rm']
Re = model.params['re']
xtran = model.params['xtran']
# call turb
turb2_fort.turb2(ie,je,itl+1,itu+1, w,p,ev, x,vol, \
gamma,mach,Re,xtran, ncyc, [il,jl,ib,jb])
| en | 0.440956 | # updates eddy viscosity (ev/rev) # append to path so we can access Field class # class dependencies # fortran module # grid parameters # flow related variabless # pressure # eddy viscosity # mesh vars # flow params # call turb | 2.403036 | 2 |
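turb_BL above is a thin driver around the Fortran turb2 kernel; the sketch below shows one plausible way it could be called from a solver loop. The update_eddy_viscosity wrapper and the model/workspace objects are assumptions for illustration only (they are built elsewhere in the RANS code base), and the import path assumes the repository root is on sys.path.

from bin.model_funcs.fortran_versions.turb2_wrap import turb_BL

def update_eddy_viscosity(model, workspace, w, n_cycles):
    # 'w' holds the conservative flow variables; pressure and eddy viscosity
    # are pulled out of the workspace inside turb_BL itself.
    for cycle in range(n_cycles):
        turb_BL(model, workspace, w, ncyc=cycle)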
src/python3/sdp/scripts/nstx_reflectometry/nstx_equi_plot.py | LeiShi/Synthetic-Diagnostics-Platform | 5 | 6632858 | <gh_stars>1-10
from scipy.io.netcdf import netcdf_file
import numpy as np
import matplotlib.pyplot as plt
eqf = netcdf_file('/p/gkp/lshi/XGC1_NSTX_Case/new_3D_fluctuations/time_step_8/eqfile108.cdf','r')
ne = eqf.variables['ne']
r = eqf.variables['rr'][:]
nz = eqf.dimensions['nz']
midz = (nz - 1) // 2  # integer index of the midplane row (Python 3 needs floor division here)
ne_midz = ne[midz,:]
freqs = np.array([30,32.5,35,37.5,42.5,45,47.5,50,55,57.5,60,62.5,65,66.5,67.5,70,72.5,75])*1e9
ref_ne = (freqs/8.98e3)**2 *1e6
ref_lines = np.zeros((2,len(freqs)))+ref_ne
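# O-mode reflectometry cutoff: a launched wave of frequency f reflects where the local
# electron plasma frequency equals f, i.e. f_pe[Hz] ~ 8.98e3 * sqrt(n_e[cm^-3]).
# Inverting gives n_e[cm^-3] = (f / 8.98e3)**2, and the 1e6 factor converts to m^-3,
# so ref_ne holds the cutoff density for each launch frequency and ref_lines simply
# repeats each value twice so it can be drawn as a horizontal reference line.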
bot_range = [2,8]
top_range = [8,14]
def plot():
fig = plt.figure()
plt.plot(r,ne_midz)
plt.plot(r[[0,-1]],ref_lines[:,0],'b-.',label = 'outer')
plt.plot(r[[0,-1]],ref_lines[:,1:bot_range[0]],'b-.')
plt.plot(r[[0,-1]],ref_lines[:,bot_range[0]],'b-',label = 'lower pedestal')
plt.plot(r[[0,-1]],ref_lines[:,bot_range[0]+1:bot_range[1]],'b-')
plt.plot(r[[0,-1]],ref_lines[:,top_range[0]],'b--',label = 'upper pedestal')
plt.plot(r[[0,-1]],ref_lines[:,top_range[0]+1:top_range[1]],'b--')
plt.plot(r[[0,-1]],ref_lines[:,top_range[1]],'b:',label = 'inner')
plt.plot(r[[0,-1]],ref_lines[:,top_range[1]+1:],'b:')
plt.legend()
plt.title('NSTX Reflectometry Layout')
plt.xlabel('$R(M)$')
plt.ylabel('$ne(m^{-3})$')
| from scipy.io.netcdf import netcdf_file
import numpy as np
import matplotlib.pyplot as plt
eqf = netcdf_file('/p/gkp/lshi/XGC1_NSTX_Case/new_3D_fluctuations/time_step_8/eqfile108.cdf','r')
ne = eqf.variables['ne']
r = eqf.variables['rr'][:]
nz = eqf.dimensions['nz']
midz = (nz - 1) // 2  # integer index of the midplane row (Python 3 needs floor division here)
ne_midz = ne[midz,:]
freqs = np.array([30,32.5,35,37.5,42.5,45,47.5,50,55,57.5,60,62.5,65,66.5,67.5,70,72.5,75])*1e9
ref_ne = (freqs/8.98e3)**2 *1e6
ref_lines = np.zeros((2,len(freqs)))+ref_ne
bot_range = [2,8]
top_range = [8,14]
def plot():
fig = plt.figure()
plt.plot(r,ne_midz)
plt.plot(r[[0,-1]],ref_lines[:,0],'b-.',label = 'outer')
plt.plot(r[[0,-1]],ref_lines[:,1:bot_range[0]],'b-.')
plt.plot(r[[0,-1]],ref_lines[:,bot_range[0]],'b-',label = 'lower pedestal')
plt.plot(r[[0,-1]],ref_lines[:,bot_range[0]+1:bot_range[1]],'b-')
plt.plot(r[[0,-1]],ref_lines[:,top_range[0]],'b--',label = 'upper pedestal')
plt.plot(r[[0,-1]],ref_lines[:,top_range[0]+1:top_range[1]],'b--')
plt.plot(r[[0,-1]],ref_lines[:,top_range[1]],'b:',label = 'inner')
plt.plot(r[[0,-1]],ref_lines[:,top_range[1]+1:],'b:')
plt.legend()
plt.title('NSTX Reflectometry Layout')
plt.xlabel('$R(M)$')
plt.ylabel('$ne(m^{-3})$') | none | 1 | 1.82854 | 2 |
|
enamlkv/kv/kv_mime_data.py | frmdstryr/enaml-kivy | 1 | 6632859 | <gh_stars>1-10
#------------------------------------------------------------------------------
# Copyright (c) 2016, frmdstryr.
# Copyright (c) 2014, Nucleic Development Team.
#
# Distributed under the terms of the MIT License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Dict
from enaml.mime_data import MimeData
class KvMimeData(object):
""" A Qt implementation of an Enaml MimeData object.
"""
_q_data = Dict()
def __init__(self, data=None):
""" Initialize a QtMimeData object.
Parameters
----------
        data : dict, optional
The mime data to wrap. If not provided, one will be created.
"""
self._q_data = data or {}
def q_data(self):
""" Get the internal QMimeData object.
This method is for toolkit backend use only.
Returns
-------
result : QMimeData
The Qt specific mime data object.
"""
return self._q_data
def formats(self):
""" Get a list of the supported mime type formats.
Returns
-------
result : list
A list of mime types supported by the data.
"""
        return list(self._q_data.keys())
def has_format(self, mime_type):
""" Test whether the data supports the given mime type.
Parameters
----------
mime_type : unicode
The mime type of interest.
Returns
-------
result : bool
True if there is data for the given type, False otherwise.
"""
return mime_type in self._q_data
def remove_format(self, mime_type):
""" Remove the data entry for the given mime type.
Parameters
----------
mime_type : unicode
The mime type of interest.
"""
del self._q_data[mime_type]
def data(self, mime_type):
""" Get the data for the specified mime type.
Parameters
----------
mime_type : unicode
The mime type of interest.
Returns
-------
result : str
The data for the specified mime type.
"""
return self._q_data[mime_type]
def set_data(self, mime_type, data):
""" Set the data for the specified mime type.
Parameters
----------
mime_type : unicode
The mime type of interest.
data : str
The serialized data for the given type.
"""
self._q_data[mime_type] = data
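# Minimal usage sketch (illustrative, not part of the original module): the wrapper above
# simply stores byte strings per mime type in a dict.
md = KvMimeData()
md.set_data('text/plain', b'hello')
assert md.has_format('text/plain')
print(md.formats())            # -> ['text/plain']
print(md.data('text/plain'))   # -> b'hello'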
| #------------------------------------------------------------------------------
# Copyright (c) 2016, frmdstryr.
# Copyright (c) 2014, Nucleic Development Team.
#
# Distributed under the terms of the MIT License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Dict
from enaml.mime_data import MimeData
class KvMimeData(object):
""" A Qt implementation of an Enaml MimeData object.
"""
_q_data = Dict()
def __init__(self, data=None):
""" Initialize a QtMimeData object.
Parameters
----------
data : QMimeData, optional
The mime data to wrap. If not provided, one will be created.
"""
self._q_data = data or {}
def q_data(self):
""" Get the internal QMimeData object.
This method is for toolkit backend use only.
Returns
-------
result : QMimeData
The Qt specific mime data object.
"""
return self._q_data
def formats(self):
""" Get a list of the supported mime type formats.
Returns
-------
result : list
A list of mime types supported by the data.
"""
return self._q_data.keys()
def has_format(self, mime_type):
""" Test whether the data supports the given mime type.
Parameters
----------
mime_type : unicode
The mime type of interest.
Returns
-------
result : bool
True if there is data for the given type, False otherwise.
"""
return mime_type in self._q_data
def remove_format(self, mime_type):
""" Remove the data entry for the given mime type.
Parameters
----------
mime_type : unicode
The mime type of interest.
"""
del self._q_data[mime_type]
def data(self, mime_type):
""" Get the data for the specified mime type.
Parameters
----------
mime_type : unicode
The mime type of interest.
Returns
-------
result : str
The data for the specified mime type.
"""
return self._q_data[mime_type]
def set_data(self, mime_type, data):
""" Set the data for the specified mime type.
Parameters
----------
mime_type : unicode
The mime type of interest.
data : str
The serialized data for the given type.
"""
self._q_data[mime_type] = data | en | 0.391176 | #------------------------------------------------------------------------------ # Copyright (c) 2016, frmdstryr. # Copyright (c) 2014, Nucleic Development Team. # # Distributed under the terms of the MIT License. # # The full license is in the file LICENSE, distributed with this software. #------------------------------------------------------------------------------ A Qt implementation of an Enaml MimeData object. Initialize a QtMimeData object. Parameters ---------- data : QMimeData, optional The mime data to wrap. If not provided, one will be created. Get the internal QMimeData object. This method is for toolkit backend use only. Returns ------- result : QMimeData The Qt specific mime data object. Get a list of the supported mime type formats. Returns ------- result : list A list of mime types supported by the data. Test whether the data supports the given mime type. Parameters ---------- mime_type : unicode The mime type of interest. Returns ------- result : bool True if there is data for the given type, False otherwise. Remove the data entry for the given mime type. Parameters ---------- mime_type : unicode The mime type of interest. Get the data for the specified mime type. Parameters ---------- mime_type : unicode The mime type of interest. Returns ------- result : str The data for the specified mime type. Set the data for the specified mime type. Parameters ---------- mime_type : unicode The mime type of interest. data : str The serialized data for the given type. | 2.140404 | 2 |
Medium/127.py | Hellofafar/Leetcode | 6 | 6632860 | # ------------------------------
# 127. Word Ladder
#
# Description:
# Given two words (beginWord and endWord), and a dictionary's word list, find the length of shortest transformation sequence from beginWord to endWord, such that:
# Only one letter can be changed at a time.
# Each transformed word must exist in the word list. Note that beginWord is not a transformed word.
# Note:
# Return 0 if there is no such transformation sequence.
# All words have the same length.
# All words contain only lowercase alphabetic characters.
# You may assume no duplicates in the word list.
# You may assume beginWord and endWord are non-empty and are not the same.
#
# Example 1:
# Input:
# beginWord = "hit",
# endWord = "cog",
# wordList = ["hot","dot","dog","lot","log","cog"]
# Output: 5
# Explanation: As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
# return its length 5.
#
# Example 2:
# Input:
# beginWord = "hit"
# endWord = "cog"
# wordList = ["hot","dot","dog","lot","log"]
# Output: 0
# Explanation: The endWord "cog" is not in wordList, therefore no possible transformation.
#
# Version: 1.0
# 08/15/18 by Jianfa
# ------------------------------
class Solution(object):
def ladderLength(self, beginWord, endWord, wordList):
"""
:type beginWord: str
:type endWord: str
:type wordList: List[str]
:rtype: int
"""
if endWord not in wordList:
return 0
        wordList = set(wordList) # Before making it a set, I hit a time-limit-exceeded error.
wordDist = 1
toVisit = [beginWord]
while toVisit:
for _ in range(len(toVisit)):
word = toVisit.pop(0)
if word == endWord:
return wordDist
for i in range(len(word)):
for j in range(26):
temp = word[:i] + chr(97+j) + word[i+1:]
if temp in wordList:
toVisit.append(temp)
wordList.remove(temp)
wordDist += 1
return 0
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# Key point is BFS solution.
# Follow idea from https://leetcode.com/problems/word-ladder/discuss/40729/Compact-Python-solution
# An important finding is: to use remove() function, set is 100x faster than list | # ------------------------------
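# Usage sketch (added for illustration, mirroring the examples in the header comment above;
# not part of the original submission):
s = Solution()
print(s.ladderLength("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]))  # -> 5
print(s.ladderLength("hit", "cog", ["hot", "dot", "dog", "lot", "log"]))         # -> 0 (endWord missing)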
# 127. Word Ladder
#
# Description:
# Given two words (beginWord and endWord), and a dictionary's word list, find the length of shortest transformation sequence from beginWord to endWord, such that:
# Only one letter can be changed at a time.
# Each transformed word must exist in the word list. Note that beginWord is not a transformed word.
# Note:
# Return 0 if there is no such transformation sequence.
# All words have the same length.
# All words contain only lowercase alphabetic characters.
# You may assume no duplicates in the word list.
# You may assume beginWord and endWord are non-empty and are not the same.
#
# Example 1:
# Input:
# beginWord = "hit",
# endWord = "cog",
# wordList = ["hot","dot","dog","lot","log","cog"]
# Output: 5
# Explanation: As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
# return its length 5.
#
# Example 2:
# Input:
# beginWord = "hit"
# endWord = "cog"
# wordList = ["hot","dot","dog","lot","log"]
# Output: 0
# Explanation: The endWord "cog" is not in wordList, therefore no possible transformation.
#
# Version: 1.0
# 08/15/18 by Jianfa
# ------------------------------
class Solution(object):
def ladderLength(self, beginWord, endWord, wordList):
"""
:type beginWord: str
:type endWord: str
:type wordList: List[str]
:rtype: int
"""
if endWord not in wordList:
return 0
wordList = set(wordList) # Before make it a set, I met time exceed error.
wordDist = 1
toVisit = [beginWord]
while toVisit:
for _ in range(len(toVisit)):
word = toVisit.pop(0)
if word == endWord:
return wordDist
for i in range(len(word)):
for j in range(26):
temp = word[:i] + chr(97+j) + word[i+1:]
if temp in wordList:
toVisit.append(temp)
wordList.remove(temp)
wordDist += 1
return 0
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# Key point is BFS solution.
# Follow idea from https://leetcode.com/problems/word-ladder/discuss/40729/Compact-Python-solution
# An important finding is: to use remove() function, set is 100x faster than list | en | 0.785841 | # ------------------------------ # 127. Word Ladder # # Description: # Given two words (beginWord and endWord), and a dictionary's word list, find the length of shortest transformation sequence from beginWord to endWord, such that: # Only one letter can be changed at a time. # Each transformed word must exist in the word list. Note that beginWord is not a transformed word. # Note: # Return 0 if there is no such transformation sequence. # All words have the same length. # All words contain only lowercase alphabetic characters. # You may assume no duplicates in the word list. # You may assume beginWord and endWord are non-empty and are not the same. # # Example 1: # Input: # beginWord = "hit", # endWord = "cog", # wordList = ["hot","dot","dog","lot","log","cog"] # Output: 5 # Explanation: As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog", # return its length 5. # # Example 2: # Input: # beginWord = "hit" # endWord = "cog" # wordList = ["hot","dot","dog","lot","log"] # Output: 0 # Explanation: The endWord "cog" is not in wordList, therefore no possible transformation. # # Version: 1.0 # 08/15/18 by Jianfa # ------------------------------ :type beginWord: str :type endWord: str :type wordList: List[str] :rtype: int # Before make it a set, I met time exceed error. # Used for testing # ------------------------------ # Summary: # Key point is BFS solution. # Follow idea from https://leetcode.com/problems/word-ladder/discuss/40729/Compact-Python-solution # An important finding is: to use remove() function, set is 100x faster than list | 3.925987 | 4 |
pyepisodate/__init__.py | guiszk/pyepisodate | 0 | 6632861 | from .pyepisodate import pyepisodate
| from .pyepisodate import pyepisodate
| none | 1 | 1.034458 | 1 |
|
ocr/mlhelpers.py | manhcuogntin4/handwritting-ocr | 0 | 6632862 | <reponame>manhcuogntin4/handwritting-ocr<filename>ocr/mlhelpers.py
# -*- coding: utf-8 -*-
"""
Classes for controlling machine learning processes
"""
import numpy as np
import math
import matplotlib.pyplot as plt
import csv
class TrainingPlot:
"""
Creating live plot during training
    REQUIRES notebook backend: %matplotlib notebook
@TODO Migrate to Tensorboard
"""
trainLoss = []
trainAcc = []
validAcc = []
testInterval = 0
lossInterval = 0
interval = 0
ax1 = None
ax2 = None
fig = None
def __init__(self, steps, testItr, lossItr):
self.testInterval = testItr
self.lossInterval = lossItr
self.interval = steps
self.fig, self.ax1 = plt.subplots()
self.ax2 = self.ax1.twinx()
self.ax1.set_autoscaley_on(True)
plt.ion()
self.updatePlot()
# Description
self.ax1.set_xlabel('iteration')
self.ax1.set_ylabel('train loss')
self.ax2.set_ylabel('test accuracy')
# Axes limits
self.ax1.set_ylim([0,10])
def updatePlot(self):
self.fig.canvas.draw()
def updateCost(self, lossTrain, index):
self.trainLoss.append(lossTrain)
if len(self.trainLoss) == 1:
self.ax1.set_ylim([0, min(10, math.ceil(lossTrain))])
self.ax1.plot(self.lossInterval * np.arange(len(self.trainLoss)),
self.trainLoss, 'b', linewidth=1.0)
self.updatePlot()
def updateAcc(self, accVal, accTrain, index):
self.validAcc.append(accVal)
self.trainAcc.append(accTrain)
self.ax2.plot(self.testInterval * np.arange(len(self.validAcc)),
self.validAcc, 'r', linewidth=1.0)
self.ax2.plot(self.testInterval * np.arange(len(self.trainAcc)),
self.trainAcc, 'g',linewidth=1.0)
self.ax2.set_title('Valid. Accuracy: {:.4f}'.format(self.validAcc[-1]))
self.updatePlot()
class DataSet:
""" Class for training data and feeding train function """
images = None
labels = None
length = 0
index = 0
def __init__(self, img, lbl):
""" Crate the dataset """
self.images = img
self.labels = lbl
self.length = len(img)
self.index = 0
def next_batch(self, batchSize):
"""Return the next batch from the data set."""
start = self.index
self.index += batchSize
if self.index > self.length:
# Shuffle the data
perm = np.arange(self.length)
np.random.shuffle(perm)
self.images = self.images[perm]
self.labels = self.labels[perm]
# Start next epoch
start = 0
self.index = batchSize
end = self.index
return self.images[start:end], self.labels[start:end]
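# Small usage sketch with synthetic arrays (illustrative, not from the original file):
# next_batch() serves samples in order and reshuffles once the epoch is exhausted.
imgs = np.arange(10).reshape(5, 2)   # 5 fake "images"
lbls = np.arange(5)                  # matching labels
ds = DataSet(imgs, lbls)
xb, yb = ds.next_batch(3)            # first three samples
xb, yb = ds.next_batch(3)            # exceeds length -> shuffles and starts a new epoch
print(xb.shape, yb.shape)            # (3, 2) (3,)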
| # -*- coding: utf-8 -*-
"""
Classes for controling machine learning processes
"""
import numpy as np
import math
import matplotlib.pyplot as plt
import csv
class TrainingPlot:
"""
Creating live plot during training
REUIRES notebook backend: %matplotlib notebook
@TODO Migrate to Tensorboard
"""
trainLoss = []
trainAcc = []
validAcc = []
testInterval = 0
lossInterval = 0
interval = 0
ax1 = None
ax2 = None
fig = None
def __init__(self, steps, testItr, lossItr):
self.testInterval = testItr
self.lossInterval = lossItr
self.interval = steps
self.fig, self.ax1 = plt.subplots()
self.ax2 = self.ax1.twinx()
self.ax1.set_autoscaley_on(True)
plt.ion()
self.updatePlot()
# Description
self.ax1.set_xlabel('iteration')
self.ax1.set_ylabel('train loss')
self.ax2.set_ylabel('test accuracy')
# Axes limits
self.ax1.set_ylim([0,10])
def updatePlot(self):
self.fig.canvas.draw()
def updateCost(self, lossTrain, index):
self.trainLoss.append(lossTrain)
if len(self.trainLoss) == 1:
self.ax1.set_ylim([0, min(10, math.ceil(lossTrain))])
self.ax1.plot(self.lossInterval * np.arange(len(self.trainLoss)),
self.trainLoss, 'b', linewidth=1.0)
self.updatePlot()
def updateAcc(self, accVal, accTrain, index):
self.validAcc.append(accVal)
self.trainAcc.append(accTrain)
self.ax2.plot(self.testInterval * np.arange(len(self.validAcc)),
self.validAcc, 'r', linewidth=1.0)
self.ax2.plot(self.testInterval * np.arange(len(self.trainAcc)),
self.trainAcc, 'g',linewidth=1.0)
self.ax2.set_title('Valid. Accuracy: {:.4f}'.format(self.validAcc[-1]))
self.updatePlot()
class DataSet:
""" Class for training data and feeding train function """
images = None
labels = None
length = 0
index = 0
def __init__(self, img, lbl):
""" Crate the dataset """
self.images = img
self.labels = lbl
self.length = len(img)
self.index = 0
def next_batch(self, batchSize):
"""Return the next batch from the data set."""
start = self.index
self.index += batchSize
if self.index > self.length:
# Shuffle the data
perm = np.arange(self.length)
np.random.shuffle(perm)
self.images = self.images[perm]
self.labels = self.labels[perm]
# Start next epoch
start = 0
self.index = batchSize
end = self.index
return self.images[start:end], self.labels[start:end] | en | 0.773097 | # -*- coding: utf-8 -*- Classes for controling machine learning processes Creating live plot during training REUIRES notebook backend: %matplotlib notebook @TODO Migrate to Tensorboard # Description # Axes limits Class for training data and feeding train function Crate the dataset Return the next batch from the data set. # Shuffle the data # Start next epoch | 3.014441 | 3 |
webhelpers2/tests/test_date.py | dairiki/WebHelpers2 | 0 | 6632863 | <reponame>dairiki/WebHelpers2<filename>webhelpers2/tests/test_date.py
import calendar
from datetime import datetime as DT
import time
from pytest import fixture, raises
from webhelpers2.date import distance_of_time_in_words as dtw
from webhelpers2.date import time_ago_in_words
from webhelpers2.date import _is_leap_year
@fixture(params=range(1970, 2100, 5))
def run_in_various_years(request, monkeypatch):
""" Monkeypatch time.time() to midnight UTC on Jan 1 of various years.
The assortment of years includes both leap years and non-leap years.
"""
year = request.param
t = calendar.timegm((year, 1, 1, 0, 0, 0))
monkeypatch.setattr(time, 'time', lambda: t)
class TestDistanceOfTimeInWords(object):
from_time = DT(2000, 1, 1, 0, 0, 0, 0, None) # Midnight, Jan 1 2000 UTC
to_time = DT(1980, 5, 6, 12, 32, 40, 0, None) # 12:32pm, May 5, 1980
def test_integer_seconds(self):
# Test that if integers are supplied they are interpreted as seconds from now
assert dtw(1) == "1 second"
def test_now_to_1_year(self, run_in_various_years):
# The following two tests test the span from "now" to "a year from
# now". Depending on when the test is run, the interval may include a
# leap year. The 'try' assumes it's not a leap year, the 'except'
# tries it again as a leap year.
try:
assert dtw(0, 31536000) == "1 year"
assert dtw(1, 31536001) == "1 year"
except AssertionError: # If the intervening year contains February 29th
assert dtw(0, 31536000) == "11 months and 30 days"
assert dtw(1, 31536001) == "11 months and 30 days"
def test_invalid_granularity(self):
# Granularity is invalid
raises(Exception, dtw, 0, 1, "blah")
def test_february_nonleap(self):
# February 2nd 2007 to March 1 2007 is 27 days because February is shorter month in the year
assert dtw(DT(2007,2,2), DT(2007, 3, 1)) == "27 days"
def test_february_leap(self):
# February 2nd 2008 to March 1 2008 is 28 days because it's a leap year
assert dtw(DT(2008,2,2), DT(2008, 3, 1)) == "28 days"
def test_symmetry(self):
# Should get the same values going forward and backward
from_time = self.from_time
to_time = self.to_time
assert dtw(from_time, to_time) == dtw(to_time, from_time)
assert dtw(from_time, to_time, "month") == dtw(to_time, from_time, "month")
assert dtw(from_time, to_time, "year") == dtw(to_time, from_time, "year")
def test_granularity(self):
# Ensure we get the correct granularity when the times are the same
from_time = self.from_time
assert dtw(from_time, from_time, "second") == "0 seconds"
assert dtw(from_time, from_time, "day") == "0 days"
assert dtw(from_time, from_time, "century") == "0 centuries"
def test_smaller_than_granularity(self):
# We are not over the finest granularity
assert dtw(1, granularity="hour") == "less than 1 hour"
assert dtw(86399, granularity="day") == "less than 1 day"
def test_grandularity_round_up(self):
# Round up if the granularity after the supplied granularity is high enough
assert dtw(86399, round=True, granularity="day") == "1 day"
assert dtw(290, round=True, granularity="minute") == "5 minutes"
assert dtw(86689, round=True, granularity="minute") == "1 day and 5 minutes"
def test_grandularity_round_second_value(self):
# Rounding at the second value makes no difference
from_time = self.from_time
to_time = self.to_time
assert dtw(to_time, from_time, granularity="second") == \
dtw(to_time, from_time, granularity="second", round=True)
def test_round_smaller_than_granularity(self):
assert dtw(1, granularity="hour", round=True) == "less than 1 hour"
def test_plural(self):
# Pluralization
assert dtw(DT(200, 1,1), DT(300, 1, 1)) == "1 century"
assert dtw(DT(200, 1,1), DT(500, 1, 1)) == "3 centuries"
def test_plural2(self):
assert dtw(DT(2000, 1,1), DT(2000, 2, 1)) == "1 month"
assert dtw(DT(2000, 1,1), DT(2000, 5, 1)) == "4 months"
class TestSpotChecks(object):
def test_spot_checks(self):
# Spot checks
from_time = DT(200, 2, 2, 0, 12, 12, 0, None) # 12 minutes, 12 seconds past midnight on Feb 2 200AD
to_time = DT(1981, 3, 31, 7, 31, 0, 0, None) # 7:31 am, March 31, 1981
assert dtw(from_time, to_time) == "17 centuries, 8 decades, 1 year, 1 month, 29 days, 7 hours, 18 minutes and 48 seconds"
assert dtw(from_time, to_time, granularity = "hour") == "17 centuries, 8 decades, 1 year, 1 month, 29 days and 7 hours"
assert dtw(from_time, to_time, granularity = "hour", round=True) == "17 centuries, 8 decades, 1 year, 1 month, 29 days and 7 hours"
assert dtw(from_time, to_time, granularity = "month") == "17 centuries, 8 decades, 1 year and 1 month"
assert dtw(from_time, to_time, granularity = "month", round=True) == "17 centuries, 8 decades, 1 year and 2 months"
from_time = DT(200, 2, 2, 12, 30, 30, 0, None) # 12:30:30, Feb 2, 200AD
to_time = DT(220, 3, 2, 12, 30, 31, 0, None) # 12:30:31, Mar 2, 220AD
assert dtw(from_time, to_time) == "2 decades, 1 month and 1 second"
class TestFormerDocTests(object):
start = DT(2008,3,21, 16,34)
end = DT(2008,2,6, 9,45)
def test1(self):
assert dtw(86399, round=True, granularity="day") == "1 day"
def test2(self):
assert dtw(86399, granularity='day') == "less than 1 day"
def test3(self):
assert dtw(86399) == "23 hours, 59 minutes and 59 seconds"
def test4(self):
b = "1 month, 15 days, 6 hours and 49 minutes"
assert dtw(self.start, self.end) == b
def test5(self):
b = "less than 1 decade"
assert dtw(self.start, self.end, granularity="decade") == b
def test6(self):
b = "1 month, 15 days, 6 hours and 49 minutes"
assert dtw(self.start, self.end, granularity="second") == b
class TestLeapYears(object):
def test_is_leap_year_1900(self):
assert not _is_leap_year(1900)
def test_is_leap_year_2000(self):
assert _is_leap_year(2000)
def test_is_leap_year_2011(self):
assert not _is_leap_year(2011)
def test_is_leap_year_2012(self):
assert _is_leap_year(2012)
def test_is_leap_year_2100(self):
assert not _is_leap_year(2100)
def test_time_ago_in_words():
assert time_ago_in_words(-18*3600, granularity="day", round=True) == '1 day'
| import calendar
from datetime import datetime as DT
import time
from pytest import fixture, raises
from webhelpers2.date import distance_of_time_in_words as dtw
from webhelpers2.date import time_ago_in_words
from webhelpers2.date import _is_leap_year
@fixture(params=range(1970, 2100, 5))
def run_in_various_years(request, monkeypatch):
""" Monkeypatch time.time() to midnight UTC on Jan 1 of various years.
The assortment of years includes both leap years and non-leap years.
"""
year = request.param
t = calendar.timegm((year, 1, 1, 0, 0, 0))
monkeypatch.setattr(time, 'time', lambda: t)
class TestDistanceOfTimeInWords(object):
from_time = DT(2000, 1, 1, 0, 0, 0, 0, None) # Midnight, Jan 1 2000 UTC
to_time = DT(1980, 5, 6, 12, 32, 40, 0, None) # 12:32pm, May 5, 1980
def test_integer_seconds(self):
# Test that if integers are supplied they are interpreted as seconds from now
assert dtw(1) == "1 second"
def test_now_to_1_year(self, run_in_various_years):
# The following two tests test the span from "now" to "a year from
# now". Depending on when the test is run, the interval may include a
# leap year. The 'try' assumes it's not a leap year, the 'except'
# tries it again as a leap year.
try:
assert dtw(0, 31536000) == "1 year"
assert dtw(1, 31536001) == "1 year"
except AssertionError: # If the intervening year contains February 29th
assert dtw(0, 31536000) == "11 months and 30 days"
assert dtw(1, 31536001) == "11 months and 30 days"
def test_invalid_granularity(self):
# Granularity is invalid
raises(Exception, dtw, 0, 1, "blah")
def test_february_nonleap(self):
# February 2nd 2007 to March 1 2007 is 27 days because February is shorter month in the year
assert dtw(DT(2007,2,2), DT(2007, 3, 1)) == "27 days"
def test_february_leap(self):
# February 2nd 2008 to March 1 2008 is 28 days because it's a leap year
assert dtw(DT(2008,2,2), DT(2008, 3, 1)) == "28 days"
def test_symmetry(self):
# Should get the same values going forward and backward
from_time = self.from_time
to_time = self.to_time
assert dtw(from_time, to_time) == dtw(to_time, from_time)
assert dtw(from_time, to_time, "month") == dtw(to_time, from_time, "month")
assert dtw(from_time, to_time, "year") == dtw(to_time, from_time, "year")
def test_granularity(self):
# Ensure we get the correct granularity when the times are the same
from_time = self.from_time
assert dtw(from_time, from_time, "second") == "0 seconds"
assert dtw(from_time, from_time, "day") == "0 days"
assert dtw(from_time, from_time, "century") == "0 centuries"
def test_smaller_than_granularity(self):
# We are not over the finest granularity
assert dtw(1, granularity="hour") == "less than 1 hour"
assert dtw(86399, granularity="day") == "less than 1 day"
def test_grandularity_round_up(self):
# Round up if the granularity after the supplied granularity is high enough
assert dtw(86399, round=True, granularity="day") == "1 day"
assert dtw(290, round=True, granularity="minute") == "5 minutes"
assert dtw(86689, round=True, granularity="minute") == "1 day and 5 minutes"
def test_grandularity_round_second_value(self):
# Rounding at the second value makes no difference
from_time = self.from_time
to_time = self.to_time
assert dtw(to_time, from_time, granularity="second") == \
dtw(to_time, from_time, granularity="second", round=True)
def test_round_smaller_than_granularity(self):
assert dtw(1, granularity="hour", round=True) == "less than 1 hour"
def test_plural(self):
# Pluralization
assert dtw(DT(200, 1,1), DT(300, 1, 1)) == "1 century"
assert dtw(DT(200, 1,1), DT(500, 1, 1)) == "3 centuries"
def test_plural2(self):
assert dtw(DT(2000, 1,1), DT(2000, 2, 1)) == "1 month"
assert dtw(DT(2000, 1,1), DT(2000, 5, 1)) == "4 months"
class TestSpotChecks(object):
def test_spot_checks(self):
# Spot checks
from_time = DT(200, 2, 2, 0, 12, 12, 0, None) # 12 minutes, 12 seconds past midnight on Feb 2 200AD
to_time = DT(1981, 3, 31, 7, 31, 0, 0, None) # 7:31 am, March 31, 1981
assert dtw(from_time, to_time) == "17 centuries, 8 decades, 1 year, 1 month, 29 days, 7 hours, 18 minutes and 48 seconds"
assert dtw(from_time, to_time, granularity = "hour") == "17 centuries, 8 decades, 1 year, 1 month, 29 days and 7 hours"
assert dtw(from_time, to_time, granularity = "hour", round=True) == "17 centuries, 8 decades, 1 year, 1 month, 29 days and 7 hours"
assert dtw(from_time, to_time, granularity = "month") == "17 centuries, 8 decades, 1 year and 1 month"
assert dtw(from_time, to_time, granularity = "month", round=True) == "17 centuries, 8 decades, 1 year and 2 months"
from_time = DT(200, 2, 2, 12, 30, 30, 0, None) # 12:30:30, Feb 2, 200AD
to_time = DT(220, 3, 2, 12, 30, 31, 0, None) # 12:30:31, Mar 2, 220AD
assert dtw(from_time, to_time) == "2 decades, 1 month and 1 second"
class TestFormerDocTests(object):
start = DT(2008,3,21, 16,34)
end = DT(2008,2,6, 9,45)
def test1(self):
assert dtw(86399, round=True, granularity="day") == "1 day"
def test2(self):
assert dtw(86399, granularity='day') == "less than 1 day"
def test3(self):
assert dtw(86399) == "23 hours, 59 minutes and 59 seconds"
def test4(self):
b = "1 month, 15 days, 6 hours and 49 minutes"
assert dtw(self.start, self.end) == b
def test5(self):
b = "less than 1 decade"
assert dtw(self.start, self.end, granularity="decade") == b
def test6(self):
b = "1 month, 15 days, 6 hours and 49 minutes"
assert dtw(self.start, self.end, granularity="second") == b
class TestLeapYears(object):
def test_is_leap_year_1900(self):
assert not _is_leap_year(1900)
def test_is_leap_year_2000(self):
assert _is_leap_year(2000)
def test_is_leap_year_2011(self):
assert not _is_leap_year(2011)
def test_is_leap_year_2012(self):
assert _is_leap_year(2012)
def test_is_leap_year_2100(self):
assert not _is_leap_year(2100)
def test_time_ago_in_words():
assert time_ago_in_words(-18*3600, granularity="day", round=True) == '1 day' | en | 0.940471 | Monkeypatch time.time() to midnight UTC on Jan 1 of various years. The assortment of years includes both leap years and non-leap years. # Midnight, Jan 1 2000 UTC # 12:32pm, May 5, 1980 # Test that if integers are supplied they are interpreted as seconds from now # The following two tests test the span from "now" to "a year from # now". Depending on when the test is run, the interval may include a # leap year. The 'try' assumes it's not a leap year, the 'except' # tries it again as a leap year. # If the intervening year contains February 29th # Granularity is invalid # February 2nd 2007 to March 1 2007 is 27 days because February is shorter month in the year # February 2nd 2008 to March 1 2008 is 28 days because it's a leap year # Should get the same values going forward and backward # Ensure we get the correct granularity when the times are the same # We are not over the finest granularity # Round up if the granularity after the supplied granularity is high enough # Rounding at the second value makes no difference # Pluralization # Spot checks # 12 minutes, 12 seconds past midnight on Feb 2 200AD # 7:31 am, March 31, 1981 # 12:30:30, Feb 2, 200AD # 12:30:31, Mar 2, 220AD | 3.149146 | 3 |
autobet/util.py | Arter3r/gta-autobet | 3 | 6632864 | from functools import lru_cache
from autobet.constants import GAME_EXECUTABLE, ASPECT_RATIO, START_SCREEN_TOP_LEFT_PIXEL_RGB, RESULTS_SCREEN_TOP_LEFT_PIXEL_RGB, PLACE_BET_SCREEN_TOP_LEFT_PIXEL_RGB
import pyautogui
import autobet.constants
def log(msg):
# TODO proper logging
print(msg)
@lru_cache(maxsize=1)
def get_screen_size():
return pyautogui.size()
def check_aspect_ratio(width=None, height=None):
if width is None or height is None:
width, height = pyautogui.size()
return abs(width/height - ASPECT_RATIO) < 0.1
def at_start_screen(x, y):
return pyautogui.pixelMatchesColor(int(x), int(y), START_SCREEN_TOP_LEFT_PIXEL_RGB, tolerance=5)
def at_place_bet_screen(x, y):
return pyautogui.pixelMatchesColor(int(x), int(y), PLACE_BET_SCREEN_TOP_LEFT_PIXEL_RGB, tolerance=5)
def at_results_screen(x, y):
return pyautogui.pixelMatchesColor(int(x), int(y), RESULTS_SCREEN_TOP_LEFT_PIXEL_RGB, tolerance=5)
| from functools import lru_cache
from autobet.constants import GAME_EXECUTABLE, ASPECT_RATIO, START_SCREEN_TOP_LEFT_PIXEL_RGB, RESULTS_SCREEN_TOP_LEFT_PIXEL_RGB, PLACE_BET_SCREEN_TOP_LEFT_PIXEL_RGB
import pyautogui
import autobet.constants
def log(msg):
# TODO proper logging
print(msg)
@lru_cache(maxsize=1)
def get_screen_size():
return pyautogui.size()
def check_aspect_ratio(width=None, height=None):
if width is None or height is None:
width, height = pyautogui.size()
return abs(width/height - ASPECT_RATIO) < 0.1
def at_start_screen(x, y):
return pyautogui.pixelMatchesColor(int(x), int(y), START_SCREEN_TOP_LEFT_PIXEL_RGB, tolerance=5)
def at_place_bet_screen(x, y):
return pyautogui.pixelMatchesColor(int(x), int(y), PLACE_BET_SCREEN_TOP_LEFT_PIXEL_RGB, tolerance=5)
def at_results_screen(x, y):
return pyautogui.pixelMatchesColor(int(x), int(y), RESULTS_SCREEN_TOP_LEFT_PIXEL_RGB, tolerance=5)
| en | 0.410794 | # TODO proper logging | 2.318921 | 2 |
benchmark_tensorflow.py | ryoma-jp/009_Benchmark | 0 | 6632865 | <reponame>ryoma-jp/009_Benchmark<filename>benchmark_tensorflow.py
#! -*- coding: utf-8 -*-
"""
[tensorflow]
python benchmark_tensorflow.py --help
python benchmark_tensorflow.py --param_csv benchmark.csv
"""
#---------------------------------
# Import modules
#---------------------------------
import os
import sys
import time
import tqdm
import argparse
from argparse import RawTextHelpFormatter
import numpy as np
import pandas as pd
from common import GetParams, DataLoader
import cv2
import tensorflow as tf
#---------------------------------
# Constant definitions
#---------------------------------
#---------------------------------
# Functions
#---------------------------------
"""
Function name: ArgParser
Description: parse the command-line arguments and return them
"""
def ArgParser():
    parser = argparse.ArgumentParser(description='Measure benchmark scores with TensorFlow', formatter_class=RawTextHelpFormatter)
    # --- Add arguments ---
    parser.add_argument('--param_csv', dest='param_csv', type=str, required=True, help='CSV file listing the benchmark conditions\n'
                            '[Format] type, model_dir, data_dir\n'
                            ' type: classification, ...[T.B.D]\n'
                            ' model_dir: directory containing the trained model\n'
                            ' model_name: file name stem shared by the model files\n'
                            ' data_dir: directory containing the test data')
args = parser.parse_args()
return args
#---------------------------------
# Main routine
#---------------------------------
if __name__ == '__main__':
    # --- Parse arguments ---
args = ArgParser()
    # --- Get parameters ---
type, model_dir, model_name, data_dir = GetParams(args.param_csv)
f_log = open('log.csv', 'w')
f_log.write('iter,elapsed_time[sec],inference_time[sec/100iter]\n')
for _type, _model_dir, _model_name, _data_dir in zip(type, model_dir, model_name, data_dir):
        # --- Create DataLoader ---
data_loader = DataLoader(_data_dir)
print(data_loader.GetData())
data = data_loader.GetData()
        # --- Load the model ---
gd = tf.compat.v1.GraphDef.FromString(open(os.path.join(_model_dir, _model_name+'_frozen.pb'), 'rb').read())
inp, predictions = tf.compat.v1.import_graph_def(gd, return_elements = ['input:0', 'MobilenetV2/Predictions/Reshape_1:0'])
        # --- Inference ---
img = None
prediction_header = ['prediction', 'label_id', 'filename']
start_time = time.time()
for cnt, (label_id, filename) in enumerate(zip(data['label_id'], data['filename'])):
if (img is None):
img = np.array([cv2.imread(os.path.join(_data_dir, label_id, filename))]) / 128 - 1
else:
try:
img = np.vstack((img, np.array([cv2.imread(os.path.join(_data_dir, label_id, filename)) / 128 - 1])))
except:
print(_data_dir)
print(label_id)
print(filename)
quit()
if ((cnt+1) % 10 == 0):
print(str(time.time()-start_time) + ' : ' + str(cnt+1) + ' of ' + str(len(data)))
with tf.compat.v1.Session(graph=inp.graph):
pre_inference = time.time()
prediction_val = predictions.eval(feed_dict={inp: img})
after_inference = time.time()
if ((cnt+1) == 10):
pd.DataFrame(np.vstack((prediction_val.argmax(axis=1), data['label_id'].values[cnt+1-10:cnt+1], data['filename'].values[cnt+1-10:cnt+1])).T).to_csv('predictions.csv', \
header=prediction_header, index=False)
else:
pd.DataFrame(np.vstack((prediction_val.argmax(axis=1), data['label_id'].values[cnt+1-10:cnt+1], data['filename'].values[cnt+1-10:cnt+1])).T).to_csv('predictions.csv', \
mode='a', \
header=False, index=False)
img = None
f_log.write(str(cnt+1)+','+str(time.time()-start_time)+','+str(after_inference-pre_inference)+'\n')
if ((cnt+1) % 10 > 0):
with tf.compat.v1.Session(graph=inp.graph):
pre_inference = time.time()
prediction_val = predictions.eval(feed_dict={inp: img})
after_inference = time.time()
pd.DataFrame(np.vstack((prediction_val.argmax(axis=1), data['label_id'].values[cnt+1-((cnt+1)%10):], data['filename'].values[cnt+1-((cnt+1)%10):])).T).to_csv('predictions.csv', \
mode='a', \
header=False, index=False)
img = None
f_log.write(str(cnt+1)+','+str(time.time()-start_time)+','+str(after_inference-pre_inference)+'\n')
f_log.close()
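# Example parameter file for --param_csv (format inferred from the help text above; the
# paths and model name below are placeholders, not files shipped with this script):
#   type,model_dir,model_name,data_dir
#   classification,./models/mobilenet_v2,mobilenet_v2_1.0_224,./data/val_images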
| #! -*- coding: utf-8 -*-
"""
[tensorflow]
python benchmark_tensorflow.py --help
python benchmark_tensorflow.py --param_csv benchmark.csv
"""
#---------------------------------
# モジュールのインポート
#---------------------------------
import os
import sys
import time
import tqdm
import argparse
from argparse import RawTextHelpFormatter
import numpy as np
import pandas as pd
from common import GetParams, DataLoader
import cv2
import tensorflow as tf
#---------------------------------
# 定数定義
#---------------------------------
#---------------------------------
# 関数
#---------------------------------
"""
関数名: ArgParser
説明:引数を解析して値を取得する
"""
def ArgParser():
parser = argparse.ArgumentParser(description='TensorFlowによるベンチマークスコアの計測', formatter_class=RawTextHelpFormatter)
# --- 引数を追加 ---
parser.add_argument('--param_csv', dest='param_csv', type=str, required=True, help='ベンチマーク条件を記載したパラメータファイル\n'
'[Format] type, model_dir, data_dir\n'
' type: classification, ...[T.B.D]\n'
' model_dir: 学習済みモデルが格納されたディレクトリ\n'
' model_name: モデルファイル群のファイル名\n'
' data_dir: テストデータが格納されたディレクトリを指定')
args = parser.parse_args()
return args
#---------------------------------
# メイン処理
#---------------------------------
if __name__ == '__main__':
# --- 引数処理 ---
args = ArgParser()
# --- パラメータ取得 ---
type, model_dir, model_name, data_dir = GetParams(args.param_csv)
f_log = open('log.csv', 'w')
f_log.write('iter,elapsed_time[sec],inference_time[sec/100iter]\n')
for _type, _model_dir, _model_name, _data_dir in zip(type, model_dir, model_name, data_dir):
# --- DataLoader生成 ---
data_loader = DataLoader(_data_dir)
print(data_loader.GetData())
data = data_loader.GetData()
# --- モデルロード ---
gd = tf.compat.v1.GraphDef.FromString(open(os.path.join(_model_dir, _model_name+'_frozen.pb'), 'rb').read())
inp, predictions = tf.compat.v1.import_graph_def(gd, return_elements = ['input:0', 'MobilenetV2/Predictions/Reshape_1:0'])
# --- 推論 ---
img = None
prediction_header = ['prediction', 'label_id', 'filename']
start_time = time.time()
for cnt, (label_id, filename) in enumerate(zip(data['label_id'], data['filename'])):
if (img is None):
img = np.array([cv2.imread(os.path.join(_data_dir, label_id, filename))]) / 128 - 1
else:
try:
img = np.vstack((img, np.array([cv2.imread(os.path.join(_data_dir, label_id, filename)) / 128 - 1])))
except:
print(_data_dir)
print(label_id)
print(filename)
quit()
if ((cnt+1) % 10 == 0):
print(str(time.time()-start_time) + ' : ' + str(cnt+1) + ' of ' + str(len(data)))
with tf.compat.v1.Session(graph=inp.graph):
pre_inference = time.time()
prediction_val = predictions.eval(feed_dict={inp: img})
after_inference = time.time()
if ((cnt+1) == 10):
pd.DataFrame(np.vstack((prediction_val.argmax(axis=1), data['label_id'].values[cnt+1-10:cnt+1], data['filename'].values[cnt+1-10:cnt+1])).T).to_csv('predictions.csv', \
header=prediction_header, index=False)
else:
pd.DataFrame(np.vstack((prediction_val.argmax(axis=1), data['label_id'].values[cnt+1-10:cnt+1], data['filename'].values[cnt+1-10:cnt+1])).T).to_csv('predictions.csv', \
mode='a', \
header=False, index=False)
img = None
f_log.write(str(cnt+1)+','+str(time.time()-start_time)+','+str(after_inference-pre_inference)+'\n')
if ((cnt+1) % 10 > 0):
with tf.compat.v1.Session(graph=inp.graph):
pre_inference = time.time()
prediction_val = predictions.eval(feed_dict={inp: img})
after_inference = time.time()
pd.DataFrame(np.vstack((prediction_val.argmax(axis=1), data['label_id'].values[cnt+1-((cnt+1)%10):], data['filename'].values[cnt+1-((cnt+1)%10):])).T).to_csv('predictions.csv', \
mode='a', \
header=False, index=False)
img = None
f_log.write(str(cnt+1)+','+str(time.time()-start_time)+','+str(after_inference-pre_inference)+'\n')
f_log.close() | ja | 0.527137 | #! -*- coding: utf-8 -*- [tensorflow] python benchmark_tensorflow.py --help python benchmark_tensorflow.py --param_csv benchmark.csv #--------------------------------- # モジュールのインポート #--------------------------------- #--------------------------------- # 定数定義 #--------------------------------- #--------------------------------- # 関数 #--------------------------------- 関数名: ArgParser 説明:引数を解析して値を取得する # --- 引数を追加 --- #--------------------------------- # メイン処理 #--------------------------------- # --- 引数処理 --- # --- パラメータ取得 --- # --- DataLoader生成 --- # --- モデルロード --- # --- 推論 --- | 2.50884 | 3 |
pandas_market_calendars/exchange_calendar_six.py | rakesh1988/pandas_market_calendars | 1 | 6632866 | from datetime import time
from pandas.tseries.holiday import (AbstractHolidayCalendar, Day, Easter, EasterMonday, GoodFriday, Holiday,
previous_friday)
from pytz import timezone
from .market_calendar import (FRIDAY, MONDAY, MarketCalendar, THURSDAY, TUESDAY, WEDNESDAY)
# New Year's Eve
NewYearsEve = Holiday(
"New Year's Eve",
month=12,
day=31,
observance=previous_friday,
)
# New Year's Day
NewYearsDay = Holiday(
"New Year's Day",
month=1,
day=1,
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),
)
# Berthold's Day
BertholdsDay = Holiday(
"Berthold's Day",
month=1,
day=2,
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),
)
# Early May bank holiday
MayBank = Holiday(
"Early May Bank Holiday",
month=5,
day=1,
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),
)
# Ascension Day (Auffahrt)
AscensionDay = Holiday(
'Ascension Day',
month=1,
day=1,
offset=[Easter(), Day(39)],
days_of_week=(THURSDAY,),
)
# Pentecost Day (Pfingstmontag)
PentecostMonday = Holiday(
'Pentecost Monday',
month=1,
day=1,
offset=[Easter(), Day(50)],
days_of_week=(MONDAY,),
)
# Swiss National Day
SwissNationalDay = Holiday(
"Swiss National Day",
month=8,
day=1,
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),
)
# Christmas Eve
ChristmasEve = Holiday(
'Christmas Eve',
month=12,
day=24,
)
# Christmas
Christmas = Holiday(
"Christmas",
month=12,
day=25,
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),
)
# Boxing day
BoxingDay = Holiday(
"Boxing Day",
month=12,
day=26,
)
class SIXExchangeCalendar(MarketCalendar):
"""
Exchange calendar for SIX
"""
aliases = ['SIX']
@property
def name(self):
return "SIX"
@property
def tz(self):
return timezone('Europe/Zurich')
@property
def open_time_default(self):
return time(9, 0, tzinfo=self.tz)
@property
def close_time_default(self):
return time(17, 30, tzinfo=self.tz)
@property
def regular_holidays(self):
return AbstractHolidayCalendar(rules=[
NewYearsDay,
BertholdsDay,
GoodFriday,
EasterMonday,
MayBank,
AscensionDay,
PentecostMonday,
SwissNationalDay,
ChristmasEve,
Christmas,
BoxingDay,
NewYearsEve,
])
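# Hedged usage sketch (relies on the package-level pandas_market_calendars API, which is
# assumed here rather than shown in this file):
import pandas_market_calendars as mcal
six = mcal.get_calendar('SIX')
print(six.schedule(start_date='2020-12-20', end_date='2021-01-05'))  # trading days; the holidays above are excluded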
| from datetime import time
from pandas.tseries.holiday import (AbstractHolidayCalendar, Day, Easter, EasterMonday, GoodFriday, Holiday,
previous_friday)
from pytz import timezone
from .market_calendar import (FRIDAY, MONDAY, MarketCalendar, THURSDAY, TUESDAY, WEDNESDAY)
# New Year's Eve
NewYearsEve = Holiday(
"New Year's Eve",
month=12,
day=31,
observance=previous_friday,
)
# New Year's Day
NewYearsDay = Holiday(
"New Year's Day",
month=1,
day=1,
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),
)
# Berthold's Day
BertholdsDay = Holiday(
"Berthold's Day",
month=1,
day=2,
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),
)
# Early May bank holiday
MayBank = Holiday(
"Early May Bank Holiday",
month=5,
day=1,
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),
)
# Ascension Day (Auffahrt)
AscensionDay = Holiday(
'Ascension Day',
month=1,
day=1,
offset=[Easter(), Day(39)],
days_of_week=(THURSDAY,),
)
# Pentecost Day (Pfingstmontag)
PentecostMonday = Holiday(
'Pentecost Monday',
month=1,
day=1,
offset=[Easter(), Day(50)],
days_of_week=(MONDAY,),
)
# Swiss National Day
SwissNationalDay = Holiday(
"Swiss National Day",
month=8,
day=1,
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),
)
# Christmas Eve
ChristmasEve = Holiday(
'Christmas Eve',
month=12,
day=24,
)
# Christmas
Christmas = Holiday(
"Christmas",
month=12,
day=25,
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY),
)
# Boxing day
BoxingDay = Holiday(
"Boxing Day",
month=12,
day=26,
)
class SIXExchangeCalendar(MarketCalendar):
"""
Exchange calendar for SIX
"""
aliases = ['SIX']
@property
def name(self):
return "SIX"
@property
def tz(self):
return timezone('Europe/Zurich')
@property
def open_time_default(self):
return time(9, 0, tzinfo=self.tz)
@property
def close_time_default(self):
return time(17, 30, tzinfo=self.tz)
@property
def regular_holidays(self):
return AbstractHolidayCalendar(rules=[
NewYearsDay,
BertholdsDay,
GoodFriday,
EasterMonday,
MayBank,
AscensionDay,
PentecostMonday,
SwissNationalDay,
ChristmasEve,
Christmas,
BoxingDay,
NewYearsEve,
])
| en | 0.683648 | # New Year's Eve # New Year's Day # Berthold's Day # Early May bank holiday # Ascension Day (Auffahrt) # Pentecost Day (Pfingstmontag) # Swiss National Day # Christmas Eve # Christmas # Boxing day Exchange calendar for SIX | 2.933609 | 3 |
src/features/onset_features.py | L-Hess/HexClassifier | 0 | 6632867 | import numpy as np
class GetFeatures:
def __init__(self, input_filepaths, output_filepath):
self.input_data_0 = np.genfromtxt(input_filepaths[0], delimiter=',', skip_header=False)
self.input_data_1 = np.genfromtxt(input_filepaths[1], delimiter=',', skip_header=False)
self.log_path = input_filepaths[2]
self.vid_0_path = input_filepaths[3]
self.vid_1_path = input_filepaths[4]
self.output_filepath = output_filepath
self.mouse_presence = []
self.ground_truths = []
self.nps = []
self.time_at_gl = []
self.log_gl_15 = []
def ground_truth(self):
vid_t = (3600 * int(self.vid_0_path[63:65]) + 60 * int(self.vid_0_path[66:68]) + int(self.vid_0_path[69:71])) * 15
act = []
act_line = ["Trial ++++++++ active ++++++++"]
inact = []
inact_line = ["Trial ------- inactive -------"]
with open(self.log_path) as f:
f = f.readlines()
for line in f:
for phrase in act_line:
if phrase in line:
act.append(line)
for phrase in inact_line:
if phrase in line:
inact.append(line)
log_onsets, log_offsets = [], []
for i in range(len(act)):
on_t = (3600 * int(act[i][11:13]) + 60 * int(act[i][14:16]) + int(act[i][17:19])) * 15 - vid_t
off_t = (3600 * int(inact[i][11:13]) + 60 * int(inact[i][14:16]) + int(inact[i][17:19])) * 15 - vid_t
on_tf = np.argwhere(self.input_data_0 == on_t)
if on_t in self.input_data_0:
on_tf = on_tf[0][0]
else:
on_tf = np.nan
log_onsets.append(on_tf)
off_tf = np.argwhere(self.input_data_1 == off_t)
if off_t in self.input_data_1:
off_tf = off_tf[0][0]
else:
off_tf = np.nan
log_offsets.append(off_tf)
log = 0
k=0
for i in range(len(self.input_data_0)):
# Keep track if trial is active by log files
if i < len(self.input_data_0)-7:
if i+10 in log_onsets:
log = 1
k = 20
if k > 0:
log = 1
k -= 1
else:
log = 0
self.ground_truths.append(log)
def mouse_in(self):
for i in range(len(self.input_data_0)):
if not np.isnan(self.input_data_0[i, 4]) or not np.isnan(self.input_data_1[i, 4]):
mouse_in = 1
else:
mouse_in = 0
self.mouse_presence.append(mouse_in)
def search_onsets(self):
self.diff = np.diff(self.mouse_presence)
self.diff = np.append(self.diff, 0)
def build_features_log(self):
output_data = open(self.output_filepath, 'w')
output_data.write('Frame number, ground truth, mouse presence, diff\n')
for i in range(len(self.input_data_0)):
output_data.write('{}, {}, {}, {}\n'.format(i, self.ground_truths[i], self.mouse_presence[i], self.diff[i]))
output_data.close()
if __name__ == '__main__':
path_0 = r"C:/Users/Gebruiker/Documents/HexClassifier/data/raw/pos_log_file_lin_0.csv"
path_1 = r"C:/Users/Gebruiker/Documents/HexClassifier/data/raw/pos_log_file_lin_1.csv"
path_log = r"C:/Users/Gebruiker/Documents/HexClassifier/data/raw/2019-05-07_14-47-59_hextrack_log"
vid_0_path = r'C:/Users/Gebruiker/Documents/HexClassifier/data/raw/2019-05-07_14-53-54_cam_0.avi'
vid_1_path = r'C:/Users/Gebruiker/Documents/HexClassifier/data/raw/2019-05-07_14-53-54_cam_1.avi'
input_paths = [path_0, path_1, path_log, vid_0_path, vid_1_path]
output_path = r"C:/Users/Gebruiker/Documents/HexClassifier/data/processed/features.csv"
G = GetFeatures(input_paths, output_path)
G.ground_truth()
G.mouse_in()
G.search_onsets()
G.build_features_log()
| import numpy as np
class GetFeatures:
def __init__(self, input_filepaths, output_filepath):
self.input_data_0 = np.genfromtxt(input_filepaths[0], delimiter=',', skip_header=False)
self.input_data_1 = np.genfromtxt(input_filepaths[1], delimiter=',', skip_header=False)
self.log_path = input_filepaths[2]
self.vid_0_path = input_filepaths[3]
self.vid_1_path = input_filepaths[4]
self.output_filepath = output_filepath
self.mouse_presence = []
self.ground_truths = []
self.nps = []
self.time_at_gl = []
self.log_gl_15 = []
def ground_truth(self):
vid_t = (3600 * int(self.vid_0_path[63:65]) + 60 * int(self.vid_0_path[66:68]) + int(self.vid_0_path[69:71])) * 15
act = []
act_line = ["Trial ++++++++ active ++++++++"]
inact = []
inact_line = ["Trial ------- inactive -------"]
with open(self.log_path) as f:
f = f.readlines()
for line in f:
for phrase in act_line:
if phrase in line:
act.append(line)
for phrase in inact_line:
if phrase in line:
inact.append(line)
log_onsets, log_offsets = [], []
for i in range(len(act)):
on_t = (3600 * int(act[i][11:13]) + 60 * int(act[i][14:16]) + int(act[i][17:19])) * 15 - vid_t
off_t = (3600 * int(inact[i][11:13]) + 60 * int(inact[i][14:16]) + int(inact[i][17:19])) * 15 - vid_t
on_tf = np.argwhere(self.input_data_0 == on_t)
if on_t in self.input_data_0:
on_tf = on_tf[0][0]
else:
on_tf = np.nan
log_onsets.append(on_tf)
off_tf = np.argwhere(self.input_data_1 == off_t)
if off_t in self.input_data_1:
off_tf = off_tf[0][0]
else:
off_tf = np.nan
log_offsets.append(off_tf)
log = 0
k=0
for i in range(len(self.input_data_0)):
# Keep track if trial is active by log files
if i < len(self.input_data_0)-7:
if i+10 in log_onsets:
log = 1
k = 20
if k > 0:
log = 1
k -= 1
else:
log = 0
self.ground_truths.append(log)
def mouse_in(self):
for i in range(len(self.input_data_0)):
if not np.isnan(self.input_data_0[i, 4]) or not np.isnan(self.input_data_1[i, 4]):
mouse_in = 1
else:
mouse_in = 0
self.mouse_presence.append(mouse_in)
def search_onsets(self):
self.diff = np.diff(self.mouse_presence)
self.diff = np.append(self.diff, 0)
def build_features_log(self):
output_data = open(self.output_filepath, 'w')
output_data.write('Frame number, ground truth, mouse presence, diff\n')
for i in range(len(self.input_data_0)):
output_data.write('{}, {}, {}, {}\n'.format(i, self.ground_truths[i], self.mouse_presence[i], self.diff[i]))
output_data.close()
if __name__ == '__main__':
path_0 = r"C:/Users/Gebruiker/Documents/HexClassifier/data/raw/pos_log_file_lin_0.csv"
path_1 = r"C:/Users/Gebruiker/Documents/HexClassifier/data/raw/pos_log_file_lin_1.csv"
path_log = r"C:/Users/Gebruiker/Documents/HexClassifier/data/raw/2019-05-07_14-47-59_hextrack_log"
vid_0_path = r'C:/Users/Gebruiker/Documents/HexClassifier/data/raw/2019-05-07_14-53-54_cam_0.avi'
vid_1_path = r'C:/Users/Gebruiker/Documents/HexClassifier/data/raw/2019-05-07_14-53-54_cam_1.avi'
input_paths = [path_0, path_1, path_log, vid_0_path, vid_1_path]
output_path = r"C:/Users/Gebruiker/Documents/HexClassifier/data/processed/features.csv"
G = GetFeatures(input_paths, output_path)
G.ground_truth()
G.mouse_in()
G.search_onsets()
G.build_features_log()
| en | 0.954334 | # Keep track if trial is active by log files | 2.418387 | 2 |
gym/envs/classic_control/path_finding_data_collect.py | DragonMyth/MyDartEnv | 0 | 6632868 | <reponame>DragonMyth/MyDartEnv
"""
Classic cart-pole system implemented by <NAME> et al.
Copied from http://incompleteideas.net/sutton/book/code/pole.c
permalink: https://perma.cc/C9ZM-652R
"""
import logging
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
logger = logging.getLogger(__name__)
class PathFindingDataCollect(gym.Env):
def __init__(self):
self.grid_map = np.zeros((21, 21), dtype=int)
self.grid_map[0, :] = 1
self.grid_map[-1, :] = 1
self.grid_map[:, 0] = 1
self.grid_map[:, -1] = 1
self.grid_map[4, 4:-7] = 1
self.grid_map[4:-7, 4] = 1
self.grid_map[-5, 7:-4] = 1
self.grid_map[7:-4, -5] = 1
self.grid_map[8:13, 8:13] = 1
# This is the goal grid
goal_i, goal_j = 11, 18
self.grid_map[goal_i, goal_j] = 2
self.grid_size = 0.25
self.grid_vis_size = 30
self.point_mass = 10
self.point_pos = -2 * np.ones(2)
self.point_vel = -np.zeros(2)
self.point_acc_force = -np.zeros(2)
self.goal_pos = self.grid_idx_to_pos(goal_i, goal_j)
self.dt = 0.002
self.frameskip = 1
self.action_scale = 100
self.obs_dim = 6
self.action_space = spaces.Box(np.array([-1.0, -1.0]), np.array([1.0, 1.0]))
obs_high = np.inf * np.ones(self.obs_dim)
obs_low = -obs_high
self.observation_space = spaces.Box(obs_low, obs_high)
self._seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
self.metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': int(np.round(1.0 / self.dt)) / self.frameskip
}
# self.point_mass_trans = None
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
pos_before = self.point_pos
old_to_goal_dist = (pos_before[0] - self.goal_pos[0]) ** 2 + (pos_before[1] - self.goal_pos[1]) ** 2
tau = action * self.action_scale
self.do_simulation(tau, self.frameskip)
obs = self._get_obs()
pos_after = self.point_pos
curr_to_goal_dist = (pos_after[0] - self.goal_pos[0]) ** 2 + (pos_after[1] - self.goal_pos[1]) ** 2
#
alive_penalty = -5
progress_reward = 80 * (old_to_goal_dist - curr_to_goal_dist)
distance_to_goal_reward = min(70, 10 / curr_to_goal_dist)
i, j = self.pos_to_grid_idx(pos_after)
close_to_wall_penalty = 0
for neighbor_c in range(i - 2, i + 3, 1):
for neighbor_r in range(j - 2, j + 3, 1):
if neighbor_c > 0 and neighbor_c < len(self.grid_map) and neighbor_r > 0 and neighbor_r < len(
self.grid_map[0]):
grid_val = self.grid_map[neighbor_c, neighbor_r]
if grid_val == 1:
wall_pos = self.grid_idx_to_pos(neighbor_c, neighbor_r)
dist_sq = (pos_after[0] - wall_pos[0]) ** 2 + (pos_after[1] - wall_pos[1]) ** 2
if (close_to_wall_penalty < 1.0 / dist_sq):
close_to_wall_penalty = 1 / dist_sq
close_to_wall_penalty = min(10, close_to_wall_penalty)
reward = alive_penalty + progress_reward + distance_to_goal_reward - close_to_wall_penalty
done = False
if self.grid_map[i, j] == 1:
done = True
reward -= 2000
elif self.grid_map[i, j] == 2:
done = True
reward += 10000
return obs, reward, done, {'Alive penalty': alive_penalty,
'Progress Reward': progress_reward,
'Distance to Goal Reward': distance_to_goal_reward, 'tau': tau,
'Total Reward': reward,
'Close to Wall Penalty'
: -close_to_wall_penalty}
def _get_obs(self):
return np.concatenate([[self.point_pos[0], self.point_pos[1]], [self.point_vel[0], self.point_vel[1]],
[self.goal_pos[0] - self.point_pos[0],
self.goal_pos[1] - self.point_pos[1]]]).ravel()
def do_simulation(self, tau, frameskip):
wall_hit = 0
for _ in range(frameskip):
self.point_acc_force = tau
i_before, j_before = self.pos_to_grid_idx(self.point_pos)
next_pos = self.point_pos + self.dt * self.point_vel
i_after, j_after = self.pos_to_grid_idx(next_pos)
# print(i_after, j_after)
if self.grid_map[i_after, j_after] == 1:
change_dir = np.array([abs(j_after - j_before), abs(i_after - i_before)])
vel = np.zeros(len(self.point_vel))
if (change_dir[0] == 1):
self.point_vel[0] *= -.2
if (change_dir[1] == 1):
self.point_vel[1] *= -.2
wall_hit = 1
self.point_pos = self.point_pos + self.dt * self.point_vel
self.point_vel = self.point_vel + self.dt * (self.point_acc_force / self.point_mass)
return wall_hit
def _reset(self):
self.point_pos = -2 * np.ones(2) + self.np_random.uniform(low=-0.01, high=0.01, size=(2))
self.point_vel = -np.zeros(2) + self.np_random.uniform(low=-0.01, high=0.01, size=(2))
self.point_acc_force = -np.zeros(2)
return self._get_obs()
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
screen_width = 800
screen_height = 800
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
for i in range(len(self.grid_map)):
for j in range(len(self.grid_map[0])):
l = screen_width / 2 + (
j - int((len(
self.grid_map)) / 2)) * self.grid_vis_size - self.grid_vis_size / 2
r = screen_width / 2 + (
j - int((len(
self.grid_map)) / 2)) * self.grid_vis_size + self.grid_vis_size / 2
t = screen_height / 2 + (
int((len(
self.grid_map)) / 2) - i) * self.grid_vis_size + self.grid_vis_size / 2
b = screen_height / 2 + (
int((len(
self.grid_map)) / 2) - i) * self.grid_vis_size - self.grid_vis_size / 2
cell = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
cell.add_attr(rendering.Transform())
if (self.grid_map[i, j] == 1):
cell.set_color(0, 0, 0)
elif (self.grid_map[i, j] == 2):
cell.set_color(1, 0, 0)
else:
cell.set_color(1, 1, 1)
self.viewer.add_geom(cell)
right_edge = rendering.Line((l, b), (l, t))
bottom_edge = rendering.Line((l, b), (r, b))
right_edge.set_color(0.5, 0, 0)
bottom_edge.set_color(0.5, 0, 0)
self.viewer.add_geom(right_edge)
self.viewer.add_geom(bottom_edge)
#
q = self.point_pos
q_x = screen_width / 2
q_y = screen_height / 2
point_mass = rendering.FilledPolygon(
[(q_x - 5, q_y - 5), (q_x - 5, q_y + 5), (q_x + 5, q_y + 5), (q_x + 5, q_y - 5)])
point_mass.set_color(0, 0, 1)
self.point_mass_trans = rendering.Transform()
point_mass.add_attr(self.point_mass_trans)
self.viewer.add_geom(point_mass)
q = self.point_pos
q_x = (q[0] / self.grid_size * self.grid_vis_size)
q_y = (q[1] / self.grid_size * self.grid_vis_size)
# print(q)
# idx,idy = self.pos_to_grid_idx(q)
# print(idx,idy)
# posx,posy = self.grid_idx_to_pos(idx,idy)
# print(posx,posy)
self.point_mass_trans.set_translation(q_x, q_y)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def pos_to_grid_idx(self, pos):
normalized_pos = (np.round((pos) / (self.grid_size))).astype(int)
x_idx = normalized_pos[0] + int(len(self.grid_map) / 2)
y_idx = -normalized_pos[1] + int(len(self.grid_map) / 2)
return y_idx, x_idx
def grid_idx_to_pos(self, i, j):
x = (j - int((len(self.grid_map)) / 2)) * self.grid_size
y = (int((len(self.grid_map)) / 2) - i) * self.grid_size
return np.array([x, y])
| """
Classic cart-pole system implemented by <NAME> et al.
Copied from http://incompleteideas.net/sutton/book/code/pole.c
permalink: https://perma.cc/C9ZM-652R
"""
import logging
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
logger = logging.getLogger(__name__)
class PathFindingDataCollect(gym.Env):
def __init__(self):
self.grid_map = np.zeros((21, 21), dtype=int)
self.grid_map[0, :] = 1
self.grid_map[-1, :] = 1
self.grid_map[:, 0] = 1
self.grid_map[:, -1] = 1
self.grid_map[4, 4:-7] = 1
self.grid_map[4:-7, 4] = 1
self.grid_map[-5, 7:-4] = 1
self.grid_map[7:-4, -5] = 1
self.grid_map[8:13, 8:13] = 1
# This is the goal grid
goal_i, goal_j = 11, 18
self.grid_map[goal_i, goal_j] = 2
self.grid_size = 0.25
self.grid_vis_size = 30
self.point_mass = 10
self.point_pos = -2 * np.ones(2)
self.point_vel = -np.zeros(2)
self.point_acc_force = -np.zeros(2)
self.goal_pos = self.grid_idx_to_pos(goal_i, goal_j)
self.dt = 0.002
self.frameskip = 1
self.action_scale = 100
self.obs_dim = 6
self.action_space = spaces.Box(np.array([-1.0, -1.0]), np.array([1.0, 1.0]))
obs_high = np.inf * np.ones(self.obs_dim)
obs_low = -obs_high
self.observation_space = spaces.Box(obs_low, obs_high)
self._seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
self.metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': int(np.round(1.0 / self.dt)) / self.frameskip
}
# self.point_mass_trans = None
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
pos_before = self.point_pos
old_to_goal_dist = (pos_before[0] - self.goal_pos[0]) ** 2 + (pos_before[1] - self.goal_pos[1]) ** 2
tau = action * self.action_scale
self.do_simulation(tau, self.frameskip)
obs = self._get_obs()
pos_after = self.point_pos
curr_to_goal_dist = (pos_after[0] - self.goal_pos[0]) ** 2 + (pos_after[1] - self.goal_pos[1]) ** 2
#
alive_penalty = -5
progress_reward = 80 * (old_to_goal_dist - curr_to_goal_dist)
distance_to_goal_reward = min(70, 10 / curr_to_goal_dist)
i, j = self.pos_to_grid_idx(pos_after)
close_to_wall_penalty = 0
for neighbor_c in range(i - 2, i + 3, 1):
for neighbor_r in range(j - 2, j + 3, 1):
if neighbor_c > 0 and neighbor_c < len(self.grid_map) and neighbor_r > 0 and neighbor_r < len(
self.grid_map[0]):
grid_val = self.grid_map[neighbor_c, neighbor_r]
if grid_val == 1:
wall_pos = self.grid_idx_to_pos(neighbor_c, neighbor_r)
dist_sq = (pos_after[0] - wall_pos[0]) ** 2 + (pos_after[1] - wall_pos[1]) ** 2
if (close_to_wall_penalty < 1.0 / dist_sq):
close_to_wall_penalty = 1 / dist_sq
close_to_wall_penalty = min(10, close_to_wall_penalty)
reward = alive_penalty + progress_reward + distance_to_goal_reward - close_to_wall_penalty
done = False
if self.grid_map[i, j] == 1:
done = True
reward -= 2000
elif self.grid_map[i, j] == 2:
done = True
reward += 10000
return obs, reward, done, {'Alive penalty': alive_penalty,
'Progress Reward': progress_reward,
'Distance to Goal Reward': distance_to_goal_reward, 'tau': tau,
'Total Reward': reward,
'Close to Wall Penalty'
: -close_to_wall_penalty}
def _get_obs(self):
return np.concatenate([[self.point_pos[0], self.point_pos[1]], [self.point_vel[0], self.point_vel[1]],
[self.goal_pos[0] - self.point_pos[0],
self.goal_pos[1] - self.point_pos[1]]]).ravel()
def do_simulation(self, tau, frameskip):
wall_hit = 0
for _ in range(frameskip):
self.point_acc_force = tau
i_before, j_before = self.pos_to_grid_idx(self.point_pos)
next_pos = self.point_pos + self.dt * self.point_vel
i_after, j_after = self.pos_to_grid_idx(next_pos)
# print(i_after, j_after)
if self.grid_map[i_after, j_after] == 1:
change_dir = np.array([abs(j_after - j_before), abs(i_after - i_before)])
vel = np.zeros(len(self.point_vel))
if (change_dir[0] == 1):
self.point_vel[0] *= -.2
if (change_dir[1] == 1):
self.point_vel[1] *= -.2
wall_hit = 1
self.point_pos = self.point_pos + self.dt * self.point_vel
self.point_vel = self.point_vel + self.dt * (self.point_acc_force / self.point_mass)
return wall_hit
def _reset(self):
self.point_pos = -2 * np.ones(2) + self.np_random.uniform(low=-0.01, high=0.01, size=(2))
self.point_vel = -np.zeros(2) + self.np_random.uniform(low=-0.01, high=0.01, size=(2))
self.point_acc_force = -np.zeros(2)
return self._get_obs()
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
screen_width = 800
screen_height = 800
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
for i in range(len(self.grid_map)):
for j in range(len(self.grid_map[0])):
l = screen_width / 2 + (
j - int((len(
self.grid_map)) / 2)) * self.grid_vis_size - self.grid_vis_size / 2
r = screen_width / 2 + (
j - int((len(
self.grid_map)) / 2)) * self.grid_vis_size + self.grid_vis_size / 2
t = screen_height / 2 + (
int((len(
self.grid_map)) / 2) - i) * self.grid_vis_size + self.grid_vis_size / 2
b = screen_height / 2 + (
int((len(
self.grid_map)) / 2) - i) * self.grid_vis_size - self.grid_vis_size / 2
cell = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
cell.add_attr(rendering.Transform())
if (self.grid_map[i, j] == 1):
cell.set_color(0, 0, 0)
elif (self.grid_map[i, j] == 2):
cell.set_color(1, 0, 0)
else:
cell.set_color(1, 1, 1)
self.viewer.add_geom(cell)
right_edge = rendering.Line((l, b), (l, t))
bottom_edge = rendering.Line((l, b), (r, b))
right_edge.set_color(0.5, 0, 0)
bottom_edge.set_color(0.5, 0, 0)
self.viewer.add_geom(right_edge)
self.viewer.add_geom(bottom_edge)
#
q = self.point_pos
q_x = screen_width / 2
q_y = screen_height / 2
point_mass = rendering.FilledPolygon(
[(q_x - 5, q_y - 5), (q_x - 5, q_y + 5), (q_x + 5, q_y + 5), (q_x + 5, q_y - 5)])
point_mass.set_color(0, 0, 1)
self.point_mass_trans = rendering.Transform()
point_mass.add_attr(self.point_mass_trans)
self.viewer.add_geom(point_mass)
q = self.point_pos
q_x = (q[0] / self.grid_size * self.grid_vis_size)
q_y = (q[1] / self.grid_size * self.grid_vis_size)
# print(q)
# idx,idy = self.pos_to_grid_idx(q)
# print(idx,idy)
# posx,posy = self.grid_idx_to_pos(idx,idy)
# print(posx,posy)
self.point_mass_trans.set_translation(q_x, q_y)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def pos_to_grid_idx(self, pos):
normalized_pos = (np.round((pos) / (self.grid_size))).astype(int)
x_idx = normalized_pos[0] + int(len(self.grid_map) / 2)
y_idx = -normalized_pos[1] + int(len(self.grid_map) / 2)
return y_idx, x_idx
def grid_idx_to_pos(self, i, j):
x = (j - int((len(self.grid_map)) / 2)) * self.grid_size
y = (int((len(self.grid_map)) / 2) - i) * self.grid_size
return np.array([x, y]) | en | 0.622427 | Classic cart-pole system implemented by <NAME> et al. Copied from http://incompleteideas.net/sutton/book/code/pole.c permalink: https://perma.cc/C9ZM-652R # This is the goal grid # self.point_mass_trans = None # # print(i_after, j_after) # # print(q) # idx,idy = self.pos_to_grid_idx(q) # print(idx,idy) # posx,posy = self.grid_idx_to_pos(idx,idy) # print(posx,posy) | 2.577783 | 3 |
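A minimal usage sketch for the PathFindingDataCollect environment in the record above, assuming an older gym release whose Env.reset/step dispatch to the underscore methods defined here; the random-action loop, step limit, and function name are illustrative assumptions, not part of the record.
def run_random_episode(env, max_steps=500):
    # obs layout per _get_obs: [pos_x, pos_y, vel_x, vel_y, goal_dx, goal_dy]
    obs = env.reset()
    total_reward = 0.0
    info = {}
    for _ in range(max_steps):
        action = env.action_space.sample()  # force in [-1, 1]^2, scaled by action_scale in _step
        obs, reward, done, info = env.step(action)
        total_reward += reward
        if done:  # wall cell hit (reward -= 2000) or goal cell reached (reward += 10000)
            break
    return total_reward, info
# env = PathFindingDataCollect()
# print(run_random_episode(env))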
tensorflow/python/platform/benchmark.py | JianGoForIt/tensorflow | 0 | 6632869 | <filename>tensorflow/python/platform/benchmark.py
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to run benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import numbers
import os
import re
import sys
import six
from google.protobuf import text_format
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
# When a subclass of the Benchmark class is created, it is added to
# the registry automatically
GLOBAL_BENCHMARK_REGISTRY = set()
# Environment variable that determines whether benchmarks are written.
# See also tensorflow/core/util/reporter.h TestReporter::kTestReporterEnv.
TEST_REPORTER_TEST_ENV = "TEST_REPORT_FILE_PREFIX"
def _global_report_benchmark(
name, iters=None, cpu_time=None, wall_time=None,
throughput=None, extras=None):
"""Method for recording a benchmark directly.
Args:
name: The BenchmarkEntry name.
iters: (optional) How many iterations were run
cpu_time: (optional) Total cpu time in seconds
wall_time: (optional) Total wall time in seconds
throughput: (optional) Throughput (in MB/s)
extras: (optional) Dict mapping string keys to additional benchmark info.
Raises:
TypeError: if extras is not a dict.
IOError: if the benchmark output file already exists.
"""
if extras is not None:
if not isinstance(extras, dict):
raise TypeError("extras must be a dict")
test_env = os.environ.get(TEST_REPORTER_TEST_ENV, None)
if test_env is None:
# Reporting was not requested
return
entry = test_log_pb2.BenchmarkEntry()
entry.name = name
if iters is not None:
entry.iters = iters
if cpu_time is not None:
entry.cpu_time = cpu_time
if wall_time is not None:
entry.wall_time = wall_time
if throughput is not None:
entry.throughput = throughput
if extras is not None:
for (k, v) in extras.items():
if isinstance(v, numbers.Number):
entry.extras[k].double_value = v
else:
entry.extras[k].string_value = str(v)
serialized_entry = text_format.MessageToString(entry)
mangled_name = name.replace("/", "__")
output_path = "%s%s" % (test_env, mangled_name)
if gfile.Exists(output_path):
raise IOError("File already exists: %s" % output_path)
with gfile.GFile(output_path, "w") as out:
out.write(serialized_entry)
class _BenchmarkRegistrar(type):
"""The Benchmark class registrar. Used by abstract Benchmark class."""
def __new__(mcs, clsname, base, attrs):
newclass = super(mcs, _BenchmarkRegistrar).__new__(
mcs, clsname, base, attrs)
if len(newclass.mro()) > 2:
# Only the base Benchmark abstract class has mro length 2.
# The rest subclass from it and are therefore registered.
GLOBAL_BENCHMARK_REGISTRY.add(newclass)
return newclass
class Benchmark(six.with_metaclass(_BenchmarkRegistrar, object)):
"""Abstract class that provides helper functions for running benchmarks.
Any class subclassing this one is immediately registered in the global
benchmark registry.
Only methods whose names start with the word "benchmark" will be run during
benchmarking.
"""
def _get_name(self, overwrite_name):
"""Returns full name of class and method calling report_benchmark."""
# Expect that the caller called report_benchmark, which called _get_name.
caller = inspect.stack()[2]
calling_class = caller[0].f_locals.get("self", None)
    # Use the method name, or overwrite_name if it is provided.
name = overwrite_name if overwrite_name is not None else caller[3]
if calling_class is not None:
# Prefix the name with the class name.
class_name = type(calling_class).__name__
name = "%s.%s" % (class_name, name)
return name
def report_benchmark(
self,
iters=None,
cpu_time=None,
wall_time=None,
throughput=None,
extras=None,
name=None):
"""Report a benchmark.
Args:
iters: (optional) How many iterations were run
cpu_time: (optional) Total cpu time in seconds
wall_time: (optional) Total wall time in seconds
throughput: (optional) Throughput (in MB/s)
extras: (optional) Dict mapping string keys to additional benchmark info.
name: (optional) Override the BenchmarkEntry name with `name`.
Otherwise it is inferred from the calling class and top-level
method name.
"""
name = self._get_name(overwrite_name=name)
_global_report_benchmark(
name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
throughput=throughput, extras=extras)
def _run_specific_benchmark(benchmark_class):
benchmark = benchmark_class()
attrs = dir(benchmark)
# Only run methods of this class whose names start with "benchmark"
for attr in attrs:
if not attr.startswith("benchmark"):
continue
benchmark_fn = getattr(benchmark, attr)
if not callable(benchmark_fn):
continue
# Call this benchmark method
benchmark_fn()
def _run_benchmarks(regex):
"""Run benchmarks that match regex `regex`.
This function goes through the global benchmark registry, and matches
  benchmark **class names** of the form "module.name.BenchmarkClass" to
the given regex. If a class matches, all of its benchmark methods
are run.
Args:
regex: The string regular expression to match Benchmark classes against.
"""
registry = list(GLOBAL_BENCHMARK_REGISTRY)
# Match benchmarks in registry against regex
for benchmark in registry:
benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
if re.search(regex, benchmark_name):
# Found a match
_run_specific_benchmark(benchmark)
def benchmarks_main(true_main=None):
"""Run benchmarks as declared in args.
Args:
true_main: True main function to run if benchmarks are not requested.
"""
argv = sys.argv
found_arg = [arg for arg in argv
if arg.startswith("--benchmarks=")
or arg.startswith("-benchmarks=")]
if found_arg:
# Remove --benchmarks arg from sys.argv
argv.remove(found_arg[0])
regex = found_arg[0].split("=")[1]
app.run(lambda _: _run_benchmarks(regex))
else:
true_main()
| <filename>tensorflow/python/platform/benchmark.py
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to run benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import numbers
import os
import re
import sys
import six
from google.protobuf import text_format
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
# When a subclass of the Benchmark class is created, it is added to
# the registry automatically
GLOBAL_BENCHMARK_REGISTRY = set()
# Environment variable that determines whether benchmarks are written.
# See also tensorflow/core/util/reporter.h TestReporter::kTestReporterEnv.
TEST_REPORTER_TEST_ENV = "TEST_REPORT_FILE_PREFIX"
def _global_report_benchmark(
name, iters=None, cpu_time=None, wall_time=None,
throughput=None, extras=None):
"""Method for recording a benchmark directly.
Args:
name: The BenchmarkEntry name.
iters: (optional) How many iterations were run
cpu_time: (optional) Total cpu time in seconds
wall_time: (optional) Total wall time in seconds
throughput: (optional) Throughput (in MB/s)
extras: (optional) Dict mapping string keys to additional benchmark info.
Raises:
TypeError: if extras is not a dict.
IOError: if the benchmark output file already exists.
"""
if extras is not None:
if not isinstance(extras, dict):
raise TypeError("extras must be a dict")
test_env = os.environ.get(TEST_REPORTER_TEST_ENV, None)
if test_env is None:
# Reporting was not requested
return
entry = test_log_pb2.BenchmarkEntry()
entry.name = name
if iters is not None:
entry.iters = iters
if cpu_time is not None:
entry.cpu_time = cpu_time
if wall_time is not None:
entry.wall_time = wall_time
if throughput is not None:
entry.throughput = throughput
if extras is not None:
for (k, v) in extras.items():
if isinstance(v, numbers.Number):
entry.extras[k].double_value = v
else:
entry.extras[k].string_value = str(v)
serialized_entry = text_format.MessageToString(entry)
mangled_name = name.replace("/", "__")
output_path = "%s%s" % (test_env, mangled_name)
if gfile.Exists(output_path):
raise IOError("File already exists: %s" % output_path)
with gfile.GFile(output_path, "w") as out:
out.write(serialized_entry)
class _BenchmarkRegistrar(type):
"""The Benchmark class registrar. Used by abstract Benchmark class."""
def __new__(mcs, clsname, base, attrs):
newclass = super(mcs, _BenchmarkRegistrar).__new__(
mcs, clsname, base, attrs)
if len(newclass.mro()) > 2:
# Only the base Benchmark abstract class has mro length 2.
# The rest subclass from it and are therefore registered.
GLOBAL_BENCHMARK_REGISTRY.add(newclass)
return newclass
class Benchmark(six.with_metaclass(_BenchmarkRegistrar, object)):
"""Abstract class that provides helper functions for running benchmarks.
Any class subclassing this one is immediately registered in the global
benchmark registry.
Only methods whose names start with the word "benchmark" will be run during
benchmarking.
"""
def _get_name(self, overwrite_name):
"""Returns full name of class and method calling report_benchmark."""
# Expect that the caller called report_benchmark, which called _get_name.
caller = inspect.stack()[2]
calling_class = caller[0].f_locals.get("self", None)
    # Use the method name, or overwrite_name if it is provided.
name = overwrite_name if overwrite_name is not None else caller[3]
if calling_class is not None:
# Prefix the name with the class name.
class_name = type(calling_class).__name__
name = "%s.%s" % (class_name, name)
return name
def report_benchmark(
self,
iters=None,
cpu_time=None,
wall_time=None,
throughput=None,
extras=None,
name=None):
"""Report a benchmark.
Args:
iters: (optional) How many iterations were run
cpu_time: (optional) Total cpu time in seconds
wall_time: (optional) Total wall time in seconds
throughput: (optional) Throughput (in MB/s)
extras: (optional) Dict mapping string keys to additional benchmark info.
name: (optional) Override the BenchmarkEntry name with `name`.
Otherwise it is inferred from the calling class and top-level
method name.
"""
name = self._get_name(overwrite_name=name)
_global_report_benchmark(
name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
throughput=throughput, extras=extras)
def _run_specific_benchmark(benchmark_class):
benchmark = benchmark_class()
attrs = dir(benchmark)
# Only run methods of this class whose names start with "benchmark"
for attr in attrs:
if not attr.startswith("benchmark"):
continue
benchmark_fn = getattr(benchmark, attr)
if not callable(benchmark_fn):
continue
# Call this benchmark method
benchmark_fn()
def _run_benchmarks(regex):
"""Run benchmarks that match regex `regex`.
This function goes through the global benchmark registry, and matches
  benchmark **class names** of the form "module.name.BenchmarkClass" to
the given regex. If a class matches, all of its benchmark methods
are run.
Args:
regex: The string regular expression to match Benchmark classes against.
"""
registry = list(GLOBAL_BENCHMARK_REGISTRY)
# Match benchmarks in registry against regex
for benchmark in registry:
benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
if re.search(regex, benchmark_name):
# Found a match
_run_specific_benchmark(benchmark)
def benchmarks_main(true_main=None):
"""Run benchmarks as declared in args.
Args:
true_main: True main function to run if benchmarks are not requested.
"""
argv = sys.argv
found_arg = [arg for arg in argv
if arg.startswith("--benchmarks=")
or arg.startswith("-benchmarks=")]
if found_arg:
# Remove --benchmarks arg from sys.argv
argv.remove(found_arg[0])
regex = found_arg[0].split("=")[1]
app.run(lambda _: _run_benchmarks(regex))
else:
true_main()
| en | 0.800591 | # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Utilities to run benchmarks. # When a subclass of the Benchmark class is created, it is added to # the registry automatically # Environment variable that determines whether benchmarks are written. # See also tensorflow/core/util/reporter.h TestReporter::kTestReporterEnv. Method for recording a benchmark directly. Args: name: The BenchmarkEntry name. iters: (optional) How many iterations were run cpu_time: (optional) Total cpu time in seconds wall_time: (optional) Total wall time in seconds throughput: (optional) Throughput (in MB/s) extras: (optional) Dict mapping string keys to additional benchmark info. Raises: TypeError: if extras is not a dict. IOError: if the benchmark output file already exists. # Reporting was not requested The Benchmark class registrar. Used by abstract Benchmark class. # Only the base Benchmark abstract class has mro length 2. # The rest subclass from it and are therefore registered. Abstract class that provides helper functions for running benchmarks. Any class subclassing this one is immediately registered in the global benchmark registry. Only methods whose names start with the word "benchmark" will be run during benchmarking. Returns full name of class and method calling report_benchmark. # Expect that the caller called report_benchmark, which called _get_name. # Use the method name, or overwrite_name is provided. # Prefix the name with the class name. Report a benchmark. Args: iters: (optional) How many iterations were run cpu_time: (optional) Total cpu time in seconds wall_time: (optional) Total wall time in seconds throughput: (optional) Throughput (in MB/s) extras: (optional) Dict mapping string keys to additional benchmark info. name: (optional) Override the BenchmarkEntry name with `name`. Otherwise it is inferred from the calling class and top-level method name. # Only run methods of this class whose names start with "benchmark" # Call this benchmark method Run benchmarks that match regex `regex`. This function goes through the global benchmark registry, and matches benchmark **classe names** of the form "module.name.BenchmarkClass" to the given regex. If a class matches, all of its benchmark methods are run. Args: regex: The string regular expression to match Benchmark classes against. # Match benchmarks in registry against regex # Found a match Run benchmarks as declared in args. Args: true_main: True main function to run if benchmarks are not requested. # Remove --benchmarks arg from sys.argv | 2.110451 | 2 |
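A hedged sketch of how the Benchmark base class in the record above is meant to be used: any subclass is auto-registered by the metaclass, and methods whose names start with "benchmark" run when a --benchmarks=<regex> flag matching "module.ClassName" is routed through benchmarks_main. The class name, timed loop, and extras key below are illustrative assumptions, not taken from the record.
import time
from tensorflow.python.platform.benchmark import Benchmark

class MyLoopBenchmark(Benchmark):  # registration happens via the _BenchmarkRegistrar metaclass

  def benchmark_tight_loop(self):
    iters = 100000
    start = time.time()
    acc = 0
    for i in range(iters):
      acc += i * i  # stand-in for the operation being measured
    wall_time = time.time() - start
    # Only written out when TEST_REPORT_FILE_PREFIX is set in the environment;
    # the entry name defaults to "MyLoopBenchmark.benchmark_tight_loop".
    self.report_benchmark(iters=iters, wall_time=wall_time,
                          extras={"accumulator": acc})

# Example selection (assuming the test binary forwards the flag to benchmarks_main):
#   python my_benchmark.py --benchmarks=MyLoopBenchmark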
base_market_agent/__init__.py | shwethanidd/volttron-GS | 1 | 6632870 | <reponame>shwethanidd/volttron-GS<gh_stars>1-10
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import logging
from volttron.platform.agent import utils
from volttron.platform.vip.agent import PubSub
from volttron.platform.vip.agent import Agent
from volttron.platform.messaging.topics import MARKET_RESERVE, MARKET_BID, MARKET_CLEAR, MARKET_AGGREGATE, MARKET_ERROR
from volttron.platform.agent.base_market_agent.registration_manager import RegistrationManager
from volttron.platform.agent.base_market_agent.poly_line_factory import PolyLineFactory
from volttron.platform.agent.base_market_agent.rpc_proxy import RpcProxy
_log = logging.getLogger(__name__)
utils.setup_logging()
__version__ = "0.01"
class MarketAgent(Agent):
"""
    The MarketAgent serves as the base class for any agent that wants to participate in
an auction market. By inheriting from this agent all the remote communication
with the MarketService is handled and the sub-class can be unconcerned with those details.
"""
def __init__(self, verbose_logging=True, **kwargs):
super(MarketAgent, self).__init__(**kwargs)
_log.debug("vip_identity: " + self.core.identity)
rpc_proxy = RpcProxy(self.vip.rpc.call, verbose_logging)
self.registrations = RegistrationManager(rpc_proxy)
self.verbose_logging = verbose_logging
@PubSub.subscribe('pubsub', MARKET_RESERVE)
def match_reservation(self, peer, sender, bus, topic, headers, message):
timestamp = utils.parse_timestamp_string(message)
decoded_message = "Timestamp: {}".format(timestamp)
self.log_event("match_reservation", peer, sender, bus, topic, headers, decoded_message)
self.registrations.request_reservations(timestamp)
@PubSub.subscribe('pubsub', MARKET_BID)
def match_make_offer(self, peer, sender, bus, topic, headers, message):
timestamp = utils.parse_timestamp_string(message[0])
unformed_markets = message[1]
decoded_message = "Timestamp: {}".format(timestamp)
self.log_event("match_make_offer", peer, sender, bus, topic, headers, decoded_message)
self.registrations.request_offers(timestamp, unformed_markets)
@PubSub.subscribe('pubsub', MARKET_CLEAR)
def match_report_clear_price(self, peer, sender, bus, topic, headers, message):
timestamp = utils.parse_timestamp_string(message[0])
market_name = message[1]
quantity = message[2]
price = message[3]
decoded_message = "Timestamp: {} Market: {} Price: {} Quantity: {}".format(timestamp, market_name, price, quantity)
self.log_event("match_report_clear_price", peer, sender, bus, topic, headers, decoded_message)
self.registrations.report_clear_price(timestamp, market_name, price, quantity)
@PubSub.subscribe('pubsub', MARKET_AGGREGATE)
def match_report_aggregate(self, peer, sender, bus, topic, headers, message):
timestamp = utils.parse_timestamp_string(message[0])
market_name = message[1]
buyer_seller = message[2]
aggregate_curve_points = message[3]
decoded_message = "Timestamp: {} Market: {} {} Curve: {}".format(timestamp, market_name, buyer_seller, aggregate_curve_points)
self.log_event("match_report_aggregate", peer, sender, bus, topic, headers, decoded_message)
aggregate_curve = PolyLineFactory.fromTupples(aggregate_curve_points)
self.registrations.report_aggregate(timestamp, market_name, buyer_seller, aggregate_curve)
@PubSub.subscribe('pubsub', MARKET_ERROR)
def match_report_error(self, peer, sender, bus, topic, headers, message):
timestamp = utils.parse_timestamp_string(message[0])
market_name = message[1]
error_code = message[2]
error_message = message[3]
aux = message[4]
decoded_message = "Timestamp: {} Market: {} Code: {} Message: {}".format(timestamp, market_name, error_code, error_message)
self.log_event("match_report_error", peer, sender, bus, topic, headers, decoded_message)
self.registrations.report_error(timestamp, market_name, error_code, error_message, aux)
def log_event(self, method_name, peer, sender, bus, topic, headers, decoded_message):
if self.verbose_logging:
_log.debug("{} Peer: {} Sender: {} Bus: {} Topic: {} Headers: {} Message: {}".format(method_name, peer, sender, bus, topic, headers, decoded_message))
def join_market(self, market_name, buyer_seller, reservation_callback,
offer_callback, aggregate_callback, price_callback, error_callback):
"""
This routine is called once to join a market as a buyer or a seller.
        The agent supplies callback functions that the MarketAgent calls as the market process proceeds.
:param market_name: The name of the market commodity.
:param buyer_seller: A string indicating whether the agent is buying from or selling to the market.
The agent shall use the pre-defined strings provided.
:param reservation_callback: This callback is called at the beginning of each round of bidding and clearing.
The agent can choose whether or not to participate in this round.
If the agent wants to participate it returns true otherwise it returns false.
If the agent does not specify a callback routine a reservation will be made for each round automatically.
A market will only exist if there are reservations for at least one buyer and at least one seller.
If the market fails to achieve the minimum participation the error callback will be called.
:param offer_callback: If the agent has made a reservation for the market this routine is called.
If the agent wishes to make an offer at this time the market agent computes either supply or demand curves
as appropriate and offers them to the market service by calling the make offer method.
For each market joined either an offer callback or an aggregate callback is required.
You can’t supply both for any single market.
:param aggregate_callback: When a market has received all its buy offers it calculates an aggregate
demand curve. When the market receives all of its sell offers it calculates an aggregate supply curve.
This callback delivers the aggregate curve to the market agent whenever the appropriate curve
        becomes available. If the market agent wants to use this to make an offer it would do that using
the make offer method. For each market joined either an offer callback or an aggregate callback is required.
You can’t supply both for any single market.
:param price_callback: This callback is called when the market clears. The price callback is optional.
:param error_callback: This callback is called at appropriate time points or when an error occurs.
If a market fails to form this will be called at the offer time.
If the market doesn’t receive all its offers this will be called at market clear time.
If the market fails to clear this would be called at the next reservation time.
This allows agents to respond at or near the normal time points. The error callback is optional.
"""
self.registrations.make_registration(market_name, buyer_seller,
reservation_callback, offer_callback,
aggregate_callback, price_callback, error_callback)
def make_offer(self, market_name, buyer_seller, curve):
"""
This call makes an offer with the MarketService.
:param market_name: The name of the market commodity.
:param buyer_seller: A string indicating whether the agent is buying from or selling to the market.
The agent shall use the pre-defined strings provided.
:param curve: The demand curve for buyers or the supply curve for sellers.
"""
result = self.registrations.make_offer(market_name, buyer_seller, curve)
return result
| # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import logging
from volttron.platform.agent import utils
from volttron.platform.vip.agent import PubSub
from volttron.platform.vip.agent import Agent
from volttron.platform.messaging.topics import MARKET_RESERVE, MARKET_BID, MARKET_CLEAR, MARKET_AGGREGATE, MARKET_ERROR
from volttron.platform.agent.base_market_agent.registration_manager import RegistrationManager
from volttron.platform.agent.base_market_agent.poly_line_factory import PolyLineFactory
from volttron.platform.agent.base_market_agent.rpc_proxy import RpcProxy
_log = logging.getLogger(__name__)
utils.setup_logging()
__version__ = "0.01"
class MarketAgent(Agent):
"""
    The MarketAgent serves as the base class for any agent that wants to participate in
an auction market. By inheriting from this agent all the remote communication
with the MarketService is handled and the sub-class can be unconcerned with those details.
"""
def __init__(self, verbose_logging=True, **kwargs):
super(MarketAgent, self).__init__(**kwargs)
_log.debug("vip_identity: " + self.core.identity)
rpc_proxy = RpcProxy(self.vip.rpc.call, verbose_logging)
self.registrations = RegistrationManager(rpc_proxy)
self.verbose_logging = verbose_logging
@PubSub.subscribe('pubsub', MARKET_RESERVE)
def match_reservation(self, peer, sender, bus, topic, headers, message):
timestamp = utils.parse_timestamp_string(message)
decoded_message = "Timestamp: {}".format(timestamp)
self.log_event("match_reservation", peer, sender, bus, topic, headers, decoded_message)
self.registrations.request_reservations(timestamp)
@PubSub.subscribe('pubsub', MARKET_BID)
def match_make_offer(self, peer, sender, bus, topic, headers, message):
timestamp = utils.parse_timestamp_string(message[0])
unformed_markets = message[1]
decoded_message = "Timestamp: {}".format(timestamp)
self.log_event("match_make_offer", peer, sender, bus, topic, headers, decoded_message)
self.registrations.request_offers(timestamp, unformed_markets)
@PubSub.subscribe('pubsub', MARKET_CLEAR)
def match_report_clear_price(self, peer, sender, bus, topic, headers, message):
timestamp = utils.parse_timestamp_string(message[0])
market_name = message[1]
quantity = message[2]
price = message[3]
decoded_message = "Timestamp: {} Market: {} Price: {} Quantity: {}".format(timestamp, market_name, price, quantity)
self.log_event("match_report_clear_price", peer, sender, bus, topic, headers, decoded_message)
self.registrations.report_clear_price(timestamp, market_name, price, quantity)
@PubSub.subscribe('pubsub', MARKET_AGGREGATE)
def match_report_aggregate(self, peer, sender, bus, topic, headers, message):
timestamp = utils.parse_timestamp_string(message[0])
market_name = message[1]
buyer_seller = message[2]
aggregate_curve_points = message[3]
decoded_message = "Timestamp: {} Market: {} {} Curve: {}".format(timestamp, market_name, buyer_seller, aggregate_curve_points)
self.log_event("match_report_aggregate", peer, sender, bus, topic, headers, decoded_message)
aggregate_curve = PolyLineFactory.fromTupples(aggregate_curve_points)
self.registrations.report_aggregate(timestamp, market_name, buyer_seller, aggregate_curve)
@PubSub.subscribe('pubsub', MARKET_ERROR)
def match_report_error(self, peer, sender, bus, topic, headers, message):
timestamp = utils.parse_timestamp_string(message[0])
market_name = message[1]
error_code = message[2]
error_message = message[3]
aux = message[4]
decoded_message = "Timestamp: {} Market: {} Code: {} Message: {}".format(timestamp, market_name, error_code, error_message)
self.log_event("match_report_error", peer, sender, bus, topic, headers, decoded_message)
self.registrations.report_error(timestamp, market_name, error_code, error_message, aux)
def log_event(self, method_name, peer, sender, bus, topic, headers, decoded_message):
if self.verbose_logging:
_log.debug("{} Peer: {} Sender: {} Bus: {} Topic: {} Headers: {} Message: {}".format(method_name, peer, sender, bus, topic, headers, decoded_message))
def join_market(self, market_name, buyer_seller, reservation_callback,
offer_callback, aggregate_callback, price_callback, error_callback):
"""
This routine is called once to join a market as a buyer or a seller.
        The agent supplies callback functions that the MarketAgent calls as the market process proceeds.
:param market_name: The name of the market commodity.
:param buyer_seller: A string indicating whether the agent is buying from or selling to the market.
The agent shall use the pre-defined strings provided.
:param reservation_callback: This callback is called at the beginning of each round of bidding and clearing.
The agent can choose whether or not to participate in this round.
If the agent wants to participate it returns true otherwise it returns false.
If the agent does not specify a callback routine a reservation will be made for each round automatically.
A market will only exist if there are reservations for at least one buyer and at least one seller.
If the market fails to achieve the minimum participation the error callback will be called.
:param offer_callback: If the agent has made a reservation for the market this routine is called.
If the agent wishes to make an offer at this time the market agent computes either supply or demand curves
as appropriate and offers them to the market service by calling the make offer method.
For each market joined either an offer callback or an aggregate callback is required.
You can’t supply both for any single market.
:param aggregate_callback: When a market has received all its buy offers it calculates an aggregate
demand curve. When the market receives all of its sell offers it calculates an aggregate supply curve.
This callback delivers the aggregate curve to the market agent whenever the appropriate curve
        becomes available. If the market agent wants to use this to make an offer it would do that using
the make offer method. For each market joined either an offer callback or an aggregate callback is required.
You can’t supply both for any single market.
:param price_callback: This callback is called when the market clears. The price callback is optional.
:param error_callback: This callback is called at appropriate time points or when an error occurs.
If a market fails to form this will be called at the offer time.
If the market doesn’t receive all its offers this will be called at market clear time.
If the market fails to clear this would be called at the next reservation time.
This allows agents to respond at or near the normal time points. The error callback is optional.
"""
self.registrations.make_registration(market_name, buyer_seller,
reservation_callback, offer_callback,
aggregate_callback, price_callback, error_callback)
def make_offer(self, market_name, buyer_seller, curve):
"""
This call makes an offer with the MarketService.
:param market_name: The name of the market commodity.
:param buyer_seller: A string indicating whether the agent is buying from or selling to the market.
The agent shall use the pre-defined strings provided.
:param curve: The demand curve for buyers or the supply curve for sellers.
"""
result = self.registrations.make_offer(market_name, buyer_seller, curve)
return result | en | 0.89087 | # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # Copyright (c) 2017, Battelle Memorial Institute # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation # are those of the authors and should not be interpreted as representing # official policies, either expressed or implied, of the FreeBSD # Project. # # This material was prepared as an account of work sponsored by an # agency of the United States Government. Neither the United States # Government nor the United States Department of Energy, nor Battelle, # nor any of their employees, nor any jurisdiction or organization that # has cooperated in the development of these materials, makes any # warranty, express or implied, or assumes any legal liability or # responsibility for the accuracy, completeness, or usefulness or any # information, apparatus, product, software, or process disclosed, or # represents that its use would not infringe privately owned rights. # # Reference herein to any specific commercial product, process, or # service by trade name, trademark, manufacturer, or otherwise does not # necessarily constitute or imply its endorsement, recommendation, or # favoring by the United States Government or any agency thereof, or # Battelle Memorial Institute. The views and opinions of authors # expressed herein do not necessarily state or reflect those of the # United States Government or any agency thereof. # # PACIFIC NORTHWEST NATIONAL LABORATORY # operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 # }}} The MarketAgents serves as the base class for any agent that wants to praticipate in an auction market. By inheriting from this agent all the remote communication with the MarketService is handled and the sub-class can be unconcerned with those details. This routine is called once to join a market as a buyer or a seller. The agent supplies call-back functions that the MarketAgents calls as the market process proceeds. :param market_name: The name of the market commodity. :param buyer_seller: A string indicating whether the agent is buying from or selling to the market. The agent shall use the pre-defined strings provided. 
:param reservation_callback: This callback is called at the beginning of each round of bidding and clearing. The agent can choose whether or not to participate in this round. If the agent wants to participate it returns true otherwise it returns false. If the agent does not specify a callback routine a reservation will be made for each round automatically. A market will only exist if there are reservations for at least one buyer and at least one seller. If the market fails to achieve the minimum participation the error callback will be called. :param offer_callback: If the agent has made a reservation for the market this routine is called. If the agent wishes to make an offer at this time the market agent computes either supply or demand curves as appropriate and offers them to the market service by calling the make offer method. For each market joined either an offer callback or an aggregate callback is required. You can’t supply both for any single market. :param aggregate_callback: When a market has received all its buy offers it calculates an aggregate demand curve. When the market receives all of its sell offers it calculates an aggregate supply curve. This callback delivers the aggregate curve to the market agent whenever the appropriate curve becomes available. If the market agent want to use this to make an offer it would do that using the make offer method. For each market joined either an offer callback or an aggregate callback is required. You can’t supply both for any single market. :param price_callback: This callback is called when the market clears. The price callback is optional. :param error_callback: This callback is called at appropriate time points or when an error occurs. If a market fails to form this will be called at the offer time. If the market doesn’t receive all its offers this will be called at market clear time. If the market fails to clear this would be called at the next reservation time. This allows agents to respond at or near the normal time points. The error callback is optional. This call makes an offer with the MarketService. :param market_name: The name of the market commodity. :param buyer_seller: A string indicating whether the agent is buying from or selling to the market. The agent shall use the pre-defined strings provided. :param curve: The demand curve for buyers or the supply curve for sellers. | 0.92582 | 1 |
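A hedged sketch of a MarketAgent subclass built on the class in the record above. The market name, the "buyer" string (the real BUYER/SELLER constants live elsewhere in the base_market_agent package), the callback signatures, and the build_demand_curve helper are assumptions for illustration; only the join_market/make_offer usage mirrors the API shown.
class HeaterAgent(MarketAgent):
    """Illustrative buyer that bids into an assumed electricity market."""

    def __init__(self, **kwargs):
        super(HeaterAgent, self).__init__(**kwargs)
        self.market_name = "electric_a"  # assumed commodity name
        # "buyer" stands in for the package's BUYER constant (not shown in this record).
        self.join_market(self.market_name, "buyer",
                         reservation_callback=None,
                         offer_callback=self.offer_callback,
                         aggregate_callback=None,
                         price_callback=self.price_callback,
                         error_callback=self.error_callback)

    def offer_callback(self, timestamp, market_name, buyer_seller):
        # build_demand_curve is a hypothetical helper returning a curve object
        # accepted by make_offer (e.g. a poly line of price/quantity points).
        curve = self.build_demand_curve()
        self.make_offer(market_name, buyer_seller, curve)

    def price_callback(self, timestamp, market_name, buyer_seller, price, quantity):
        pass  # e.g. adjust a setpoint based on the cleared price

    def error_callback(self, timestamp, market_name, buyer_seller,
                       error_code, error_message, aux):
        pass  # e.g. fall back to a default schedule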
grr/server/instant_output_plugin.py | panhania/grr | 0 | 6632871 | #!/usr/bin/env python
"""Instant output plugins used by the API for on-the-fly conversion."""
import itertools
import re
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.server import aff4
from grr.server import export
class InstantOutputPlugin(object):
"""The base class for instant output plugins.
Instant output plugins do on-the-fly data conversion and are used in
GetExportedFlowResults/GetExportedHuntResults methods.
"""
__metaclass__ = registry.MetaclassRegistry
__abstract = True # pylint: disable=g-bad-name
plugin_name = None
friendly_name = None
description = None
output_file_extension = ""
@classmethod
def GetPluginClassByPluginName(cls, name):
for plugin_cls in cls.classes.values():
if plugin_cls.plugin_name == name:
return plugin_cls
raise KeyError("No plugin with name attribute '%s'." % name)
def __init__(self, source_urn=None, token=None):
"""OutputPlugin constructor.
Args:
source_urn: URN identifying source of the data (hunt or flow).
token: Security token.
Raises:
ValueError: If one of the keyword arguments is empty.
"""
super(InstantOutputPlugin, self).__init__()
if not source_urn:
raise ValueError("source_urn can't be empty.")
if not token:
raise ValueError("token can't be empty.")
self.source_urn = source_urn
self.token = token
@property
def output_file_name(self):
"""Name of the file where plugin's output should be written to."""
safe_path = re.sub(r":|/", "_", self.source_urn.Path().lstrip("/"))
return "results_%s%s" % (safe_path, self.output_file_extension)
def Start(self):
"""Start method is called in the beginning of the export.
Yields:
Chunks of bytes.
"""
def ProcessValues(self, value_cls, values_generator_fn):
"""Processes a batch of values with the same type.
ProcessValues is called *once per value type* for each value type in
the flow/hunt results collection.
Args:
value_cls: Class identifying type of the values to be processed.
values_generator_fn: Function returning an iterable with values. Each
value is a GRRMessage wrapping a value of a value_cls type.
values_generator_fn may be called multiple times within
1 ProcessValues() call - for example, when multiple passes over
the data are required.
"""
raise NotImplementedError()
def Finish(self):
"""Finish method is called at the very end of the export.
Yields:
Chunks of bytes.
"""
class InstantOutputPluginWithExportConversion(InstantOutputPlugin):
"""Instant output plugin that flattens data before exporting."""
__abstract = True # pylint: disable=g-bad-name
BATCH_SIZE = 5000
def __init__(self, *args, **kwargs):
super(InstantOutputPluginWithExportConversion, self).__init__(
*args, **kwargs)
self._cached_metadata = {}
def _GetMetadataForClients(self, client_urns):
"""Fetches metadata for a given list of clients."""
result = {}
metadata_to_fetch = set()
for urn in client_urns:
try:
result[urn] = self._cached_metadata[urn]
except KeyError:
metadata_to_fetch.add(urn)
if metadata_to_fetch:
client_fds = aff4.FACTORY.MultiOpen(
metadata_to_fetch, mode="r", token=self.token)
fetched_metadata = [
export.GetMetadata(client_fd, token=self.token)
for client_fd in client_fds
]
for metadata in fetched_metadata:
metadata.source_urn = self.source_urn
self._cached_metadata[metadata.client_urn] = metadata
result[metadata.client_urn] = metadata
metadata_to_fetch.remove(metadata.client_urn)
for urn in metadata_to_fetch:
default_mdata = export.ExportedMetadata(source_urn=self.source_urn)
result[urn] = default_mdata
self._cached_metadata[urn] = default_mdata
return [result[urn] for urn in client_urns]
def GetExportOptions(self):
"""Rerturns export options to be used by export converter."""
return export.ExportOptions()
def ProcessSingleTypeExportedValues(self, original_type, exported_values):
"""Processes exported values of the same type.
Exported_values are guaranteed to have the same type. Consequently, this
function may be called multiple times with the same original_type
argument. Typical example: when export converters generate multiple
kinds of exported values for a given source value (for example,
Process is converted to ExportedProcess and ExportedNetworkConnection
values).
Args:
original_type: Class of the original set of values that were converted
to exported_values.
      exported_values: An iterator with exported values. All values are
        guaranteed to have the same class.
Yields:
Chunks of bytes.
"""
raise NotImplementedError()
def _GenerateSingleTypeIteration(self, next_types, processed_types,
converted_responses):
"""Yields responses of a given type only.
_GenerateSingleTypeIteration iterates through converted_responses and
only yields responses of the same type. The type is either popped from
next_types or inferred from the first item of converted_responses.
The type is added to a set of processed_types.
Along the way _GenerateSingleTypeIteration updates next_types set.
All newly encountered and not previously processed types are added to
next_types set.
Calling _GenerateSingleTypeIteration multiple times allows doing
multiple passes on converted responses and emitting converted responses
of the same type continuously (so that they can be written into
the same file by the plugin).
Args:
next_types: List of value type classes that will be used in further
iterations.
processed_types: List of value type classes that have been used
already.
converted_responses: Iterable with values to iterate over.
Yields:
Values from converted_response with the same type. Type is either
popped from the next_types set or inferred from the first
converted_responses value.
"""
if not next_types:
current_type = None
else:
current_type = next_types.pop()
processed_types.add(current_type)
for converted_response in converted_responses:
if not current_type:
current_type = converted_response.__class__
processed_types.add(current_type)
if converted_response.__class__ != current_type:
if converted_response.__class__ not in processed_types:
next_types.add(converted_response.__class__)
continue
yield converted_response
def _GenerateConvertedValues(self, converter, grr_messages):
"""Generates converted values using given converter from given messages.
Groups values in batches of BATCH_SIZE size and applies the converter
to each batch.
Args:
converter: ExportConverter instance.
grr_messages: An iterable (a generator is assumed) with GRRMessage values.
Yields:
Values generated by the converter.
Raises:
ValueError: if any of the GrrMessage objects doesn't have "source" set.
"""
for batch in utils.Grouper(grr_messages, self.BATCH_SIZE):
metadata_items = self._GetMetadataForClients([gm.source for gm in batch])
batch_with_metadata = zip(metadata_items, [gm.payload for gm in batch])
for result in converter.BatchConvert(
batch_with_metadata, token=self.token):
yield result
def ProcessValues(self, value_type, values_generator_fn):
converter_classes = export.ExportConverter.GetConvertersByClass(value_type)
if not converter_classes:
return
converters = [cls(self.GetExportOptions()) for cls in converter_classes]
next_types = set()
processed_types = set()
while True:
converted_responses = itertools.chain.from_iterable(
self._GenerateConvertedValues(converter, values_generator_fn())
for converter in converters)
generator = self._GenerateSingleTypeIteration(next_types, processed_types,
converted_responses)
for chunk in self.ProcessSingleTypeExportedValues(value_type, generator):
yield chunk
if not next_types:
break
def ApplyPluginToMultiTypeCollection(plugin, output_collection,
source_urn=None):
"""Applies instant output plugin to a multi-type collection.
Args:
plugin: InstantOutputPlugin instance.
output_collection: MultiTypeCollection instance.
source_urn: If not None, override source_urn for collection items. This has
to be used when exporting flow results - their GrrMessages don't have
"source" attribute set.
Yields:
Bytes chunks, as generated by the plugin.
"""
for chunk in plugin.Start():
yield chunk
for stored_type_name in sorted(output_collection.ListStoredTypes()):
stored_cls = rdfvalue.RDFValue.classes[stored_type_name]
# pylint: disable=cell-var-from-loop
def GetValues():
for timestamp, value in output_collection.ScanByType(stored_type_name):
_ = timestamp
if source_urn:
value.source = source_urn
yield value
# pylint: enable=cell-var-from-loop
for chunk in plugin.ProcessValues(stored_cls, GetValues):
yield chunk
for chunk in plugin.Finish():
yield chunk
| #!/usr/bin/env python
"""Instant output plugins used by the API for on-the-fly conversion."""
import itertools
import re
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.server import aff4
from grr.server import export
class InstantOutputPlugin(object):
"""The base class for instant output plugins.
Instant output plugins do on-the-fly data conversion and are used in
GetExportedFlowResults/GetExportedHuntResults methods.
"""
__metaclass__ = registry.MetaclassRegistry
__abstract = True # pylint: disable=g-bad-name
plugin_name = None
friendly_name = None
description = None
output_file_extension = ""
@classmethod
def GetPluginClassByPluginName(cls, name):
for plugin_cls in cls.classes.values():
if plugin_cls.plugin_name == name:
return plugin_cls
raise KeyError("No plugin with name attribute '%s'." % name)
def __init__(self, source_urn=None, token=None):
"""OutputPlugin constructor.
Args:
source_urn: URN identifying source of the data (hunt or flow).
token: Security token.
Raises:
ValueError: If one of the keyword arguments is empty.
"""
super(InstantOutputPlugin, self).__init__()
if not source_urn:
raise ValueError("source_urn can't be empty.")
if not token:
raise ValueError("token can't be empty.")
self.source_urn = source_urn
self.token = token
@property
def output_file_name(self):
"""Name of the file where plugin's output should be written to."""
safe_path = re.sub(r":|/", "_", self.source_urn.Path().lstrip("/"))
return "results_%s%s" % (safe_path, self.output_file_extension)
def Start(self):
"""Start method is called in the beginning of the export.
Yields:
Chunks of bytes.
"""
def ProcessValues(self, value_cls, values_generator_fn):
"""Processes a batch of values with the same type.
ProcessValues is called *once per value type* for each value type in
the flow/hunt results collection.
Args:
value_cls: Class identifying type of the values to be processed.
values_generator_fn: Function returning an iterable with values. Each
value is a GRRMessage wrapping a value of a value_cls type.
values_generator_fn may be called multiple times within
1 ProcessValues() call - for example, when multiple passes over
the data are required.
"""
raise NotImplementedError()
def Finish(self):
"""Finish method is called at the very end of the export.
Yields:
Chunks of bytes.
"""
class InstantOutputPluginWithExportConversion(InstantOutputPlugin):
"""Instant output plugin that flattens data before exporting."""
__abstract = True # pylint: disable=g-bad-name
BATCH_SIZE = 5000
def __init__(self, *args, **kwargs):
super(InstantOutputPluginWithExportConversion, self).__init__(
*args, **kwargs)
self._cached_metadata = {}
def _GetMetadataForClients(self, client_urns):
"""Fetches metadata for a given list of clients."""
result = {}
metadata_to_fetch = set()
for urn in client_urns:
try:
result[urn] = self._cached_metadata[urn]
except KeyError:
metadata_to_fetch.add(urn)
if metadata_to_fetch:
client_fds = aff4.FACTORY.MultiOpen(
metadata_to_fetch, mode="r", token=self.token)
fetched_metadata = [
export.GetMetadata(client_fd, token=self.token)
for client_fd in client_fds
]
for metadata in fetched_metadata:
metadata.source_urn = self.source_urn
self._cached_metadata[metadata.client_urn] = metadata
result[metadata.client_urn] = metadata
metadata_to_fetch.remove(metadata.client_urn)
for urn in metadata_to_fetch:
default_mdata = export.ExportedMetadata(source_urn=self.source_urn)
result[urn] = default_mdata
self._cached_metadata[urn] = default_mdata
return [result[urn] for urn in client_urns]
def GetExportOptions(self):
"""Rerturns export options to be used by export converter."""
return export.ExportOptions()
def ProcessSingleTypeExportedValues(self, original_type, exported_values):
"""Processes exported values of the same type.
Exported_values are guaranteed to have the same type. Consequently, this
function may be called multiple times with the same original_type
argument. Typical example: when export converters generate multiple
kinds of exported values for a given source value (for example,
Process is converted to ExportedProcess and ExportedNetworkConnection
values).
Args:
original_type: Class of the original set of values that were converted
to exported_values.
exported_values: An iterator with exported values. All values are
guaranteed to have the same class.
Yields:
Chunks of bytes.
"""
raise NotImplementedError()
def _GenerateSingleTypeIteration(self, next_types, processed_types,
converted_responses):
"""Yields responses of a given type only.
_GenerateSingleTypeIteration iterates through converted_responses and
only yields responses of the same type. The type is either popped from
next_types or inferred from the first item of converted_responses.
The type is added to a set of processed_types.
Along the way _GenerateSingleTypeIteration updates next_types set.
All newly encountered and not previously processed types are added to
next_types set.
Calling _GenerateSingleTypeIteration multiple times allows doing
multiple passes on converted responses and emitting converted responses
of the same type continuously (so that they can be written into
the same file by the plugin).
Args:
next_types: List of value type classes that will be used in further
iterations.
processed_types: List of value type classes that have been used
already.
converted_responses: Iterable with values to iterate over.
Yields:
Values from converted_response with the same type. Type is either
popped from the next_types set or inferred from the first
converted_responses value.
"""
if not next_types:
current_type = None
else:
current_type = next_types.pop()
processed_types.add(current_type)
for converted_response in converted_responses:
if not current_type:
current_type = converted_response.__class__
processed_types.add(current_type)
if converted_response.__class__ != current_type:
if converted_response.__class__ not in processed_types:
next_types.add(converted_response.__class__)
continue
yield converted_response
def _GenerateConvertedValues(self, converter, grr_messages):
"""Generates converted values using given converter from given messages.
Groups values in batches of BATCH_SIZE size and applies the converter
to each batch.
Args:
converter: ExportConverter instance.
grr_messages: An iterable (a generator is assumed) with GRRMessage values.
Yields:
Values generated by the converter.
Raises:
ValueError: if any of the GrrMessage objects doesn't have "source" set.
"""
for batch in utils.Grouper(grr_messages, self.BATCH_SIZE):
metadata_items = self._GetMetadataForClients([gm.source for gm in batch])
batch_with_metadata = zip(metadata_items, [gm.payload for gm in batch])
for result in converter.BatchConvert(
batch_with_metadata, token=self.token):
yield result
def ProcessValues(self, value_type, values_generator_fn):
converter_classes = export.ExportConverter.GetConvertersByClass(value_type)
if not converter_classes:
return
converters = [cls(self.GetExportOptions()) for cls in converter_classes]
next_types = set()
processed_types = set()
while True:
converted_responses = itertools.chain.from_iterable(
self._GenerateConvertedValues(converter, values_generator_fn())
for converter in converters)
generator = self._GenerateSingleTypeIteration(next_types, processed_types,
converted_responses)
for chunk in self.ProcessSingleTypeExportedValues(value_type, generator):
yield chunk
if not next_types:
break
def ApplyPluginToMultiTypeCollection(plugin, output_collection,
source_urn=None):
"""Applies instant output plugin to a multi-type collection.
Args:
plugin: InstantOutputPlugin instance.
output_collection: MultiTypeCollection instance.
source_urn: If not None, override source_urn for collection items. This has
to be used when exporting flow results - their GrrMessages don't have
"source" attribute set.
Yields:
Bytes chunks, as generated by the plugin.
"""
for chunk in plugin.Start():
yield chunk
for stored_type_name in sorted(output_collection.ListStoredTypes()):
stored_cls = rdfvalue.RDFValue.classes[stored_type_name]
# pylint: disable=cell-var-from-loop
def GetValues():
for timestamp, value in output_collection.ScanByType(stored_type_name):
_ = timestamp
if source_urn:
value.source = source_urn
yield value
# pylint: enable=cell-var-from-loop
for chunk in plugin.ProcessValues(stored_cls, GetValues):
yield chunk
for chunk in plugin.Finish():
yield chunk
| en | 0.76005 | #!/usr/bin/env python Instant output plugins used by the API for on-the-fly conversion. The base class for instant output plugins. Instant output plugins do on-the-fly data conversion and are used in GetExportedFlowResults/GetExportedHuntResults methods. # pylint: disable=g-bad-name OutputPlugin constructor. Args: source_urn: URN identifying source of the data (hunt or flow). token: Security token. Raises: ValueError: If one of the keyword arguments is empty. Name of the file where plugin's output should be written to. Start method is called in the beginning of the export. Yields: Chunks of bytes. Processes a batch of values with the same type. ProcessValues is called *once per value type* for each value type in the flow/hunt results collection. Args: value_cls: Class identifying type of the values to be processed. values_generator_fn: Function returning an iterable with values. Each value is a GRRMessage wrapping a value of a value_cls type. values_generator_fn may be called multiple times within 1 ProcessValues() call - for example, when multiple passes over the data are required. Finish method is called at the very end of the export. Yields: Chunks of bytes. Instant output plugin that flattens data before exporting. # pylint: disable=g-bad-name Fetches metadata for a given list of clients. Rerturns export options to be used by export converter. Processes exported values of the same type. Exported_values are guaranteed to have the same type. Consequently, this function may be called multiple times with the same original_type argument. Typical example: when export converters generate multiple kinds of exported values for a given source value (for example, Process is converted to ExportedProcess and ExportedNetworkConnection values). Args: original_type: Class of the original set of values that were converted to exported_values. exported_values: An iterator with exported value. All values are guranteed to have the same class. Yields: Chunks of bytes. Yields responses of a given type only. _GenerateSingleTypeIteration iterates through converted_responses and only yields responses of the same type. The type is either popped from next_types or inferred from the first item of converted_responses. The type is added to a set of processed_types. Along the way _GenerateSingleTypeIteration updates next_types set. All newly encountered and not previously processed types are added to next_types set. Calling _GenerateSingleTypeIteration multiple times allows doing multiple passes on converted responses and emitting converted responses of the same type continuously (so that they can be written into the same file by the plugin). Args: next_types: List of value type classes that will be used in further iterations. processed_types: List of value type classes that have been used already. converted_responses: Iterable with values to iterate over. Yields: Values from converted_response with the same type. Type is either popped from the next_types set or inferred from the first converted_responses value. Generates converted values using given converter from given messages. Groups values in batches of BATCH_SIZE size and applies the converter to each batch. Args: converter: ExportConverter instance. grr_messages: An iterable (a generator is assumed) with GRRMessage values. Yields: Values generated by the converter. Raises: ValueError: if any of the GrrMessage objects doesn't have "source" set. Applies instant output plugin to a multi-type collection. Args: plugin: InstantOutputPlugin instance. 
output_collection: MultiTypeCollection instance. source_urn: If not None, override source_urn for collection items. This has to be used when exporting flow results - their GrrMessages don't have "source" attribute set. Yields: Bytes chunks, as generated by the plugin. # pylint: disable=cell-var-from-loop # pylint: enable=cell-var-from-loop | 2.385118 | 2 |
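The ProcessValues()/_GenerateSingleTypeIteration() pair above implements a multi-pass grouping of a mixed-type value stream so that each type is emitted contiguously. The following standalone sketch (illustrative only, no GRR dependencies; make_values stands in for values_generator_fn and may be called once per pass) reproduces that pattern:

def group_by_type_multipass(make_values):
    """Illustrative re-implementation of the multi-pass, single-type grouping."""
    next_types = set()
    processed_types = set()
    while True:
        current_type = next_types.pop() if next_types else None
        if current_type is not None:
            processed_types.add(current_type)
        batch = []
        for value in make_values():  # one full pass over the data per type
            if current_type is None:
                current_type = type(value)  # infer the type from the first item
                processed_types.add(current_type)
            if type(value) is not current_type:
                if type(value) not in processed_types:
                    next_types.add(type(value))
                continue
            batch.append(value)
        if current_type is not None:
            yield current_type, batch
        if not next_types:  # no unprocessed types remain
            break

data = [1, "a", 2.5, 2, "b", 3.5]
for value_type, values in group_by_type_multipass(lambda: iter(data)):
    print(value_type.__name__, values)
# Each type's values come out contiguously, e.g. int [1, 2], then the float and str groups.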
redis_client.py | amirothman/deck-of-cards-as-a-service | 1 | 6632872 | from redis import StrictRedis
redis_client = StrictRedis(decode_responses=True)
| from redis import StrictRedis
redis_client = StrictRedis(decode_responses=True)
| none | 1 | 1.163763 | 1 |
|
source/AI_game/Feed_Forward_Neural_Network.py | anshulp2912/Slytherin-Game-using-Genetic-Algorithm | 0 | 6632873 | import numpy as np
n_x = 7
n_h = 9
n_h2 = 15
n_y = 3
W1_shape = (9,7)
W2_shape = (15,9)
W3_shape = (3,15)
def get_weights_from_encoded(individual):
W1 = individual[0:W1_shape[0] * W1_shape[1]]
W2 = individual[W1_shape[0] * W1_shape[1]:W2_shape[0] * W2_shape[1] + W1_shape[0] * W1_shape[1]]
W3 = individual[W2_shape[0] * W2_shape[1] + W1_shape[0] * W1_shape[1]:]
return (
W1.reshape(W1_shape[0], W1_shape[1]), W2.reshape(W2_shape[0], W2_shape[1]), W3.reshape(W3_shape[0], W3_shape[1]))
def softmax(z):
s = np.exp(z.T) / np.sum(np.exp(z.T), axis=1).reshape(-1, 1)
return s
def sigmoid(z):
s = 1 / (1 + np.exp(-z))
return s
def forward_propagation(X, individual):
W1, W2, W3 = get_weights_from_encoded(individual)
Z1 = np.matmul(W1, X.T)
A1 = np.tanh(Z1)
Z2 = np.matmul(W2, A1)
A2 = np.tanh(Z2)
Z3 = np.matmul(W3, A2)
A3 = softmax(Z3)
return A3 | import numpy as np
n_x = 7
n_h = 9
n_h2 = 15
n_y = 3
W1_shape = (9,7)
W2_shape = (15,9)
W3_shape = (3,15)
def get_weights_from_encoded(individual):
W1 = individual[0:W1_shape[0] * W1_shape[1]]
W2 = individual[W1_shape[0] * W1_shape[1]:W2_shape[0] * W2_shape[1] + W1_shape[0] * W1_shape[1]]
W3 = individual[W2_shape[0] * W2_shape[1] + W1_shape[0] * W1_shape[1]:]
return (
W1.reshape(W1_shape[0], W1_shape[1]), W2.reshape(W2_shape[0], W2_shape[1]), W3.reshape(W3_shape[0], W3_shape[1]))
def softmax(z):
s = np.exp(z.T) / np.sum(np.exp(z.T), axis=1).reshape(-1, 1)
return s
def sigmoid(z):
s = 1 / (1 + np.exp(-z))
return s
def forward_propagation(X, individual):
W1, W2, W3 = get_weights_from_encoded(individual)
Z1 = np.matmul(W1, X.T)
A1 = np.tanh(Z1)
Z2 = np.matmul(W2, A1)
A2 = np.tanh(Z2)
Z3 = np.matmul(W3, A2)
A3 = softmax(Z3)
return A3 | none | 1 | 2.873835 | 3 |
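A minimal usage sketch for the forward pass above; the flattened weight vector length 9*7 + 15*9 + 3*15 = 243 follows from the hard-coded layer shapes, while the random weights and the import comment are assumptions for illustration:

import numpy as np
# Assumes the definitions above are importable, e.g.:
# from Feed_Forward_Neural_Network import forward_propagation

rng = np.random.default_rng(0)
individual = rng.standard_normal(9 * 7 + 15 * 9 + 3 * 15)  # 243 flattened weights
X = rng.standard_normal((1, 7))                            # one 7-feature observation

probs = forward_propagation(X, individual)
print(probs.shape)        # (1, 3): softmax scores over the 3 possible moves
print(probs.sum(axis=1))  # ~[1.0]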
|
kats/utils/decomposition.py | koushikroy/Kats | 1 | 6632874 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from statsmodels.tsa.seasonal import STL, seasonal_decompose
# from numpy.typing import ArrayLike
ArrayLike = Union[np.ndarray, Sequence[float]]
Figsize = Tuple[int, int]
def _identity(x: ArrayLike) -> ArrayLike:
return x
class TimeSeriesDecomposition:
"""Model class for Time Series Decomposition.
This class provides utilities to decompose an input time series
Pass specific arguments to seasonal_decompose and STL functions via kwargs.
Attributes:
data: the input time series data as `TimeSeriesData`
decomposition: `additive` or `multiplicative` decomposition
method: `STL decomposition` or `seasonal_decompose`
"""
freq: Optional[str] = None
results: Optional[Dict[str, TimeSeriesData]] = None
def __init__(
self,
data: TimeSeriesData,
decomposition: str = "additive",
method: str = "STL",
**kwargs,
) -> None:
if not isinstance(data.value, pd.Series):
msg = f"Only support univariate time series, but got {type(data.value)}."
logging.error(msg)
raise ValueError(msg)
self.data = data
if decomposition in ("additive", "multiplicative"):
self.decomposition = decomposition
else:
logging.info(
"Invalid decomposition setting specified; "
"defaulting to Additive Decomposition."
)
self.decomposition = "additive"
if method == "seasonal_decompose":
self.method = self.__decompose_seasonal
else:
if method != "STL":
logging.info(
f"""Invalid decomposition setting {method} specified.
Possible Values: STL, seasonal_decompose.
Defaulting to STL."""
)
self.method = self.__decompose_STL
## The following are params for the STL Module
self.period = kwargs.get("period", None)
self.seasonal = kwargs.get("seasonal", 7)
self.trend = kwargs.get("trend", None)
self.low_pass = kwargs.get("low_pass", None)
self.seasonal_deg = kwargs.get("seasonal_deg", 1)
self.trend_deg = kwargs.get("trend_deg", 1)
self.low_pass_deg = kwargs.get("low_pass_deg", 1)
self.robust = kwargs.get("robust", False)
self.seasonal_jump = kwargs.get("seasonal_jump", 1)
self.trend_jump = kwargs.get("trend_jump", 1)
self.low_pass_jump = kwargs.get("low_pass_jump", 1)
def __clean_ts(self) -> pd.DataFrame:
"""Internal function to clean the time series.
Internal function to interpolate time series and infer frequency of
time series required for decomposition.
"""
original = pd.DataFrame(
list(self.data.value), index=pd.to_datetime(self.data.time), columns=["y"]
)
if pd.infer_freq(original.index) is None:
original = original.asfreq("D")
logging.info("Setting frequency to Daily since it cannot be inferred")
self.freq = pd.infer_freq(original.index)
original.interpolate(
method="polynomial", limit_direction="both", order=3, inplace=True
)
## This is a hack since polynomial interpolation is not working here
if any(original["y"].isna()):
original.interpolate(method="linear", limit_direction="both", inplace=True)
# pyre-ignore[7]: Expected `DataFrame` but got
# `Union[pd.core.frame.DataFrame, pd.core.series.Series]`.
return original
def _get_period(self) -> Optional[int]:
period = self.period
freq = self.freq
if period is None:
if freq is not None and "T" in freq:
logging.warning(
"""Seasonal Decompose cannot handle sub day level granularity.
Please consider setting period yourself based on the input data.
Defaulting to a period of 2."""
)
period = 2
return period
def __decompose_seasonal(self, original: pd.DataFrame) -> Dict[str, pd.DataFrame]:
"""Internal function to call seasonal_decompose to do the decomposition."""
period = self._get_period()
result = seasonal_decompose(original, model=self.decomposition, period=period)
return {
"trend": result.trend,
"seasonal": result.seasonal,
"rem": result.resid,
}
def __decompose_STL(self, original: pd.DataFrame) -> Dict[str, pd.DataFrame]:
"""Internal function to call STL to do the decomposition.
The arguments to STL can be passed in the class via kwargs
"""
self.period = period = self._get_period()
if self.decomposition == "additive":
data = original
post_transform = _identity
else:
if np.any(original <= 0):
logging.error(
"Multiplicative seasonality is not appropriate "
"for zero and negative values."
)
data = np.log(original)
post_transform = np.exp
result = STL(
data,
period=period,
seasonal=self.seasonal,
trend=self.trend,
low_pass=self.low_pass,
seasonal_deg=self.seasonal_deg,
trend_deg=self.trend_deg,
low_pass_deg=self.low_pass_deg,
robust=self.robust,
seasonal_jump=self.seasonal_jump,
trend_jump=self.trend_jump,
low_pass_jump=self.low_pass_jump,
).fit()
return {
"trend": post_transform(result.trend),
"seasonal": post_transform(result.seasonal),
"rem": post_transform(result.resid),
}
def __decompose(self, original: pd.DataFrame) -> Dict[str, TimeSeriesData]:
output = self.method(original)
return {
name: TimeSeriesData(
ts.reset_index(), time_col_name=self.data.time_col_name
)
for name, ts in output.items()
}
def decomposer(self) -> Dict[str, TimeSeriesData]:
"""Decompose the time series.
Returns:
A dictionary with three time series for the three components:
`trend` : Trend
`seasonal` : Seasonality, and
`rem` : Residual
"""
original = self.__clean_ts()
self.results = result = self.__decompose(original)
return result
def plot(
self,
figsize: Optional[Figsize] = None,
linewidth: int = 3,
xlabel: str = "Time",
original_title: str = "Original Time Series",
trend_title="Trend",
seasonality_title="Seasonality",
residual_title="Residual",
subplot_kwargs: Optional[Dict[str, Any]] = None,
**kwargs,
) -> Tuple[plt.Axes, plt.Axes, plt.Axes, plt.Axes]:
"""Plot the original time series and the three decomposed components."""
results = self.results
if results is None:
raise ValueError("Call decomposer() before plot().")
if figsize is None:
figsize = (20, 10)
if subplot_kwargs is None:
subplot_kwargs = {"hspace": 0.2}
sharex = kwargs.pop("sharex", True)
fig, axs = plt.subplots(
nrows=4, ncols=1, figsize=figsize, sharex=sharex, **kwargs
)
titles = [trend_title, seasonality_title, residual_title]
parts = ["trend", "seasonal", "rem"]
axs[0].plot(
self.data.time.values,
self.data.value.values,
linewidth=linewidth,
)
axs[0].set_title(original_title)
for part, ax, title in zip(parts, axs, titles):
ts: TimeSeriesData = results[part]
ax.plot(ts.time.values, ts.value.values, linewidth=linewidth)
ax.set_title(title)
axs[3].set_xlabel(xlabel)
plt.subplots_adjust(**subplot_kwargs)
return (axs[0], axs[1], axs[2], axs[3])
| # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from statsmodels.tsa.seasonal import STL, seasonal_decompose
# from numpy.typing import ArrayLike
ArrayLike = Union[np.ndarray, Sequence[float]]
Figsize = Tuple[int, int]
def _identity(x: ArrayLike) -> ArrayLike:
return x
class TimeSeriesDecomposition:
"""Model class for Time Series Decomposition.
This class provides utilities to decompose an input time series
Pass specific arguments to seasonal_decompose and STL functions via kwargs.
Attributes:
data: the input time series data as `TimeSeriesData`
decomposition: `additive` or `multiplicative` decomposition
method: `STL decomposition` or `seasonal_decompose`
"""
freq: Optional[str] = None
results: Optional[Dict[str, TimeSeriesData]] = None
def __init__(
self,
data: TimeSeriesData,
decomposition: str = "additive",
method: str = "STL",
**kwargs,
) -> None:
if not isinstance(data.value, pd.Series):
msg = f"Only support univariate time series, but got {type(data.value)}."
logging.error(msg)
raise ValueError(msg)
self.data = data
if decomposition in ("additive", "multiplicative"):
self.decomposition = decomposition
else:
logging.info(
"Invalid decomposition setting specified; "
"defaulting to Additive Decomposition."
)
self.decomposition = "additive"
if method == "seasonal_decompose":
self.method = self.__decompose_seasonal
else:
if method != "STL":
logging.info(
f"""Invalid decomposition setting {method} specified.
Possible Values: STL, seasonal_decompose.
Defaulting to STL."""
)
self.method = self.__decompose_STL
## The following are params for the STL Module
self.period = kwargs.get("period", None)
self.seasonal = kwargs.get("seasonal", 7)
self.trend = kwargs.get("trend", None)
self.low_pass = kwargs.get("low_pass", None)
self.seasonal_deg = kwargs.get("seasonal_deg", 1)
self.trend_deg = kwargs.get("trend_deg", 1)
self.low_pass_deg = kwargs.get("low_pass_deg", 1)
self.robust = kwargs.get("robust", False)
self.seasonal_jump = kwargs.get("seasonal_jump", 1)
self.trend_jump = kwargs.get("trend_jump", 1)
self.low_pass_jump = kwargs.get("low_pass_jump", 1)
def __clean_ts(self) -> pd.DataFrame:
"""Internal function to clean the time series.
Internal function to interpolate time series and infer frequency of
time series required for decomposition.
"""
original = pd.DataFrame(
list(self.data.value), index=pd.to_datetime(self.data.time), columns=["y"]
)
if pd.infer_freq(original.index) is None:
original = original.asfreq("D")
logging.info("Setting frequency to Daily since it cannot be inferred")
self.freq = pd.infer_freq(original.index)
original.interpolate(
method="polynomial", limit_direction="both", order=3, inplace=True
)
## This is a hack since polynomial interpolation is not working here
if any(original["y"].isna()):
original.interpolate(method="linear", limit_direction="both", inplace=True)
# pyre-ignore[7]: Expected `DataFrame` but got
# `Union[pd.core.frame.DataFrame, pd.core.series.Series]`.
return original
def _get_period(self) -> Optional[int]:
period = self.period
freq = self.freq
if period is None:
if freq is not None and "T" in freq:
logging.warning(
"""Seasonal Decompose cannot handle sub day level granularity.
Please consider setting period yourself based on the input data.
Defaulting to a period of 2."""
)
period = 2
return period
def __decompose_seasonal(self, original: pd.DataFrame) -> Dict[str, pd.DataFrame]:
"""Internal function to call seasonal_decompose to do the decomposition."""
period = self._get_period()
result = seasonal_decompose(original, model=self.decomposition, period=period)
return {
"trend": result.trend,
"seasonal": result.seasonal,
"rem": result.resid,
}
def __decompose_STL(self, original: pd.DataFrame) -> Dict[str, pd.DataFrame]:
"""Internal function to call STL to do the decomposition.
The arguments to STL can be passed in the class via kwargs
"""
self.period = period = self._get_period()
if self.decomposition == "additive":
data = original
post_transform = _identity
else:
if np.any(original <= 0):
logging.error(
"Multiplicative seasonality is not appropriate "
"for zero and negative values."
)
data = np.log(original)
post_transform = np.exp
result = STL(
data,
period=period,
seasonal=self.seasonal,
trend=self.trend,
low_pass=self.low_pass,
seasonal_deg=self.seasonal_deg,
trend_deg=self.trend_deg,
low_pass_deg=self.low_pass_deg,
robust=self.robust,
seasonal_jump=self.seasonal_jump,
trend_jump=self.trend_jump,
low_pass_jump=self.low_pass_jump,
).fit()
return {
"trend": post_transform(result.trend),
"seasonal": post_transform(result.seasonal),
"rem": post_transform(result.resid),
}
def __decompose(self, original: pd.DataFrame) -> Dict[str, TimeSeriesData]:
output = self.method(original)
return {
name: TimeSeriesData(
ts.reset_index(), time_col_name=self.data.time_col_name
)
for name, ts in output.items()
}
def decomposer(self) -> Dict[str, TimeSeriesData]:
"""Decompose the time series.
Returns:
A dictionary with three time series for the three components:
`trend` : Trend
`seasonal` : Seasonality, and
`rem` : Residual
"""
original = self.__clean_ts()
self.results = result = self.__decompose(original)
return result
def plot(
self,
figsize: Optional[Figsize] = None,
linewidth: int = 3,
xlabel: str = "Time",
original_title: str = "Original Time Series",
trend_title="Trend",
seasonality_title="Seasonality",
residual_title="Residual",
subplot_kwargs: Optional[Dict[str, Any]] = None,
**kwargs,
) -> Tuple[plt.Axes, plt.Axes, plt.Axes, plt.Axes]:
"""Plot the original time series and the three decomposed components."""
results = self.results
if results is None:
raise ValueError("Call decomposer() before plot().")
if figsize is None:
figsize = (20, 10)
if subplot_kwargs is None:
subplot_kwargs = {"hspace": 0.2}
sharex = kwargs.pop("sharex", True)
fig, axs = plt.subplots(
nrows=4, ncols=1, figsize=figsize, sharex=sharex, **kwargs
)
titles = [trend_title, seasonality_title, residual_title]
parts = ["trend", "seasonal", "rem"]
axs[0].plot(
self.data.time.values,
self.data.value.values,
linewidth=linewidth,
)
axs[0].set_title(original_title)
for part, ax, title in zip(parts, axs, titles):
ts: TimeSeriesData = results[part]
ax.plot(ts.time.values, ts.value.values, linewidth=linewidth)
ax.set_title(title)
axs[3].set_xlabel(xlabel)
plt.subplots_adjust(**subplot_kwargs)
return (axs[0], axs[1], axs[2], axs[3])
| en | 0.835982 | # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # from numpy.typing import ArrayLike Model class for Time Series Decomposition. This class provides utilities to decompose an input time series Pass specific arguments to seasonal_decompose and STL functions via kwargs. Attributes: data: the input time series data as `TimeSeriesData` decomposition: `additive` or `multiplicative` decomposition method: `STL decompostion` or `seasonal_decompose` Invalid decomposition setting {method} specified. Possible Values: STL, seasonal_decompose. Defaulting to STL. ## The following are params for the STL Module Internal function to clean the time series. Internal function to interpolate time series and infer frequency of time series required for decomposition. ## This is a hack since polynomial interpolation is not working here # pyre-ignore[7]: Expected `DataFrame` but got # `Union[pd.core.frame.DataFrame, pd.core.series.Series]`. Seasonal Decompose cannot handle sub day level granularity. Please consider setting period yourself based on the input data. Defaulting to a period of 2. Internal function to call seasonal_decompose to do the decomposition. Internal function to call STL to do the decomposition. The arguments to STL can be passed in the class via kwargs Decompose the time series. Returns: A dictionary with three time series for the three components: `trend` : Trend `seasonal` : Seasonality, and `rem` : Residual Plot the original time series and the three decomposed components. | 2.632339 | 3 |
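A hedged usage sketch for TimeSeriesDecomposition above, assuming kats is installed and that TimeSeriesData accepts a DataFrame with 'time' and 'value' columns; the synthetic weekly-seasonal series is only an illustration:

import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
# from kats.utils.decomposition import TimeSeriesDecomposition  # the module above

n = 365
time = pd.date_range("2020-01-01", periods=n, freq="D")
value = (0.05 * np.arange(n)                             # slow trend
         + 2.0 * np.sin(2 * np.pi * np.arange(n) / 7.0)  # weekly seasonality
         + np.random.normal(scale=0.3, size=n))          # noise
ts = TimeSeriesData(pd.DataFrame({"time": time, "value": value}))

decomp = TimeSeriesDecomposition(ts, decomposition="additive", method="STL", period=7)
components = decomp.decomposer()   # dict with "trend", "seasonal", "rem"
decomp.plot()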
aid/pycopia/ascii.py | kdart/pycopia | 89 | 6632875 | <reponame>kdart/pycopia
#!/usr/bin/python
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# ASCII text classified into named sets.
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
digits = '0123456789'
hexdigits = '0123456789ABCDEF'
letters = lowercase + uppercase
alphanumeric = lowercase + uppercase + digits
whitespace = ' \t\n\r\v\f'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + letters + punctuation + whitespace
control = "".join(map(chr, range(32))) + chr(127)
ascii = control + " " + digits + letters + punctuation
CR = "\r"
LF = "\n"
CRLF = CR + LF
ESCAPE = chr(27)
DEL = chr(127)
| #!/usr/bin/python
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# ASCII text classified into named sets.
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
digits = '0123456789'
hexdigits = '0123456789ABCDEF'
letters = lowercase + uppercase
alphanumeric = lowercase + uppercase + digits
whitespace = ' \t\n\r\v\f'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + letters + punctuation + whitespace
control = "".join(map(chr, range(32))) + chr(127)
ascii = control + " " + digits + letters + punctuation
CR = "\r"
LF = "\n"
CRLF = CR + LF
ESCAPE = chr(27)
DEL = chr(127) | en | 0.237343 | #!/usr/bin/python # -*- coding: us-ascii -*- # vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab # ASCII text classified into named sets. !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ | 2.860444 | 3 |
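A small example of how the character classes above can be used, e.g. stripping control characters or testing printability (illustrative only):

def strip_control(text):
    """Remove ASCII control characters (including DEL) from text."""
    return "".join(ch for ch in text if ch not in control)

print(strip_control("abc\x07def\x1b[0m"))             # -> "abcdef[0m" (BEL and ESC removed)
print(all(ch in printable for ch in "Hello, world!"))  # -> True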
puv.py | beachpeeps/puv_process | 0 | 6632876 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 18 12:03:37 2019
@author: cassandra
"""
import pandas as pd
import numpy as np
import gc
import linear_wave_convert
g = 9.81
rho = 1025
def get_k(omega, h):
"""
Linear wave dispersion relation solver
Given to me in matlab by <NAME>, implemented in Python by me
"""
k = omega/np.sqrt(g*h)
f = g*k*np.tanh(k*h) - omega**2
while abs(f) > 1e-10:
dfdk = g*k*h*(1/np.cosh(k*h))**2 + g*np.tanh(k*h)
k = k - f/dfdk
f = g*k*np.tanh(k*h) - omega**2
return k
def NortekVectorConvert(dat, vhd, csv, sample_Hz=2):
"""
Take a NortekVector output file set (.dat and .vhd files only) and convert
them into csv files with times and measurements
@inputs
dat - path to .dat file output by Nortek, containing PUV data
vhd - path to .vhd file output by Nortek, containing start time
csv - path to .csv file to save to
"""
dat_cols = ["Burst","Ensemble","u","v","w",
"str1","str2","str3",
"snr1","snr2","snr3",
"corr1","corr2","corr3",
"p","analog1","analog2","checksum(1=failed)"]
df = pd.read_table(dat,sep='\s+',names=dat_cols)
df = df[["u","v","w","p"]]
gc.collect()
mo, da, ye, ho, mi, se = open(vhd).readline().split(' ')[:6]
start_time = pd.Timestamp(month = int(mo), day= int(da), year=int(ye),
hour=int(ho), minute=int(mi), second=int(se))
t0 = start_time.to_datetime64()
timestep = np.timedelta64(int(1000*(sample_Hz**-1)),'ms')
t = np.arange(t0, t0+len(df)*timestep, timestep)
df['t'] = t
df.to_csv(csv, index=False)
class PUV(object):
"""
PUV object contains a dataframe with PUV data (among other stuff)
and is tied to a specific CSV file.
Has the capacity to save header information associated with the file
"""
def __init__(self, csv, sample_Hz=2):
"""
Initialize this PUV object with a .csv file
"""
self.df = pd.read_csv(csv)
self.filename = csv
self.header = {'sample_Hz':sample_Hz}
self.start_time = np.datetime64(self.df['t'].values[0])
self.end_time = np.datetime64(self.df['t'].values[-1])
@classmethod
def fromCSV(cls, csv):
"""
Use an existing CSV file to initialize a PUV object
"""
return cls(csv)
@classmethod
def fromNortekVector(cls, dat, vhd, csv, sample_Hz=2):
"""
Use NortekVectorConvert to initialize a PUV object
"""
NortekVectorConvert(dat, vhd, csv, sample_Hz=sample_Hz)
return cls(csv)
def bottomPressureToElevation(self, burial_depth_start, burial_depth_end,
pressure_units='depth'):
"""
Convert from bottom pressure into sea level elevation with linear wave
theory
"""
eta, h = linear_wave_convert.p_to_eta(self.df['p'].values,
burial_depth_start,
burial_depth_end,
dt=self.header['sample_Hz']**-1,
pressure_units=pressure_units)
self.df['eta'] = eta
self.df['h'] = h
def bottomCurrentToCurrent(self):
pass
def addCDIPBuoy(self, buoy_dat):
"""
Add CDIP buoy z displacement measurements to the csv file from the
provided buoy_dat file
Projects NaNs to fill gaps
Overwrites existing data
"""
def add(self, PUV):
"""
Add another PUV to this one, concatenating in time
Projects NaNs to fill gaps
Overwrites existing data
"""
def segment(self, start, end, csv):
"""
Create a new PUV object with the times trimmed from start to end
@params
start - numpy datetime64 start time
end - numpy datetime64 end time
@returns
PUV object trimmed to these times specifically
"""
t = np.array(self.df['t'].values, dtype='datetime64')
self.df.iloc[np.where(np.logical_and(t >= start, t <= end))].to_csv(csv, index=False)
return PUV.fromCSV(csv)
def save(self, header=True):
"""
Save, with header
"""
self.df.to_csv(self.filename, index=False)
desired_times = [['2019-01-18T12:51:00' , '2019-01-18T13:24:00'],
['2019-01-18T13:21:00' , '2019-01-18T13:54:00'],
['2019-01-18T13:51:00' , '2019-01-18T14:24:00'],
['2019-01-18T14:21:00' , '2019-01-18T14:54:00'],
['2019-01-18T15:01:00' , '2019-01-18T15:34:00'],
['2019-01-18T15:51:00' , '2019-01-18T16:24:00'],
['2019-01-18T16:21:00' , '2019-01-18T16:54:00'],
['2019-01-18T16:50:00' , '2019-01-18T17:23:00'],
['2019-01-18T17:27:00' , '2019-01-18T18:00:00']]
puv_raws = [['/media/reefbreakcopy/zdata/group/NortekVector/20190422_IB_South/IB-S02.dat',
'/media/reefbreakcopy/zdata/group/NortekVector/20190422_IB_South/IB-S02.vhd'],
['/media/reefbreakcopy/zdata/group/NortekVector/20190422_IB_South/20190301-20190423/IB-S02.dat',
'/media/reefbreakcopy/zdata/group/NortekVector/20190422_IB_South/20190301-20190423/IB-S02.vhd']]
puv_csv_path = '/media/reefbreakcopy/zdata/group/NortekVector/20190422_IB_South/csvs/'
puv_julia_path = '/media/reefbreakcopy/zdata/group/NortekVector/20190422_IB_South/julia/'
deploy_times = [np.datetime64('2018-11-27T12:00:00.00').astype(int),
np.datetime64('2018-11-27T12:00:00.00').astype(int)]
deploy_burial_depths = [0.73, 0.56]
def main():
"""
For each desired time, create a PUV with CDIP buoy information, shoal, and save
"""
# Step 1: Convert all the NortekVector data into CSV files
csv0 = puv_csv_path + '2018_11-2019_03.csv'
csv1 = puv_csv_path + '2019_03-2019_04.csv'
csv2 = puv_csv_path + 'julia.csv'
NortekVectorConvert(puv_raws[0][0], puv_raws[0][1], csv0)
NortekVectorConvert(puv_raws[1][0], puv_raws[1][1], csv1)
# Step 2: Initialize PUV object and add water elevation, water height, and surface currents
puv = PUV.fromCSV(csv0)
# Optionally select right away a subset of data to work on
# puv = puv.segment(np.datetime64(desired_times[0][0]),
# np.datetime64(desired_times[-1][-1]),
# csv2)
start_depth, end_depth = np.interp([puv.start_time.astype(int),
puv.end_time.astype(int)],
deploy_times, deploy_burial_depths)
puv.bottomPressureToElevation(start_depth, end_depth)
puv.save()
# Step 3: Add the buoy data and shoal/deshoal
# [not implemented]
# Step 4: Select segments corresponding to the desired_times list and save
for timerange in desired_times:
ti, tf = np.datetime64(timerange[0]), np.datetime64(timerange[1])
filename = puv_julia_path + timerange[0] + '.csv'
puv.segment(ti, tf, filename)
if __name__ == '__main__':
main() | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 18 12:03:37 2019
@author: cassandra
"""
import pandas as pd
import numpy as np
import gc
import linear_wave_convert
g = 9.81
rho = 1025
def get_k(omega, h):
"""
Linear wave dispersion relation solver
Given to me in matlab by <NAME>, implemented in Python by me
"""
k = omega/np.sqrt(g*h)
f = g*k*np.tanh(k*h) - omega**2
while abs(f) > 1e-10:
dfdk = g*k*h*(1/np.cosh(k*h))**2 + g*np.tanh(k*h)
k = k - f/dfdk
f = g*k*np.tanh(k*h) - omega**2
return k
def NortekVectorConvert(dat, vhd, csv, sample_Hz=2):
"""
Take a NortekVector output file set (.dat and .vhd files only) and convert
them into csv files with times and measurements
@inputs
dat - path to .dat file output by Nortek, containing PUV data
vhd - path to .vhd file output by Nortek, containing start time
csv - path to .csv file to save to
"""
dat_cols = ["Burst","Ensemble","u","v","w",
"str1","str2","str3",
"snr1","snr2","snr3",
"corr1","corr2","corr3",
"p","analog1","analog2","checksum(1=failed)"]
df = pd.read_table(dat,sep='\s+',names=dat_cols)
df = df[["u","v","w","p"]]
gc.collect()
mo, da, ye, ho, mi, se = open(vhd).readline().split(' ')[:6]
start_time = pd.Timestamp(month = int(mo), day= int(da), year=int(ye),
hour=int(ho), minute=int(mi), second=int(se))
t0 = start_time.to_datetime64()
timestep = np.timedelta64(int(1000*(sample_Hz**-1)),'ms')
t = np.arange(t0, t0+len(df)*timestep, timestep)
df['t'] = t
df.to_csv(csv, index=False)
class PUV(object):
"""
PUV object contains a dataframe with PUV data (among other stuff)
and is tied to a specific CSV file.
Has the capacity to save header information associated with the file
"""
def __init__(self, csv, sample_Hz=2):
"""
Initialize this PUV object with a .csv file
"""
self.df = pd.read_csv(csv)
self.filename = csv
self.header = {'sample_Hz':sample_Hz}
self.start_time = np.datetime64(self.df['t'].values[0])
self.end_time = np.datetime64(self.df['t'].values[-1])
@classmethod
def fromCSV(cls, csv):
"""
Use an existing CSV file to initialize a PUV object
"""
return cls(csv)
@classmethod
def fromNortekVector(cls, dat, vhd, csv, sample_Hz=2):
"""
Use NortekVectorConvert to initialize a PUV object
"""
NortekVectorConvert(dat, vhd, csv, sample_Hz=sample_Hz)
return cls(csv)
def bottomPressureToElevation(self, burial_depth_start, burial_depth_end,
pressure_units='depth'):
"""
Convert from bottom pressure into sea level elevation with linear wave
theory
"""
eta, h = linear_wave_convert.p_to_eta(self.df['p'].values,
burial_depth_start,
burial_depth_end,
dt=self.header['sample_Hz']**-1,
pressure_units=pressure_units)
self.df['eta'] = eta
self.df['h'] = h
def bottomCurrentToCurrent(self):
pass
def addCDIPBuoy(self, buoy_dat):
"""
Add CDIP buoy z displacement measurements to the csv file from the
provided buoy_dat file
Projects NaNs to fill gaps
Overwrites existing data
"""
def add(self, PUV):
"""
Add another PUV to this one, concatenating in time
Projects NaNs to fill gaps
Overwrites existing data
"""
def segment(self, start, end, csv):
"""
Create a new PUV object with the times trimmed from start to end
@params
start - numpy datetime64 start time
end - numpy datetime64 end time
@returns
PUV object trimmed to these times specifically
"""
t = np.array(self.df['t'].values, dtype='datetime64')
self.df.iloc[np.where(np.logical_and(t >= start, t <= end))].to_csv(csv, index=False)
return PUV.fromCSV(csv)
def save(self, header=True):
"""
Save, with header
"""
self.df.to_csv(self.filename, index=False)
desired_times = [['2019-01-18T12:51:00' , '2019-01-18T13:24:00'],
['2019-01-18T13:21:00' , '2019-01-18T13:54:00'],
['2019-01-18T13:51:00' , '2019-01-18T14:24:00'],
['2019-01-18T14:21:00' , '2019-01-18T14:54:00'],
['2019-01-18T15:01:00' , '2019-01-18T15:34:00'],
['2019-01-18T15:51:00' , '2019-01-18T16:24:00'],
['2019-01-18T16:21:00' , '2019-01-18T16:54:00'],
['2019-01-18T16:50:00' , '2019-01-18T17:23:00'],
['2019-01-18T17:27:00' , '2019-01-18T18:00:00']]
puv_raws = [['/media/reefbreakcopy/zdata/group/NortekVector/20190422_IB_South/IB-S02.dat',
'/media/reefbreakcopy/zdata/group/NortekVector/20190422_IB_South/IB-S02.vhd'],
['/media/reefbreakcopy/zdata/group/NortekVector/20190422_IB_South/20190301-20190423/IB-S02.dat',
'/media/reefbreakcopy/zdata/group/NortekVector/20190422_IB_South/20190301-20190423/IB-S02.vhd']]
puv_csv_path = '/media/reefbreakcopy/zdata/group/NortekVector/20190422_IB_South/csvs/'
puv_julia_path = '/media/reefbreakcopy/zdata/group/NortekVector/20190422_IB_South/julia/'
deploy_times = [np.datetime64('2018-11-27T12:00:00.00').astype(int),
np.datetime64('2018-11-27T12:00:00.00').astype(int)]
deploy_burial_depths = [0.73, 0.56]
def main():
"""
For each desired time, create a PUV with CDIP buoy information, shoal, and save
"""
# Step 1: Convert all the NortekVector data into CSV files
csv0 = puv_csv_path + '2018_11-2019_03.csv'
csv1 = puv_csv_path + '2019_03-2019_04.csv'
csv2 = puv_csv_path + 'julia.csv'
NortekVectorConvert(puv_raws[0][0], puv_raws[0][1], csv0)
NortekVectorConvert(puv_raws[1][0], puv_raws[1][1], csv1)
# Step 2: Initialize PUV object and add water elevation, water height, and surface currents
puv = PUV.fromCSV(csv0)
# Optionally select right away a subset of data to work on
# puv = puv.segment(np.datetime64(desired_times[0][0]),
# np.datetime64(desired_times[-1][-1]),
# csv2)
start_depth, end_depth = np.interp([puv.start_time.astype(int),
puv.end_time.astype(int)],
deploy_times, deploy_burial_depths)
puv.bottomPressureToElevation(start_depth, end_depth)
puv.save()
# Step 3: Add the buoy data and shoal/deshoal
# [not implemented]
# Step 4: Select segments corresponding to the desired_times list and save
for timerange in desired_times:
ti, tf = np.datetime64(timerange[0]), np.datetime64(timerange[1])
filename = puv_julia_path + timerange[0] + '.csv'
puv.segment(ti, tf, filename)
if __name__ == '__main__':
main() | en | 0.732634 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Thu Jul 18 12:03:37 2019 @author: cassandra Linear wave dispersion relation solver Given to me in matlab by <NAME>, implemented in Python by me Take a NortekVector output file set (.dat and .vhd files only) and convert them into csv files with times and measurements @inputs dat - path to .dat file output by Nortek, containing PUV data vhd - path to .vhd file output by Nortek, containing start time csv - path to .csv file to save to PUV object contains a dataframe with PUV data (among other stuff) and is tied to a specific CSV file. Has the capacity to save header information associated with the file Initialize this PUVCSV object with a .csv file Use an existing CSV file to initialize a PUV object Use NortekVectorConvert to initialize a PUV object Convert from bottom pressure into sea level elevation with linear wave theory Add CDIP buoy z displacement measurements to the csv file from the provided buoy_dat file Projects NaNs to fill gaps Overwrites existing data Add another PUV to this one, concatenating in time Projects NaNs to fill gaps Overwrites existing data Create a new PUV object with the times trimmed from start to end @params start - numpy datetime64 start time end - numpy datetime64 end time @returns PUV object trimmed to these times specifically Save, with header For each desired time, create a PUV with CDIP buoy information, shoal, and save # Step 1: Convert all the NortekVector data into CSV files # Step 2: Initialize PUV object and add water elevation, water height, and surface currents # Optionally select right away a subset of data to work on # puv = puv.segment(np.datetime64(desired_times[0][0]), # np.datetime64(desired_times[-1][-1]), # csv2) # Step 3: Add the buoy data and shoal/deshoal # [not implemented] # Step 4: Select segments corresponding to the desired_times list and save | 2.756213 | 3 |
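A numeric illustration of the dispersion solver above: the wavelength of a 10 s wave in 10 m of water. The values are arbitrary examples, and get_k (with the module-level g) is assumed to be in scope from puv.py above:

import numpy as np

T = 10.0                   # wave period [s]
h = 10.0                   # water depth [m]
omega = 2 * np.pi / T      # angular frequency [rad/s]
k = get_k(omega, h)        # wavenumber [rad/m], Newton iteration on the dispersion relation
wavelength = 2 * np.pi / k
print(round(wavelength, 1))  # ~92 m here, versus ~156 m in deep water for the same period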
django/esite/customer/views.py | vollov/django-template | 0 | 6632877 | from django.shortcuts import render_to_response, get_object_or_404
from models import Customer
def get_customers(request):
customers = Customer.objects.filter(active=True)
return render_to_response('list.html', {
'page_title': 'Customers',
'customers': customers,
})
| from django.shortcuts import render_to_response, get_object_or_404
from models import Customer
def get_customers(request):
customers = Customer.objects.filter(active=True)
return render_to_response('list.html', {
'page_title': 'Customers',
'customers': customers,
})
| none | 1 | 1.988791 | 2 |
|
pymeasure/instruments/anritsu/anritsuMG3692C.py | Endever42/pymeasure | 2 | 6632878 | <filename>pymeasure/instruments/anritsu/anritsuMG3692C.py
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2019 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from pymeasure.instruments import Instrument, discreteTruncate, RangeException
class AnritsuMG3692C(Instrument):
""" Represents the Anritsu MG3692C Signal Generator
"""
power = Instrument.control(
":POWER?;", ":POWER %g dBm;",
""" A floating point property that represents the output power
in dBm. This property can be set. """
)
frequency = Instrument.control(
":FREQUENCY?;", ":FREQUENCY %e Hz;",
""" A floating point property that represents the output frequency
in Hz. This property can be set. """
)
def __init__(self, resourceName, **kwargs):
super(AnritsuMG3692C, self).__init__(
resourceName,
"Anritsu MG3692C Signal Generator",
**kwargs
)
@property
def output(self):
""" A boolean property that represents the signal output state.
This property can be set to control the output.
"""
return int(self.ask(":OUTPUT?")) == 1
@output.setter
def output(self, value):
if value:
self.write(":OUTPUT ON;")
else:
self.write(":OUTPUT OFF;")
def enable(self):
""" Enables the signal output.
"""
self.output = True
def disable(self):
""" Disables the signal output.
"""
self.output = False
def shutdown(self):
""" Shuts down the instrument, putting it in a safe state.
"""
# TODO: Implement modulation
self.modulation = False
self.disable()
| <filename>pymeasure/instruments/anritsu/anritsuMG3692C.py
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2019 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from pymeasure.instruments import Instrument, discreteTruncate, RangeException
class AnritsuMG3692C(Instrument):
""" Represents the Anritsu MG3692C Signal Generator
"""
power = Instrument.control(
":POWER?;", ":POWER %g dBm;",
""" A floating point property that represents the output power
in dBm. This property can be set. """
)
frequency = Instrument.control(
":FREQUENCY?;", ":FREQUENCY %e Hz;",
""" A floating point property that represents the output frequency
in Hz. This property can be set. """
)
def __init__(self, resourceName, **kwargs):
super(AnritsuMG3692C, self).__init__(
resourceName,
"Anritsu MG3692C Signal Generator",
**kwargs
)
@property
def output(self):
""" A boolean property that represents the signal output state.
This property can be set to control the output.
"""
return int(self.ask(":OUTPUT?")) == 1
@output.setter
def output(self, value):
if value:
self.write(":OUTPUT ON;")
else:
self.write(":OUTPUT OFF;")
def enable(self):
""" Enables the signal output.
"""
self.output = True
def disable(self):
""" Disables the signal output.
"""
self.output = False
def shutdown(self):
""" Shuts down the instrument, putting it in a safe state.
"""
# TODO: Implement modulation
self.modulation = False
self.disable()
| en | 0.769969 | # # This file is part of the PyMeasure package. # # Copyright (c) 2013-2019 PyMeasure Developers # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Represents the Anritsu MG3692C Signal Generator A floating point property that represents the output power in dBm. This property can be set. A floating point property that represents the output frequency in Hz. This property can be set. A boolean property that represents the signal output state. This property can be set to control the output. Enables the signal output. Disables the signal output. Shuts down the instrument, putting it in a safe state. # TODO: Implement modulation | 1.590855 | 2 |
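A hypothetical usage sketch; the VISA resource string is an assumption for a particular setup, and the import path assumes the pymeasure package layout shown in the file path above:

from pymeasure.instruments.anritsu import AnritsuMG3692C  # assumed package path

sig_gen = AnritsuMG3692C("GPIB0::5::INSTR")  # hypothetical VISA address
sig_gen.frequency = 2.45e9                   # 2.45 GHz
sig_gen.power = -10                          # -10 dBm
sig_gen.enable()
# ... run the measurement ...
sig_gen.shutdown()                           # disables the output again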
problems/A/ExercisingWalk.py | deveshbajpai19/CodeForces | 55 | 6632879 | <reponame>deveshbajpai19/CodeForces
__author__ = '<NAME>'
'''
https://codeforces.com/problemset/problem/1332/A
Solution: Essentially, we need to calculate the net distance moved in each dimension; the final position should
lie in the range [x1, x2] and [y1, y2] respectively. There is a special case when we are required to move a
positive distance in a dimension but there is no space to move (i.e. x1 == x2 or y1 == y2). Evaluate these
conditions and return the result accordingly.
'''
def solve(a, b, c, d, x, y, x1, y1, x2, y2):
if a + b > 0 and x1 == x2:
return "NO"
if c + d > 0 and y1 == y2:
return "NO"
net_dist_x = x + (b - a)
net_dist_y = y + (d - c)
if not net_dist_x >= x1 or not net_dist_x <= x2:
return "NO"
if not net_dist_y >= y1 or not net_dist_y <= y2:
return "NO"
return "YES"
if __name__ == "__main__":
t = int(raw_input())
results = list()
for _ in xrange(0, t):
a, b, c, d = map(int, raw_input().split(" "))
x, y, x1, y1, x2, y2 = map(int, raw_input().split(" "))
results.append(solve(a, b, c, d, x, y, x1, y1, x2, y2))
for result in results:
print result
| __author__ = '<NAME>'
'''
https://codeforces.com/problemset/problem/1332/A
Solution: Essentially, we need to calculate the net distance moved in each dimension; the final position should
lie in the range [x1, x2] and [y1, y2] respectively. There is a special case when we are required to move a
positive distance in a dimension but there is no space to move (i.e. x1 == x2 or y1 == y2). Evaluate these
conditions and return the result accordingly.
'''
def solve(a, b, c, d, x, y, x1, y1, x2, y2):
if a + b > 0 and x1 == x2:
return "NO"
if c + d > 0 and y1 == y2:
return "NO"
net_dist_x = x + (b - a)
net_dist_y = y + (d - c)
if not net_dist_x >= x1 or not net_dist_x <= x2:
return "NO"
if not net_dist_y >= y1 or not net_dist_y <= y2:
return "NO"
return "YES"
if __name__ == "__main__":
t = int(raw_input())
results = list()
for _ in xrange(0, t):
a, b, c, d = map(int, raw_input().split(" "))
x, y, x1, y1, x2, y2 = map(int, raw_input().split(" "))
results.append(solve(a, b, c, d, x, y, x1, y1, x2, y2))
for result in results:
print result | en | 0.917542 | https://codeforces.com/problemset/problem/1332/A Solution: Mostly we need to calculate the net distance to move in both dimensions. That should be in the range [x1, x2] and [y1, y2] respectively. There is a special case when we are required to move positive distance in each dimension and there in space to move (i.e. x1 == x2 or y1 == y2). Evaluate these conditions and return the result accordingly. | 3.696386 | 4 |
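A small self-check with hand-constructed cases (not the official CodeForces samples), assuming solve() above is in scope:

# Case 1: net displacement (-1, 0) stays inside the box [-2, 2] x [-2, 2] -> YES
assert solve(3, 2, 2, 2, 0, 0, -2, -2, 2, 2) == "YES"
# Case 2: x-moves are required (a + b > 0) but x1 == x2 leaves no room -> NO
assert solve(1, 1, 0, 0, 0, 0, 0, -1, 0, 1) == "NO"
print("self-checks passed")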
solum/tests/common/test_solum_keystoneclient.py | ed-/solum | 0 | 6632880 | # Copyright 2014 - Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
cfg.CONF.import_group('keystone_authtoken',
'keystoneclient.middleware.auth_token')
import keystoneclient.exceptions as kc_exception # noqa
from solum.common import exception
from solum.common import solum_keystoneclient
from solum.tests import base
from solum.tests import utils
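# The class-level mock.patch below replaces keystoneclient's v3 Client and passes the mock (mock_ks) to every test method.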
@mock.patch('keystoneclient.v3.client.Client')
class KeystoneClientTest(base.BaseTestCase):
"""Test cases for solum.common.solum_keystoneclient."""
def setUp(self):
super(KeystoneClientTest, self).setUp()
dummy_url = 'http://server.test:5000/v2.0'
self.ctx = utils.dummy_context()
self.ctx.auth_url = dummy_url
self.ctx.auth_token = '<PASSWORD>'
self.ctx.auth_token_info = None
cfg.CONF.set_override('auth_uri', dummy_url,
group='keystone_authtoken')
cfg.CONF.set_override('admin_user', 'solum',
group='keystone_authtoken')
cfg.CONF.set_override('admin_password', '<PASSWORD>',
group='keystone_authtoken')
cfg.CONF.set_override('admin_tenant_name', 'service',
group='keystone_authtoken')
def test_init_v3_token(self, mock_ks):
"""Test creating the client, token auth."""
self.ctx.tenant = None
self.ctx.trust_id = None
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
solum_ks_client.client
self.assertIsNotNone(solum_ks_client._client)
mock_ks.assert_called_once_with(token='<PASSWORD>', project_id=None,
auth_url='http://server.test:5000/v3',
endpoint='http://server.test:5000/v3')
mock_ks.return_value.authenticate.assert_called_once_with()
def test_init_v3_bad_nocreds(self, mock_ks):
"""Test creating the client, no credentials."""
self.ctx.auth_token = None
self.ctx.trust_id = None
self.ctx.username = None
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
self.assertRaises(exception.AuthorizationFailure,
solum_ks_client._v3_client_init)
def test_init_trust_token_access(self, mock_ks):
"""Test creating the client, token auth."""
self.ctx.tenant = 'abcd1234'
self.ctx.trust_id = None
self.ctx.auth_token_info = {'access': {'token': {'id': 'placeholder'}}}
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
solum_ks_client.client
self.assertIsNotNone(solum_ks_client._client)
mock_ks.assert_called_once_with(auth_ref={'version': 'v2.0',
'token': {
'id': '<PASSWORD>'}},
endpoint='http://server.test:5000/v3',
auth_url='http://server.test:5000/v3')
def test_init_trust_token_token(self, mock_ks):
self.ctx.tenant = None
self.ctx.trust_id = None
self.ctx.auth_token_info = {'token': {}}
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
solum_ks_client.client
self.assertIsNotNone(solum_ks_client._client)
mock_ks.assert_called_once_with(auth_ref={'auth_token': '<PASSWORD>',
'version': 'v3'},
endpoint='http://server.test:5000/v3',
auth_url='http://server.test:5000/v3')
def test_init_trust_token_none(self, mock_ks):
self.ctx.tenant = None
self.ctx.trust_id = None
self.ctx.auth_token_info = {'<PASSWORD>': '<PASSWORD>'}
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
self.assertRaises(exception.AuthorizationFailure,
solum_ks_client._v3_client_init)
def test_create_trust_context_trust_id(self, mock_ks):
"""Test create_trust_context with existing trust_id."""
self.ctx.trust_id = 'atrust123'
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
trust_context = solum_ks_client.create_trust_context()
self.assertEqual(self.ctx.to_dict(), trust_context.to_dict())
mock_ks.assert_called_once_with(username='solum',
auth_url='http://server.test:5000/v3',
password='<PASSWORD>',
endpoint='http://server.test:5000/v3',
trust_id='atrust123')
mock_ks.return_value.authenticate.assert_called_once_with()
def test_create_trust_context_trust_create(self, mock_ks):
"""Test create_trust_context when creating a trust."""
class FakeTrust(object):
id = 'atrust123'
cfg.CONF.set_override('trusts_delegated_roles',
['solum_assembly_update'])
getter_mock = mock.PropertyMock(side_effect=['1234', '5678'])
type(mock_ks.return_value.auth_ref).user_id = getter_mock
mock_ks.return_value.auth_ref.project_id = '42'
mock_ks.return_value.trusts.create.return_value = FakeTrust()
self.ctx.trust_id = None
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
trust_context = solum_ks_client.create_trust_context()
# admin_client and user client
expected = [mock.call(username='solum',
project_name='service',
password='<PASSWORD>',
auth_url='http://server.test:5000/v3',
endpoint='http://server.test:5000/v3'),
mock.call(token='<PASSWORD>',
project_id='test_tenant_id',
auth_url='http://server.test:5000/v3',
endpoint='http://server.test:5000/v3')]
self.assertEqual(expected, mock_ks.call_args_list)
self.assertEqual([mock.call(), mock.call()],
mock_ks.return_value.authenticate.call_args_list)
# trust creation
self.assertEqual('atrust123', trust_context.trust_id)
mock_ks.return_value.trusts.create.assert_called_once_with(
trustor_user='5678',
trustee_user='1234',
project='42',
impersonation=True,
role_names=['solum_assembly_update'])
def test_init_admin_client_denied(self, mock_ks):
"""Test the admin_client property, auth failure path."""
self.ctx.username = None
self.ctx.password = <PASSWORD>
self.ctx.trust_id = None
mock_ks.return_value.authenticate.return_value = False
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        # Wrap the property access in a function; otherwise the property raises the
        # exception outside of the assertRaises, which fails the test
def get_admin_client():
solum_ks_client.admin_client
self.assertRaises(exception.AuthorizationFailure,
get_admin_client)
def test_trust_init_fail(self, mock_ks):
"""Test consuming a trust when initializing, error scoping."""
self.ctx.username = None
self.ctx.auth_token = None
self.ctx.trust_id = 'atrust123'
mock_ks.return_value.auth_ref.trust_scoped = False
self.assertRaises(exception.AuthorizationFailure,
solum_keystoneclient.KeystoneClientV3, self.ctx)
def test_trust_init_token(self, mock_ks):
"""Test trust_id takes precedence when token specified."""
self.ctx.username = None
self.ctx.trust_id = 'atrust123'
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
self.assertIsNotNone(solum_ks_client._client)
mock_ks.assert_called_once_with(username='solum',
auth_url='http://server.test:5000/v3',
password='<PASSWORD>',
endpoint='http://server.test:5000/v3',
trust_id='atrust123')
mock_ks.return_value.authenticate.assert_called_once_with()
def test_delete_trust(self, mock_ks):
"""Test delete_trust when deleting trust."""
mock_ks.return_value.trusts.delete.return_value = None
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
self.assertIsNone(solum_ks_client.delete_trust(trust_id='atrust123'))
mock_ks.return_value.trusts.delete.assert_called_once_with('atrust123')
def test_delete_trust_not_found(self, mock_ks):
"""Test delete_trust when trust already deleted."""
mock_delete = mock_ks.return_value.trusts.delete
mock_delete.side_effect = kc_exception.NotFound()
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
self.assertIsNone(solum_ks_client.delete_trust(trust_id='atrust123'))
| # Copyright 2014 - Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
cfg.CONF.import_group('keystone_authtoken',
'keystoneclient.middleware.auth_token')
import keystoneclient.exceptions as kc_exception # noqa
from solum.common import exception
from solum.common import solum_keystoneclient
from solum.tests import base
from solum.tests import utils
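# The class-level mock.patch below replaces keystoneclient's v3 Client and passes the mock (mock_ks) to every test method.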
@mock.patch('keystoneclient.v3.client.Client')
class KeystoneClientTest(base.BaseTestCase):
"""Test cases for solum.common.solum_keystoneclient."""
def setUp(self):
super(KeystoneClientTest, self).setUp()
dummy_url = 'http://server.test:5000/v2.0'
self.ctx = utils.dummy_context()
self.ctx.auth_url = dummy_url
self.ctx.auth_token = '<PASSWORD>'
self.ctx.auth_token_info = None
cfg.CONF.set_override('auth_uri', dummy_url,
group='keystone_authtoken')
cfg.CONF.set_override('admin_user', 'solum',
group='keystone_authtoken')
cfg.CONF.set_override('admin_password', '<PASSWORD>',
group='keystone_authtoken')
cfg.CONF.set_override('admin_tenant_name', 'service',
group='keystone_authtoken')
def test_init_v3_token(self, mock_ks):
"""Test creating the client, token auth."""
self.ctx.tenant = None
self.ctx.trust_id = None
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
solum_ks_client.client
self.assertIsNotNone(solum_ks_client._client)
mock_ks.assert_called_once_with(token='<PASSWORD>', project_id=None,
auth_url='http://server.test:5000/v3',
endpoint='http://server.test:5000/v3')
mock_ks.return_value.authenticate.assert_called_once_with()
def test_init_v3_bad_nocreds(self, mock_ks):
"""Test creating the client, no credentials."""
self.ctx.auth_token = None
self.ctx.trust_id = None
self.ctx.username = None
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
self.assertRaises(exception.AuthorizationFailure,
solum_ks_client._v3_client_init)
def test_init_trust_token_access(self, mock_ks):
"""Test creating the client, token auth."""
self.ctx.tenant = 'abcd1234'
self.ctx.trust_id = None
self.ctx.auth_token_info = {'access': {'token': {'id': 'placeholder'}}}
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
solum_ks_client.client
self.assertIsNotNone(solum_ks_client._client)
mock_ks.assert_called_once_with(auth_ref={'version': 'v2.0',
'token': {
'id': '<PASSWORD>'}},
endpoint='http://server.test:5000/v3',
auth_url='http://server.test:5000/v3')
def test_init_trust_token_token(self, mock_ks):
self.ctx.tenant = None
self.ctx.trust_id = None
self.ctx.auth_token_info = {'token': {}}
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
solum_ks_client.client
self.assertIsNotNone(solum_ks_client._client)
mock_ks.assert_called_once_with(auth_ref={'auth_token': '<PASSWORD>',
'version': 'v3'},
endpoint='http://server.test:5000/v3',
auth_url='http://server.test:5000/v3')
def test_init_trust_token_none(self, mock_ks):
self.ctx.tenant = None
self.ctx.trust_id = None
self.ctx.auth_token_info = {'<PASSWORD>': '<PASSWORD>'}
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
self.assertRaises(exception.AuthorizationFailure,
solum_ks_client._v3_client_init)
def test_create_trust_context_trust_id(self, mock_ks):
"""Test create_trust_context with existing trust_id."""
self.ctx.trust_id = 'atrust123'
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
trust_context = solum_ks_client.create_trust_context()
self.assertEqual(self.ctx.to_dict(), trust_context.to_dict())
mock_ks.assert_called_once_with(username='solum',
auth_url='http://server.test:5000/v3',
password='<PASSWORD>',
endpoint='http://server.test:5000/v3',
trust_id='atrust123')
mock_ks.return_value.authenticate.assert_called_once_with()
def test_create_trust_context_trust_create(self, mock_ks):
"""Test create_trust_context when creating a trust."""
class FakeTrust(object):
id = 'atrust123'
cfg.CONF.set_override('trusts_delegated_roles',
['solum_assembly_update'])
getter_mock = mock.PropertyMock(side_effect=['1234', '5678'])
type(mock_ks.return_value.auth_ref).user_id = getter_mock
mock_ks.return_value.auth_ref.project_id = '42'
mock_ks.return_value.trusts.create.return_value = FakeTrust()
self.ctx.trust_id = None
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
trust_context = solum_ks_client.create_trust_context()
# admin_client and user client
expected = [mock.call(username='solum',
project_name='service',
password='<PASSWORD>',
auth_url='http://server.test:5000/v3',
endpoint='http://server.test:5000/v3'),
mock.call(token='<PASSWORD>',
project_id='test_tenant_id',
auth_url='http://server.test:5000/v3',
endpoint='http://server.test:5000/v3')]
self.assertEqual(expected, mock_ks.call_args_list)
self.assertEqual([mock.call(), mock.call()],
mock_ks.return_value.authenticate.call_args_list)
# trust creation
self.assertEqual('atrust123', trust_context.trust_id)
mock_ks.return_value.trusts.create.assert_called_once_with(
trustor_user='5678',
trustee_user='1234',
project='42',
impersonation=True,
role_names=['solum_assembly_update'])
def test_init_admin_client_denied(self, mock_ks):
"""Test the admin_client property, auth failure path."""
self.ctx.username = None
self.ctx.password = <PASSWORD>
self.ctx.trust_id = None
mock_ks.return_value.authenticate.return_value = False
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
        # Wrap the property access in a function; otherwise the property raises the
        # exception outside of the assertRaises, which fails the test
def get_admin_client():
solum_ks_client.admin_client
self.assertRaises(exception.AuthorizationFailure,
get_admin_client)
def test_trust_init_fail(self, mock_ks):
"""Test consuming a trust when initializing, error scoping."""
self.ctx.username = None
self.ctx.auth_token = None
self.ctx.trust_id = 'atrust123'
mock_ks.return_value.auth_ref.trust_scoped = False
self.assertRaises(exception.AuthorizationFailure,
solum_keystoneclient.KeystoneClientV3, self.ctx)
def test_trust_init_token(self, mock_ks):
"""Test trust_id takes precedence when token specified."""
self.ctx.username = None
self.ctx.trust_id = 'atrust123'
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
self.assertIsNotNone(solum_ks_client._client)
mock_ks.assert_called_once_with(username='solum',
auth_url='http://server.test:5000/v3',
password='<PASSWORD>',
endpoint='http://server.test:5000/v3',
trust_id='atrust123')
mock_ks.return_value.authenticate.assert_called_once_with()
def test_delete_trust(self, mock_ks):
"""Test delete_trust when deleting trust."""
mock_ks.return_value.trusts.delete.return_value = None
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
self.assertIsNone(solum_ks_client.delete_trust(trust_id='atrust123'))
mock_ks.return_value.trusts.delete.assert_called_once_with('atrust123')
def test_delete_trust_not_found(self, mock_ks):
"""Test delete_trust when trust already deleted."""
mock_delete = mock_ks.return_value.trusts.delete
mock_delete.side_effect = kc_exception.NotFound()
solum_ks_client = solum_keystoneclient.KeystoneClientV3(self.ctx)
self.assertIsNone(solum_ks_client.delete_trust(trust_id='atrust123'))
| en | 0.809394 | # Copyright 2014 - Rackspace Hosting. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # noqa Test cases for solum.common.solum_keystoneclient. Test creating the client, token auth. Test creating the client, no credentials. Test creating the client, token auth. Test create_trust_context with existing trust_id. Test create_trust_context when creating a trust. # admin_client and user client # trust creation Test the admin_client property, auth failure path. # Define wrapper for property or the property raises the exception # outside of the assertRaises which fails the test Test consuming a trust when initializing, error scoping. Test trust_id takes precedence when token specified. Test delete_trust when deleting trust. Test delete_trust when trust already deleted. | 1.799396 | 2 |
preprocessing_tools/generate_phi0.py | ReneVelasquez18/PYGBE | 0 | 6632881 | <reponame>ReneVelasquez18/PYGBE
#!/usr/bin/env python
"""
It generates a .phi0 for a sensor brick. The user has to set the values
of dphi/dn on each face of the brick.
The surface charge \sigma is usually units of C/m^2. However, the code takes as
input dphi/dn which has to be in units of electron charge and angstrom^2.
dphi/dn = - sigma / epsilon; therefore, to convert sigma to dphi/dn in the proper units,
we have to multiply the value of sigma by (Å)^2, divide it by the
charge of the electron q_e = 1.602x10^-19, and finally divide it by epsilon = 80
(dielectric constant of the medium outside, usually water).
For example:
If the user desires a surface charge sigma = 0.05 C/m^2 then in this code he/she
will set up dphi/dn in the faces of the brick, where:
dphi/dn = - (0.05 x (1x10^-10)^2)/ (80 x 1.602x10^-19) = -4x10^-5
"""
import numpy
import sys
import os
from pygbe.util.read_data import read_triangle, read_vertex
from argparse import ArgumentParser
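# A minimal sketch of the unit conversion described in the docstring above; it assumes epsilon = 80,
# q_e = 1.602e-19 C and lengths in angstroms, and is not called anywhere in this script.
def sigma_to_dphidn(sigma, epsilon=80.0, q_e=1.602e-19):
    # sigma in C/m^2 -> dphi/dn in e / angstrom^2: scale by (1e-10 m)^2, divide by q_e and epsilon
    return -sigma * (1e-10)**2 / (epsilon * q_e)  # e.g. sigma = 0.05 gives ~ -3.9e-5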
def zeroAreas(vertex, triangle_raw, Area_null):
"""
Looks for "zero-areas", areas that are really small, almost zero. It appends
them to the Area_null list.
"""
for i in range(len(triangle_raw)):
L0 = vertex[triangle_raw[i,1]] - vertex[triangle_raw[i,0]]
L2 = vertex[triangle_raw[i,0]] - vertex[triangle_raw[i,2]]
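        # The triangle's area is half the magnitude of the cross product of two of its edge vectors.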
normal_aux = numpy.cross(L0,L2)
Area_aux = numpy.linalg.norm(normal_aux)/2
if Area_aux<1e-10:
Area_null.append(i)
return Area_null
## Designed for a cube whose faces are aligned with Cartesian coordinates
def read_inputs():
"""
Parse command-line arguments to generate_phi0.
User should provide:
- Problem folder (can be inferred from files if not provided)
- Mesh file (without .vert or .face) for which phi0 is desired.
- x_right : value of dphi/dn in the x_right face.
- x_left : value of dphi/dn in the x_left face.
- y_top : value of dphi/dn in the y_top face.
- y_bottom: value of dphi/dn in the y_bottom face.
- z_front : value of dphi/dn in the z_front face.
- z_back : value of dphi/dn in the z_back face.
"""
parser = ArgumentParser(description='Manage generate_phi0 command line arguments')
parser.add_argument('problem_folder', type=str,
help="Path to folder containing problem files")
parser.add_argument('-m', '--mesh', dest='mesh', type=str, default=None,
help="Path to sensor-brick mesh file")
parser.add_argument('-x_r', '--x_right', dest='x_right', type=float, default=None,
help="charge assigned to x_right face")
parser.add_argument('-x_l', '--x_left', dest='x_left', type=float, default=None,
help="charge assigned to x_left face")
parser.add_argument('-y_t', '--y_top', dest='y_top', type=float, default=None,
help="charge assigned to y_top face")
parser.add_argument('-y_b', '--y_bottom', dest='y_bottom', type=float, default=None,
help="charge assigned to y_bottom face")
parser.add_argument('-z_f', '--z_front', dest='z_front', type=float, default=None,
help="charge assigned to z_front face")
parser.add_argument('-z_b', '--z_back', dest='z_back', type=float, default=None,
help="charge assigned to z_back face")
return parser.parse_args()
args = read_inputs()
meshFile = args.mesh
x_right = args.x_right
x_left = args.x_left
y_top = args.y_top
y_bott = args.y_bottom
z_front = args.z_front
z_back = args.z_back
full_path = args.problem_folder
if not os.path.isdir(full_path):
full_path = os.getcwd() + '/' + full_path
full_path = os.path.normpath(full_path)
os.environ['PYGBE_PROBLEM_FOLDER'] = full_path
vertex = read_vertex(meshFile+'.vert', float)
triangle_raw = read_triangle(meshFile+'.face', 'neumann_surface')
Area_null = []
Area_null = zeroAreas(vertex, triangle_raw, Area_null)
triangle = numpy.delete(triangle_raw, Area_null, 0)
if len(triangle) != len(triangle_raw):
print '%i deleted triangles'%(len(triangle_raw)-len(triangle))
phi0 = numpy.zeros(len(triangle), float)
tri_ctr = numpy.average(vertex[triangle], axis=1)
print len(tri_ctr)
print len(triangle)
max_x = max(tri_ctr[:,0])
min_x = min(tri_ctr[:,0])
max_y = max(tri_ctr[:,1])
min_y = min(tri_ctr[:,1])
max_z = max(tri_ctr[:,2])
min_z = min(tri_ctr[:,2])
for i in range(len(triangle)):
if abs(tri_ctr[i,0]-max_x)<1e-10:
phi0[i] = x_right
if abs(tri_ctr[i,0]-min_x)<1e-10:
phi0[i] = x_left
if abs(tri_ctr[i,1]-max_y)<1e-10:
phi0[i] = y_top
if abs(tri_ctr[i,1]-min_y)<1e-10:
phi0[i] = y_bott
if abs(tri_ctr[i,2]-max_z)<1e-10:
phi0[i] = z_front
if abs(tri_ctr[i,2]-min_z)<1e-10:
phi0[i] = z_back
meshFile = meshFile.rsplit('/', 1)[-1]
faces_values = [x_right, x_left, y_top, y_bott, z_front, z_back]
faces_values = ''.join([str(face) for face in faces_values])
file_out = meshFile+'_'+faces_values+'.phi0'
with open(full_path+'/'+file_out, 'w') as f:
numpy.savetxt(f, phi0)
| #!/usr/bin/env python
"""
It generates a .phi0 for a sensor brick. The user has to set the values
of dphi/dn on each face of the brick.
The surface charge \sigma is usually units of C/m^2. However, the code takes as
input dphi/dn which has to be in units of electron charge and angstrom^2.
dphi/dn = - sigma / epsilon; therefore, to convert sigma to dphi/dn in the proper units,
we have to multiply the value of sigma by (Å)^2, divide it by the
charge of the electron q_e = 1.602x10^-19, and finally divide it by epsilon = 80
(dielectric constant of the medium outside, usually water).
For example:
If the user desires a surface charge sigma = 0.05 C/m^2 then in this code he/she
will set up dphi/dn in the faces of the brick, where:
dphi/dn = - (0.05 x (1x10^-10)^2)/ (80 x 1.602x10^-19) = -4x10^-5
"""
import numpy
import sys
import os
from pygbe.util.read_data import read_triangle, read_vertex
from argparse import ArgumentParser
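# A minimal sketch of the unit conversion described in the docstring above; it assumes epsilon = 80,
# q_e = 1.602e-19 C and lengths in angstroms, and is not called anywhere in this script.
def sigma_to_dphidn(sigma, epsilon=80.0, q_e=1.602e-19):
    # sigma in C/m^2 -> dphi/dn in e / angstrom^2: scale by (1e-10 m)^2, divide by q_e and epsilon
    return -sigma * (1e-10)**2 / (epsilon * q_e)  # e.g. sigma = 0.05 gives ~ -3.9e-5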
def zeroAreas(vertex, triangle_raw, Area_null):
"""
Looks for "zero-areas", areas that are really small, almost zero. It appends
them to the Area_null list.
"""
for i in range(len(triangle_raw)):
L0 = vertex[triangle_raw[i,1]] - vertex[triangle_raw[i,0]]
L2 = vertex[triangle_raw[i,0]] - vertex[triangle_raw[i,2]]
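        # The triangle's area is half the magnitude of the cross product of two of its edge vectors.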
normal_aux = numpy.cross(L0,L2)
Area_aux = numpy.linalg.norm(normal_aux)/2
if Area_aux<1e-10:
Area_null.append(i)
return Area_null
## Designed for a cube whose faces are aligned with Cartesian coordinates
def read_inputs():
"""
Parse command-line arguments to generate_phi0.
User should provide:
- Problem folder (can be inferred from files if not provided)
- Mesh file (without .vert or .face) for which phi0 is desired.
- x_right : value of dphi/dn in the x_right face.
- x_left : value of dphi/dn in the x_left face.
- y_top : value of dphi/dn in the y_top face.
- y_bottom: value of dphi/dn in the y_bottom face.
- z_front : value of dphi/dn in the z_front face.
- z_back : value of dphi/dn in the z_back face.
"""
parser = ArgumentParser(description='Manage generate_phi0 command line arguments')
parser.add_argument('problem_folder', type=str,
help="Path to folder containing problem files")
parser.add_argument('-m', '--mesh', dest='mesh', type=str, default=None,
help="Path to sensor-brick mesh file")
parser.add_argument('-x_r', '--x_right', dest='x_right', type=float, default=None,
help="charge assigned to x_right face")
parser.add_argument('-x_l', '--x_left', dest='x_left', type=float, default=None,
help="charge assigned to x_left face")
parser.add_argument('-y_t', '--y_top', dest='y_top', type=float, default=None,
help="charge assigned to y_top face")
parser.add_argument('-y_b', '--y_bottom', dest='y_bottom', type=float, default=None,
help="charge assigned to y_bottom face")
parser.add_argument('-z_f', '--z_front', dest='z_front', type=float, default=None,
help="charge assigned to z_front face")
parser.add_argument('-z_b', '--z_back', dest='z_back', type=float, default=None,
help="charge assigned to z_back face")
return parser.parse_args()
args = read_inputs()
meshFile = args.mesh
x_right = args.x_right
x_left = args.x_left
y_top = args.y_top
y_bott = args.y_bottom
z_front = args.z_front
z_back = args.z_back
full_path = args.problem_folder
if not os.path.isdir(full_path):
full_path = os.getcwd() + '/' + full_path
full_path = os.path.normpath(full_path)
os.environ['PYGBE_PROBLEM_FOLDER'] = full_path
vertex = read_vertex(meshFile+'.vert', float)
triangle_raw = read_triangle(meshFile+'.face', 'neumann_surface')
Area_null = []
Area_null = zeroAreas(vertex, triangle_raw, Area_null)
triangle = numpy.delete(triangle_raw, Area_null, 0)
if len(triangle) != len(triangle_raw):
print '%i deleted triangles'%(len(triangle_raw)-len(triangle))
phi0 = numpy.zeros(len(triangle), float)
tri_ctr = numpy.average(vertex[triangle], axis=1)
print len(tri_ctr)
print len(triangle)
max_x = max(tri_ctr[:,0])
min_x = min(tri_ctr[:,0])
max_y = max(tri_ctr[:,1])
min_y = min(tri_ctr[:,1])
max_z = max(tri_ctr[:,2])
min_z = min(tri_ctr[:,2])
for i in range(len(triangle)):
if abs(tri_ctr[i,0]-max_x)<1e-10:
phi0[i] = x_right
if abs(tri_ctr[i,0]-min_x)<1e-10:
phi0[i] = x_left
if abs(tri_ctr[i,1]-max_y)<1e-10:
phi0[i] = y_top
if abs(tri_ctr[i,1]-min_y)<1e-10:
phi0[i] = y_bott
if abs(tri_ctr[i,2]-max_z)<1e-10:
phi0[i] = z_front
if abs(tri_ctr[i,2]-min_z)<1e-10:
phi0[i] = z_back
meshFile = meshFile.rsplit('/', 1)[-1]
faces_values = [x_right, x_left, y_top, y_bott, z_front, z_back]
faces_values = ''.join([str(face) for face in faces_values])
file_out = meshFile+'_'+faces_values+'.phi0'
with open(full_path+'/'+file_out, 'w') as f:
numpy.savetxt(f, phi0) | en | 0.867509 | #!/usr/bin/env python It generates a .phi0 for a sensor brick. The user has to set the values of dphi/dn on each face of the brick. The surface charge \sigma is usually units of C/m^2. However, the code takes as input dphi/dn which has to be in units of electron charge and angstrom^2. dphi/dn = - sigma / epsilon therefore, to convert to the proper units, from sigma to dphi/dn, we have to multiply the value of sigma by (Å)^2, divided it by the charge of the electron q_e = 1.602x10^-19, and finally divided it by epsilon = 80 (dielectric constant of the medium outside, usually water). For example: If the user desires a surface charge sigma = 0.05 C/m^2 then in this code he/she will set up dphi/dn in the faces of the brick, where: dphi/dn = - (0.05 x (1x10^-10)^2)/ (80 x 1.602x10^-19) = -4x10^-5 Looks for "zero-areas", areas that are really small, almost zero. It appends them to Area_null list. ## Designed for a cube which faces are aligned with cartesian coordinates Parse command-line arguments to generate_phi0. User should provide: - Problem folder (can be inferred from files if not provided) - Mesh file (without .vert or .face) which phi0 is desired. - x_right : value of dphi/dn in the x_right face. - x_left : value of dphi/dn in the x_left face. - y_top : value of dphi/dn in the y_top face. - y_bottom: value of dphi/dn in the y_bottom face. - z_front : value of dphi/dn in the z_front face. - z_back : value of dphi/dn in the z_back face. | 4.025391 | 4 |
wieracentral/gen-py/ApplicationToWieraIface/ApplicationToWieraIface.py | LiPengze97/wiera | 0 | 6632882 | <filename>wieracentral/gen-py/ApplicationToWieraIface/ApplicationToWieraIface.py
#
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
class Iface(object):
def startInstances(self, policy):
"""
Parameters:
- policy
"""
pass
def stopInstances(self, key):
"""
Parameters:
- key
"""
pass
def getInstances(self, key):
"""
Parameters:
- key
"""
pass
def getLocalServerList(self):
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def startInstances(self, policy):
"""
Parameters:
- policy
"""
self.send_startInstances(policy)
return self.recv_startInstances()
def send_startInstances(self, policy):
self._oprot.writeMessageBegin('startInstances', TMessageType.CALL, self._seqid)
args = startInstances_args()
args.policy = policy
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_startInstances(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = startInstances_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "startInstances failed: unknown result")
def stopInstances(self, key):
"""
Parameters:
- key
"""
self.send_stopInstances(key)
return self.recv_stopInstances()
def send_stopInstances(self, key):
self._oprot.writeMessageBegin('stopInstances', TMessageType.CALL, self._seqid)
args = stopInstances_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_stopInstances(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = stopInstances_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "stopInstances failed: unknown result")
def getInstances(self, key):
"""
Parameters:
- key
"""
self.send_getInstances(key)
return self.recv_getInstances()
def send_getInstances(self, key):
self._oprot.writeMessageBegin('getInstances', TMessageType.CALL, self._seqid)
args = getInstances_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getInstances(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getInstances_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getInstances failed: unknown result")
def getLocalServerList(self):
self.send_getLocalServerList()
return self.recv_getLocalServerList()
def send_getLocalServerList(self):
self._oprot.writeMessageBegin('getLocalServerList', TMessageType.CALL, self._seqid)
args = getLocalServerList_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getLocalServerList(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getLocalServerList_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getLocalServerList failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["startInstances"] = Processor.process_startInstances
self._processMap["stopInstances"] = Processor.process_stopInstances
self._processMap["getInstances"] = Processor.process_getInstances
self._processMap["getLocalServerList"] = Processor.process_getLocalServerList
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_startInstances(self, seqid, iprot, oprot):
args = startInstances_args()
args.read(iprot)
iprot.readMessageEnd()
result = startInstances_result()
try:
result.success = self._handler.startInstances(args.policy)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("startInstances", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_stopInstances(self, seqid, iprot, oprot):
args = stopInstances_args()
args.read(iprot)
iprot.readMessageEnd()
result = stopInstances_result()
try:
result.success = self._handler.stopInstances(args.key)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("stopInstances", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getInstances(self, seqid, iprot, oprot):
args = getInstances_args()
args.read(iprot)
iprot.readMessageEnd()
result = getInstances_result()
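        # Note: the peer-IP lookup below is not emitted by the Thrift compiler; it appears to be a hand-added
        # tweak that reaches into the framed transport's socket so the handler also receives the caller's IP.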
client_ip = iprot.trans._TFramedTransport__trans.handle.getpeername()[0]
try:
result.success = self._handler.getInstances(args.key, client_ip)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getInstances", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getLocalServerList(self, seqid, iprot, oprot):
args = getLocalServerList_args()
args.read(iprot)
iprot.readMessageEnd()
result = getLocalServerList_result()
try:
result.success = self._handler.getLocalServerList()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getLocalServerList", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class startInstances_args(object):
"""
Attributes:
- policy
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'policy', 'UTF8', None, ), # 1
)
def __init__(self, policy=None,):
self.policy = policy
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.policy = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('startInstances_args')
if self.policy is not None:
oprot.writeFieldBegin('policy', TType.STRING, 1)
oprot.writeString(self.policy.encode('utf-8') if sys.version_info[0] == 2 else self.policy)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class startInstances_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('startInstances_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class stopInstances_args(object):
"""
Attributes:
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', 'UTF8', None, ), # 1
)
def __init__(self, key=None,):
self.key = key
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('stopInstances_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key.encode('utf-8') if sys.version_info[0] == 2 else self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class stopInstances_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('stopInstances_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getInstances_args(object):
"""
Attributes:
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', 'UTF8', None, ), # 1
)
def __init__(self, key=None,):
self.key = key
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getInstances_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key.encode('utf-8') if sys.version_info[0] == 2 else self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getInstances_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getInstances_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getLocalServerList_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getLocalServerList_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getLocalServerList_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getLocalServerList_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| <filename>wieracentral/gen-py/ApplicationToWieraIface/ApplicationToWieraIface.py
#
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
class Iface(object):
def startInstances(self, policy):
"""
Parameters:
- policy
"""
pass
def stopInstances(self, key):
"""
Parameters:
- key
"""
pass
def getInstances(self, key):
"""
Parameters:
- key
"""
pass
def getLocalServerList(self):
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def startInstances(self, policy):
"""
Parameters:
- policy
"""
self.send_startInstances(policy)
return self.recv_startInstances()
def send_startInstances(self, policy):
self._oprot.writeMessageBegin('startInstances', TMessageType.CALL, self._seqid)
args = startInstances_args()
args.policy = policy
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_startInstances(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = startInstances_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "startInstances failed: unknown result")
def stopInstances(self, key):
"""
Parameters:
- key
"""
self.send_stopInstances(key)
return self.recv_stopInstances()
def send_stopInstances(self, key):
self._oprot.writeMessageBegin('stopInstances', TMessageType.CALL, self._seqid)
args = stopInstances_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_stopInstances(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = stopInstances_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "stopInstances failed: unknown result")
def getInstances(self, key):
"""
Parameters:
- key
"""
self.send_getInstances(key)
return self.recv_getInstances()
def send_getInstances(self, key):
self._oprot.writeMessageBegin('getInstances', TMessageType.CALL, self._seqid)
args = getInstances_args()
args.key = key
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getInstances(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getInstances_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getInstances failed: unknown result")
def getLocalServerList(self):
self.send_getLocalServerList()
return self.recv_getLocalServerList()
def send_getLocalServerList(self):
self._oprot.writeMessageBegin('getLocalServerList', TMessageType.CALL, self._seqid)
args = getLocalServerList_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getLocalServerList(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getLocalServerList_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getLocalServerList failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["startInstances"] = Processor.process_startInstances
self._processMap["stopInstances"] = Processor.process_stopInstances
self._processMap["getInstances"] = Processor.process_getInstances
self._processMap["getLocalServerList"] = Processor.process_getLocalServerList
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_startInstances(self, seqid, iprot, oprot):
args = startInstances_args()
args.read(iprot)
iprot.readMessageEnd()
result = startInstances_result()
try:
result.success = self._handler.startInstances(args.policy)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("startInstances", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_stopInstances(self, seqid, iprot, oprot):
args = stopInstances_args()
args.read(iprot)
iprot.readMessageEnd()
result = stopInstances_result()
try:
result.success = self._handler.stopInstances(args.key)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("stopInstances", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getInstances(self, seqid, iprot, oprot):
args = getInstances_args()
args.read(iprot)
iprot.readMessageEnd()
result = getInstances_result()
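        # Note: the peer-IP lookup below is not emitted by the Thrift compiler; it appears to be a hand-added
        # tweak that reaches into the framed transport's socket so the handler also receives the caller's IP.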
client_ip = iprot.trans._TFramedTransport__trans.handle.getpeername()[0]
try:
result.success = self._handler.getInstances(args.key, client_ip)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getInstances", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getLocalServerList(self, seqid, iprot, oprot):
args = getLocalServerList_args()
args.read(iprot)
iprot.readMessageEnd()
result = getLocalServerList_result()
try:
result.success = self._handler.getLocalServerList()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getLocalServerList", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class startInstances_args(object):
"""
Attributes:
- policy
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'policy', 'UTF8', None, ), # 1
)
def __init__(self, policy=None,):
self.policy = policy
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.policy = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('startInstances_args')
if self.policy is not None:
oprot.writeFieldBegin('policy', TType.STRING, 1)
oprot.writeString(self.policy.encode('utf-8') if sys.version_info[0] == 2 else self.policy)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class startInstances_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('startInstances_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class stopInstances_args(object):
"""
Attributes:
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', 'UTF8', None, ), # 1
)
def __init__(self, key=None,):
self.key = key
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('stopInstances_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key.encode('utf-8') if sys.version_info[0] == 2 else self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class stopInstances_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('stopInstances_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getInstances_args(object):
"""
Attributes:
- key
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', 'UTF8', None, ), # 1
)
def __init__(self, key=None,):
self.key = key
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getInstances_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key.encode('utf-8') if sys.version_info[0] == 2 else self.key)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getInstances_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getInstances_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getLocalServerList_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getLocalServerList_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getLocalServerList_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getLocalServerList_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| en | 0.370028 | # # Autogenerated by Thrift Compiler (0.10.0) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string: py # Parameters: - policy Parameters: - key Parameters: - key Parameters: - policy Parameters: - key Parameters: - key # HELPER FUNCTIONS AND STRUCTURES Attributes: - policy # 0 # 1 Attributes: - success # 0 Attributes: - key # 0 # 1 Attributes: - success # 0 Attributes: - key # 0 # 1 Attributes: - success # 0 Attributes: - success # 0 | 1.892869 | 2 |
pyEmitter/Event.py | archanpatkar/pyEmitter | 4 | 6632883 | <reponame>archanpatkar/pyEmitter
from collections import namedtuple
Event = namedtuple('Event', ['name', 'value']);
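# Usage sketch (hypothetical values): evt = Event(name="click", value=42); evt.value == 42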
| from collections import namedtuple
Event = namedtuple('Event', ['name', 'value']); | none | 1 | 2.136976 | 2 |
|
python/code_challenges/linkedListPractice/llpractice.py | brendanwelzien/data-structures-and-algorithms | 0 | 6632884 | class Node:
def __init__(self, value, _next=None):
        self.value = value
self.next = _next
def __repr__(self):
return f"{self.next}'s value is {self.value}"
class linkedList:
def __init__(self):
self.head = None
def insert(self, val):
        self.head = Node(val, self.head)
def includes(self, value):
# check if value exists in Node
current = self.head
while current:
if current.value == value:
return True
current = current.next
return False
    def append_to_tail(self, value):
        # Append a new node at the tail; handles the empty-list case.
        if not self.head:
            self.head = Node(value)
            return
        current = self.head
        while current.next:
            current = current.next
        current.next = Node(value)
# More linked list tomorrow
    def insert_value_before(self, value, newValue):
        # Insert newValue immediately before the first node holding value.
        if not self.head:
            return 'empty value in list'
        if self.head.value == value:
            self.head = Node(newValue, self.head)
            return
        current = self.head
        while current.next:
            if current.next.value == value:
                current.next = Node(newValue, current.next)
                return
            current = current.next
    def insert_value_after(self, value, newValue):
        # Insert newValue immediately after the first node holding value.
        if not self.head:
            return 'empty value in list'
        current = self.head
        while current:
            if current.value == value:
                current.next = Node(newValue, current.next)
                return
            current = current.next
    def check_kth(self, k):
        # Return the value k positions from the end of the list (0 = the tail node).
        if k < 0:
            raise ValueError("improper value")
        new_list = []
        current = self.head
        while current:
            new_list.append(current)
            current = current.next
        if len(new_list) < k:
            raise IndexError("value improper length in list")
        new_list.reverse()
        if k == len(new_list):
            k -= 1
        return new_list[k].value
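# Usage sketch (hypothetical values):
#   ll = linkedList()
#   ll.insert(3); ll.insert(2); ll.insert(1)   # builds 1 -> 2 -> 3
#   ll.includes(2)    # True
#   ll.check_kth(0)   # 3, the value at the tail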
| class Node:
def __init__(self, value, _next=None):
        self.value = value
self.next = _next
def __repr__(self):
return f"{self.next}'s value is {self.value}"
class linkedList:
def __init__(self):
self.head = None
def insert(self, val):
        self.head = Node(val, self.head)
def includes(self, value):
# check if value exists in Node
current = self.head
while current:
if current.value == value:
return True
current = current.next
return False
    def append_to_tail(self, value):
        # Append a new node at the tail; handles the empty-list case.
        if not self.head:
            self.head = Node(value)
            return
        current = self.head
        while current.next:
            current = current.next
        current.next = Node(value)
# More linked list tomorrow
    def insert_value_before(self, value, newValue):
        # Insert newValue immediately before the first node holding value.
        if not self.head:
            return 'empty value in list'
        if self.head.value == value:
            self.head = Node(newValue, self.head)
            return
        current = self.head
        while current.next:
            if current.next.value == value:
                current.next = Node(newValue, current.next)
                return
            current = current.next
    def insert_value_after(self, value, newValue):
        # Insert newValue immediately after the first node holding value.
        if not self.head:
            return 'empty value in list'
        current = self.head
        while current:
            if current.value == value:
                current.next = Node(newValue, current.next)
                return
            current = current.next
    def check_kth(self, k):
        # Return the value k positions from the end of the list (0 = the tail node).
        if k < 0:
            raise ValueError("improper value")
        new_list = []
        current = self.head
        while current:
            new_list.append(current)
            current = current.next
        if len(new_list) < k:
            raise IndexError("value improper length in list")
        new_list.reverse()
        if k == len(new_list):
            k -= 1
        return new_list[k].value
| en | 0.36443 | # check if value exists in Node # More linked list tomorrow | 4.06169 | 4 |
5.py | daniiarkhodzhaev-at/lab2 | 0 | 6632885 | <gh_stars>0
#!/usr/bin/python3
import turtle
def draw_square(size: int) -> int:
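    # Draws a square of side length size, centred on the origin.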
turtle.penup()
    turtle.goto(-size / 2, -size / 2)
turtle.pendown()
for i in range(4):
turtle.forward(size)
turtle.left(90)
return 0
def main() -> int:
turtle.shape("turtle")
for i in range(1, 11):
draw_square(50 * i)
return 0
if (__name__ == "__main__"):
main()
| #!/usr/bin/python3
import turtle
def draw_square(size: int) -> int:
turtle.penup()
    turtle.goto(-size / 2, -size / 2)
turtle.pendown()
for i in range(4):
turtle.forward(size)
turtle.left(90)
return 0
def main() -> int:
turtle.shape("turtle")
for i in range(1, 11):
draw_square(50 * i)
return 0
if (__name__ == "__main__"):
main() | fr | 0.386793 | #!/usr/bin/python3 | 4.024801 | 4 |
tests/integration/cqlengine/model/test_polymorphism.py | beobal/python-driver | 1 | 6632886 | <filename>tests/integration/cqlengine/model/test_polymorphism.py<gh_stars>1-10
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import mock
from cassandra.cqlengine import columns
from cassandra.cqlengine import models
from cassandra.cqlengine.connection import get_session
from tests.integration.cqlengine.base import BaseCassEngTestCase
from cassandra.cqlengine import management
class TestPolymorphicClassConstruction(BaseCassEngTestCase):
def test_multiple_polymorphic_key_failure(self):
""" Tests that defining a model with more than one polymorphic key fails """
with self.assertRaises(models.ModelDefinitionException):
class M(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
type2 = columns.Integer(polymorphic_key=True)
def test_no_polymorphic_key_column_failure(self):
with self.assertRaises(models.ModelDefinitionException):
class M(models.Model):
__polymorphic_key__ = 1
def test_polymorphic_key_inheritance(self):
""" Tests that polymorphic_key attribute is not inherited """
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
class M1(Base):
__polymorphic_key__ = 1
class M2(M1):
pass
assert M2.__polymorphic_key__ is None
def test_polymorphic_metaclass(self):
""" Tests that the model meta class configures polymorphic models properly """
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
class M1(Base):
__polymorphic_key__ = 1
assert Base._is_polymorphic
assert M1._is_polymorphic
assert Base._is_polymorphic_base
assert not M1._is_polymorphic_base
assert Base._discriminator_column is Base._columns['type1']
assert M1._discriminator_column is M1._columns['type1']
assert Base._discriminator_column_name == 'type1'
assert M1._discriminator_column_name == 'type1'
def test_table_names_are_inherited_from_poly_base(self):
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
class M1(Base):
__polymorphic_key__ = 1
assert Base.column_family_name() == M1.column_family_name()
def test_collection_columns_cant_be_polymorphic_keys(self):
with self.assertRaises(models.ModelDefinitionException):
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Set(columns.Integer, polymorphic_key=True)
class PolyBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(polymorphic_key=True)
class Poly1(PolyBase):
__polymorphic_key__ = 1
data1 = columns.Text()
class Poly2(PolyBase):
__polymorphic_key__ = 2
data2 = columns.Text()
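# Poly1 and Poly2 share PolyBase's table; row_type records which subclass wrote each row.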
class TestPolymorphicModel(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestPolymorphicModel, cls).setUpClass()
management.sync_table(Poly1)
management.sync_table(Poly2)
@classmethod
def tearDownClass(cls):
super(TestPolymorphicModel, cls).tearDownClass()
management.drop_table(Poly1)
management.drop_table(Poly2)
def test_saving_base_model_fails(self):
with self.assertRaises(models.PolymorphicModelException):
PolyBase.create()
def test_saving_subclass_saves_poly_key(self):
p1 = Poly1.create(data1='pickle')
p2 = Poly2.create(data2='bacon')
assert p1.row_type == Poly1.__polymorphic_key__
assert p2.row_type == Poly2.__polymorphic_key__
def test_query_deserialization(self):
p1 = Poly1.create(data1='pickle')
p2 = Poly2.create(data2='bacon')
p1r = PolyBase.get(partition=p1.partition)
p2r = PolyBase.get(partition=p2.partition)
assert isinstance(p1r, Poly1)
assert isinstance(p2r, Poly2)
def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(self):
p1 = Poly1.create()
session = get_session()
with mock.patch.object(session, 'execute') as m:
Poly1.objects(partition=p1.partition).delete()
# make sure our polymorphic key isn't in the CQL
# not sure how we would even get here if it was in there
# since the CQL would fail.
self.assertNotIn("row_type", m.call_args[0][0].query_string)
class UnindexedPolyBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
cluster = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(polymorphic_key=True)
class UnindexedPoly1(UnindexedPolyBase):
__polymorphic_key__ = 1
data1 = columns.Text()
class UnindexedPoly2(UnindexedPolyBase):
__polymorphic_key__ = 2
data2 = columns.Text()
class UnindexedPoly3(UnindexedPoly2):
__polymorphic_key__ = 3
data3 = columns.Text()
class TestUnindexedPolymorphicQuery(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestUnindexedPolymorphicQuery, cls).setUpClass()
management.sync_table(UnindexedPoly1)
management.sync_table(UnindexedPoly2)
management.sync_table(UnindexedPoly3)
cls.p1 = UnindexedPoly1.create(data1='pickle')
cls.p2 = UnindexedPoly2.create(partition=cls.p1.partition, data2='bacon')
cls.p3 = UnindexedPoly3.create(partition=cls.p1.partition, data3='turkey')
@classmethod
def tearDownClass(cls):
super(TestUnindexedPolymorphicQuery, cls).tearDownClass()
management.drop_table(UnindexedPoly1)
management.drop_table(UnindexedPoly2)
management.drop_table(UnindexedPoly3)
def test_non_conflicting_type_results_work(self):
p1, p2, p3 = self.p1, self.p2, self.p3
assert len(list(UnindexedPoly1.objects(partition=p1.partition, cluster=p1.cluster))) == 1
assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster=p2.cluster))) == 1
assert len(list(UnindexedPoly3.objects(partition=p1.partition, cluster=p3.cluster))) == 1
def test_subclassed_model_results_work_properly(self):
p1, p2, p3 = self.p1, self.p2, self.p3
assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster__in=[p2.cluster, p3.cluster]))) == 2
def test_conflicting_type_results(self):
with self.assertRaises(models.PolymorphicModelException):
list(UnindexedPoly1.objects(partition=self.p1.partition))
with self.assertRaises(models.PolymorphicModelException):
list(UnindexedPoly2.objects(partition=self.p1.partition))
class IndexedPolyBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
cluster = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(polymorphic_key=True, index=True)
class IndexedPoly1(IndexedPolyBase):
__polymorphic_key__ = 1
data1 = columns.Text()
class IndexedPoly2(IndexedPolyBase):
__polymorphic_key__ = 2
data2 = columns.Text()
class TestIndexedPolymorphicQuery(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestIndexedPolymorphicQuery, cls).setUpClass()
management.sync_table(IndexedPoly1)
management.sync_table(IndexedPoly2)
cls.p1 = IndexedPoly1.create(data1='pickle')
cls.p2 = IndexedPoly2.create(partition=cls.p1.partition, data2='bacon')
@classmethod
def tearDownClass(cls):
super(TestIndexedPolymorphicQuery, cls).tearDownClass()
management.drop_table(IndexedPoly1)
management.drop_table(IndexedPoly2)
def test_success_case(self):
assert len(list(IndexedPoly1.objects(partition=self.p1.partition))) == 1
assert len(list(IndexedPoly2.objects(partition=self.p1.partition))) == 1
#########
# Repeated tests for 'discriminator' properties, following deprecation of polymorphic variants
#########
class TestInheritanceClassConstruction(BaseCassEngTestCase):
def test_multiple_discriminator_value_failure(self):
""" Tests that defining a model with more than one discriminator column fails """
with self.assertRaises(models.ModelDefinitionException):
class M(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(discriminator_column=True)
type2 = columns.Integer(discriminator_column=True)
def test_no_discriminator_column_failure(self):
with self.assertRaises(models.ModelDefinitionException):
class M(models.Model):
__discriminator_value__ = 1
def test_discriminator_value_inheritance(self):
""" Tests that discriminator_column attribute is not inherited """
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(discriminator_column=True)
class M1(Base):
__discriminator_value__ = 1
class M2(M1):
pass
assert M2.__discriminator_value__ is None
def test_inheritance_metaclass(self):
""" Tests that the model meta class configures inherited models properly """
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(discriminator_column=True)
class M1(Base):
__discriminator_value__ = 1
assert Base._is_polymorphic
assert M1._is_polymorphic
assert Base._is_polymorphic_base
assert not M1._is_polymorphic_base
assert Base._discriminator_column is Base._columns['type1']
assert M1._discriminator_column is M1._columns['type1']
assert Base._discriminator_column_name == 'type1'
assert M1._discriminator_column_name == 'type1'
def test_table_names_are_inherited_from_base(self):
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(discriminator_column=True)
class M1(Base):
__discriminator_value__ = 1
assert Base.column_family_name() == M1.column_family_name()
def test_collection_columns_cant_be_discriminator_column(self):
with self.assertRaises(models.ModelDefinitionException):
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Set(columns.Integer, discriminator_column=True)
class InheritBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(discriminator_column=True)
class Inherit1(InheritBase):
__discriminator_value__ = 1
data1 = columns.Text()
class Inherit2(InheritBase):
__discriminator_value__ = 2
data2 = columns.Text()
class TestInheritanceModel(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestInheritanceModel, cls).setUpClass()
management.sync_table(Inherit1)
management.sync_table(Inherit2)
@classmethod
def tearDownClass(cls):
super(TestInheritanceModel, cls).tearDownClass()
management.drop_table(Inherit1)
management.drop_table(Inherit2)
def test_saving_base_model_fails(self):
with self.assertRaises(models.PolymorphicModelException):
InheritBase.create()
def test_saving_subclass_saves_disc_value(self):
p1 = Inherit1.create(data1='pickle')
p2 = Inherit2.create(data2='bacon')
assert p1.row_type == Inherit1.__discriminator_value__
assert p2.row_type == Inherit2.__discriminator_value__
def test_query_deserialization(self):
p1 = Inherit1.create(data1='pickle')
p2 = Inherit2.create(data2='bacon')
p1r = InheritBase.get(partition=p1.partition)
p2r = InheritBase.get(partition=p2.partition)
assert isinstance(p1r, Inherit1)
assert isinstance(p2r, Inherit2)
def test_delete_on_subclass_does_not_include_disc_value(self):
p1 = Inherit1.create()
session = get_session()
with mock.patch.object(session, 'execute') as m:
Inherit1.objects(partition=p1.partition).delete()
# make sure our discriminator value isn't in the CQL
# not sure how we would even get here if it was in there
# since the CQL would fail.
self.assertNotIn("row_type", m.call_args[0][0].query_string)
class UnindexedInheritBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
cluster = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(discriminator_column=True)
class UnindexedInherit1(UnindexedInheritBase):
__discriminator_value__ = 1
data1 = columns.Text()
class UnindexedInherit2(UnindexedInheritBase):
__discriminator_value__ = 2
data2 = columns.Text()
class UnindexedInherit3(UnindexedInherit2):
__discriminator_value__ = 3
data3 = columns.Text()
class TestUnindexedInheritanceQuery(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestUnindexedInheritanceQuery, cls).setUpClass()
management.sync_table(UnindexedInherit1)
management.sync_table(UnindexedInherit2)
management.sync_table(UnindexedInherit3)
cls.p1 = UnindexedInherit1.create(data1='pickle')
cls.p2 = UnindexedInherit2.create(partition=cls.p1.partition, data2='bacon')
cls.p3 = UnindexedInherit3.create(partition=cls.p1.partition, data3='turkey')
@classmethod
def tearDownClass(cls):
super(TestUnindexedInheritanceQuery, cls).tearDownClass()
management.drop_table(UnindexedInherit1)
management.drop_table(UnindexedInherit2)
management.drop_table(UnindexedInherit3)
def test_non_conflicting_type_results_work(self):
p1, p2, p3 = self.p1, self.p2, self.p3
assert len(list(UnindexedInherit1.objects(partition=p1.partition, cluster=p1.cluster))) == 1
assert len(list(UnindexedInherit2.objects(partition=p1.partition, cluster=p2.cluster))) == 1
assert len(list(UnindexedInherit3.objects(partition=p1.partition, cluster=p3.cluster))) == 1
def test_subclassed_model_results_work_properly(self):
p1, p2, p3 = self.p1, self.p2, self.p3
assert len(list(UnindexedInherit2.objects(partition=p1.partition, cluster__in=[p2.cluster, p3.cluster]))) == 2
def test_conflicting_type_results(self):
with self.assertRaises(models.PolymorphicModelException):
list(UnindexedInherit1.objects(partition=self.p1.partition))
with self.assertRaises(models.PolymorphicModelException):
list(UnindexedInherit2.objects(partition=self.p1.partition))
class IndexedInheritBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
cluster = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(discriminator_column=True, index=True)
class IndexedInherit1(IndexedInheritBase):
__discriminator_value__ = 1
data1 = columns.Text()
class IndexedInherit2(IndexedInheritBase):
__discriminator_value__ = 2
data2 = columns.Text()
class TestIndexedInheritanceQuery(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestIndexedInheritanceQuery, cls).setUpClass()
management.sync_table(IndexedInherit1)
management.sync_table(IndexedInherit2)
cls.p1 = IndexedInherit1.create(data1='pickle')
cls.p2 = IndexedInherit2.create(partition=cls.p1.partition, data2='bacon')
@classmethod
def tearDownClass(cls):
super(TestIndexedInheritanceQuery, cls).tearDownClass()
management.drop_table(IndexedInherit1)
management.drop_table(IndexedInherit2)
def test_success_case(self):
assert len(list(IndexedInherit1.objects(partition=self.p1.partition))) == 1
assert len(list(IndexedInherit2.objects(partition=self.p1.partition))) == 1
| <filename>tests/integration/cqlengine/model/test_polymorphism.py<gh_stars>1-10
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import mock
from cassandra.cqlengine import columns
from cassandra.cqlengine import models
from cassandra.cqlengine.connection import get_session
from tests.integration.cqlengine.base import BaseCassEngTestCase
from cassandra.cqlengine import management
class TestPolymorphicClassConstruction(BaseCassEngTestCase):
def test_multiple_polymorphic_key_failure(self):
""" Tests that defining a model with more than one polymorphic key fails """
with self.assertRaises(models.ModelDefinitionException):
class M(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
type2 = columns.Integer(polymorphic_key=True)
def test_no_polymorphic_key_column_failure(self):
with self.assertRaises(models.ModelDefinitionException):
class M(models.Model):
__polymorphic_key__ = 1
def test_polymorphic_key_inheritance(self):
""" Tests that polymorphic_key attribute is not inherited """
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
class M1(Base):
__polymorphic_key__ = 1
class M2(M1):
pass
assert M2.__polymorphic_key__ is None
def test_polymorphic_metaclass(self):
""" Tests that the model meta class configures polymorphic models properly """
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
class M1(Base):
__polymorphic_key__ = 1
assert Base._is_polymorphic
assert M1._is_polymorphic
assert Base._is_polymorphic_base
assert not M1._is_polymorphic_base
assert Base._discriminator_column is Base._columns['type1']
assert M1._discriminator_column is M1._columns['type1']
assert Base._discriminator_column_name == 'type1'
assert M1._discriminator_column_name == 'type1'
def test_table_names_are_inherited_from_poly_base(self):
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
class M1(Base):
__polymorphic_key__ = 1
assert Base.column_family_name() == M1.column_family_name()
def test_collection_columns_cant_be_polymorphic_keys(self):
with self.assertRaises(models.ModelDefinitionException):
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Set(columns.Integer, polymorphic_key=True)
class PolyBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(polymorphic_key=True)
class Poly1(PolyBase):
__polymorphic_key__ = 1
data1 = columns.Text()
class Poly2(PolyBase):
__polymorphic_key__ = 2
data2 = columns.Text()
class TestPolymorphicModel(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestPolymorphicModel, cls).setUpClass()
management.sync_table(Poly1)
management.sync_table(Poly2)
@classmethod
def tearDownClass(cls):
super(TestPolymorphicModel, cls).tearDownClass()
management.drop_table(Poly1)
management.drop_table(Poly2)
def test_saving_base_model_fails(self):
with self.assertRaises(models.PolymorphicModelException):
PolyBase.create()
def test_saving_subclass_saves_poly_key(self):
p1 = Poly1.create(data1='pickle')
p2 = Poly2.create(data2='bacon')
assert p1.row_type == Poly1.__polymorphic_key__
assert p2.row_type == Poly2.__polymorphic_key__
def test_query_deserialization(self):
p1 = Poly1.create(data1='pickle')
p2 = Poly2.create(data2='bacon')
p1r = PolyBase.get(partition=p1.partition)
p2r = PolyBase.get(partition=p2.partition)
assert isinstance(p1r, Poly1)
assert isinstance(p2r, Poly2)
def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(self):
p1 = Poly1.create()
session = get_session()
with mock.patch.object(session, 'execute') as m:
Poly1.objects(partition=p1.partition).delete()
# make sure our polymorphic key isn't in the CQL
# not sure how we would even get here if it was in there
# since the CQL would fail.
self.assertNotIn("row_type", m.call_args[0][0].query_string)
class UnindexedPolyBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
cluster = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(polymorphic_key=True)
class UnindexedPoly1(UnindexedPolyBase):
__polymorphic_key__ = 1
data1 = columns.Text()
class UnindexedPoly2(UnindexedPolyBase):
__polymorphic_key__ = 2
data2 = columns.Text()
class UnindexedPoly3(UnindexedPoly2):
__polymorphic_key__ = 3
data3 = columns.Text()
class TestUnindexedPolymorphicQuery(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestUnindexedPolymorphicQuery, cls).setUpClass()
management.sync_table(UnindexedPoly1)
management.sync_table(UnindexedPoly2)
management.sync_table(UnindexedPoly3)
cls.p1 = UnindexedPoly1.create(data1='pickle')
cls.p2 = UnindexedPoly2.create(partition=cls.p1.partition, data2='bacon')
cls.p3 = UnindexedPoly3.create(partition=cls.p1.partition, data3='turkey')
@classmethod
def tearDownClass(cls):
super(TestUnindexedPolymorphicQuery, cls).tearDownClass()
management.drop_table(UnindexedPoly1)
management.drop_table(UnindexedPoly2)
management.drop_table(UnindexedPoly3)
def test_non_conflicting_type_results_work(self):
p1, p2, p3 = self.p1, self.p2, self.p3
assert len(list(UnindexedPoly1.objects(partition=p1.partition, cluster=p1.cluster))) == 1
assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster=p2.cluster))) == 1
assert len(list(UnindexedPoly3.objects(partition=p1.partition, cluster=p3.cluster))) == 1
def test_subclassed_model_results_work_properly(self):
p1, p2, p3 = self.p1, self.p2, self.p3
assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster__in=[p2.cluster, p3.cluster]))) == 2
def test_conflicting_type_results(self):
with self.assertRaises(models.PolymorphicModelException):
list(UnindexedPoly1.objects(partition=self.p1.partition))
with self.assertRaises(models.PolymorphicModelException):
list(UnindexedPoly2.objects(partition=self.p1.partition))
class IndexedPolyBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
cluster = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(polymorphic_key=True, index=True)
class IndexedPoly1(IndexedPolyBase):
__polymorphic_key__ = 1
data1 = columns.Text()
class IndexedPoly2(IndexedPolyBase):
__polymorphic_key__ = 2
data2 = columns.Text()
class TestIndexedPolymorphicQuery(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestIndexedPolymorphicQuery, cls).setUpClass()
management.sync_table(IndexedPoly1)
management.sync_table(IndexedPoly2)
cls.p1 = IndexedPoly1.create(data1='pickle')
cls.p2 = IndexedPoly2.create(partition=cls.p1.partition, data2='bacon')
@classmethod
def tearDownClass(cls):
super(TestIndexedPolymorphicQuery, cls).tearDownClass()
management.drop_table(IndexedPoly1)
management.drop_table(IndexedPoly2)
def test_success_case(self):
assert len(list(IndexedPoly1.objects(partition=self.p1.partition))) == 1
assert len(list(IndexedPoly2.objects(partition=self.p1.partition))) == 1
#########
# Repeated tests for 'discriminator' properties, following deprecation of polymorphic variants
#########
class TestInheritanceClassConstruction(BaseCassEngTestCase):
def test_multiple_discriminator_value_failure(self):
""" Tests that defining a model with more than one discriminator column fails """
with self.assertRaises(models.ModelDefinitionException):
class M(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(discriminator_column=True)
type2 = columns.Integer(discriminator_column=True)
def test_no_discriminator_column_failure(self):
with self.assertRaises(models.ModelDefinitionException):
class M(models.Model):
__discriminator_value__ = 1
def test_discriminator_value_inheritance(self):
""" Tests that discriminator_column attribute is not inherited """
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(discriminator_column=True)
class M1(Base):
__discriminator_value__ = 1
class M2(M1):
pass
assert M2.__discriminator_value__ is None
def test_inheritance_metaclass(self):
""" Tests that the model meta class configures inherited models properly """
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(discriminator_column=True)
class M1(Base):
__discriminator_value__ = 1
assert Base._is_polymorphic
assert M1._is_polymorphic
assert Base._is_polymorphic_base
assert not M1._is_polymorphic_base
assert Base._discriminator_column is Base._columns['type1']
assert M1._discriminator_column is M1._columns['type1']
assert Base._discriminator_column_name == 'type1'
assert M1._discriminator_column_name == 'type1'
def test_table_names_are_inherited_from_base(self):
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(discriminator_column=True)
class M1(Base):
__discriminator_value__ = 1
assert Base.column_family_name() == M1.column_family_name()
def test_collection_columns_cant_be_discriminator_column(self):
with self.assertRaises(models.ModelDefinitionException):
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Set(columns.Integer, discriminator_column=True)
class InheritBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(discriminator_column=True)
class Inherit1(InheritBase):
__discriminator_value__ = 1
data1 = columns.Text()
class Inherit2(InheritBase):
__discriminator_value__ = 2
data2 = columns.Text()
class TestInheritanceModel(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestInheritanceModel, cls).setUpClass()
management.sync_table(Inherit1)
management.sync_table(Inherit2)
@classmethod
def tearDownClass(cls):
super(TestInheritanceModel, cls).tearDownClass()
management.drop_table(Inherit1)
management.drop_table(Inherit2)
def test_saving_base_model_fails(self):
with self.assertRaises(models.PolymorphicModelException):
InheritBase.create()
def test_saving_subclass_saves_disc_value(self):
p1 = Inherit1.create(data1='pickle')
p2 = Inherit2.create(data2='bacon')
assert p1.row_type == Inherit1.__discriminator_value__
assert p2.row_type == Inherit2.__discriminator_value__
def test_query_deserialization(self):
p1 = Inherit1.create(data1='pickle')
p2 = Inherit2.create(data2='bacon')
p1r = InheritBase.get(partition=p1.partition)
p2r = InheritBase.get(partition=p2.partition)
assert isinstance(p1r, Inherit1)
assert isinstance(p2r, Inherit2)
def test_delete_on_subclass_does_not_include_disc_value(self):
p1 = Inherit1.create()
session = get_session()
with mock.patch.object(session, 'execute') as m:
Inherit1.objects(partition=p1.partition).delete()
# make sure our discriminator value isn't in the CQL
# not sure how we would even get here if it was in there
# since the CQL would fail.
self.assertNotIn("row_type", m.call_args[0][0].query_string)
class UnindexedInheritBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
cluster = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(discriminator_column=True)
class UnindexedInherit1(UnindexedInheritBase):
__discriminator_value__ = 1
data1 = columns.Text()
class UnindexedInherit2(UnindexedInheritBase):
__discriminator_value__ = 2
data2 = columns.Text()
class UnindexedInherit3(UnindexedInherit2):
__discriminator_value__ = 3
data3 = columns.Text()
class TestUnindexedInheritanceQuery(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestUnindexedInheritanceQuery, cls).setUpClass()
management.sync_table(UnindexedInherit1)
management.sync_table(UnindexedInherit2)
management.sync_table(UnindexedInherit3)
cls.p1 = UnindexedInherit1.create(data1='pickle')
cls.p2 = UnindexedInherit2.create(partition=cls.p1.partition, data2='bacon')
cls.p3 = UnindexedInherit3.create(partition=cls.p1.partition, data3='turkey')
@classmethod
def tearDownClass(cls):
super(TestUnindexedInheritanceQuery, cls).tearDownClass()
management.drop_table(UnindexedInherit1)
management.drop_table(UnindexedInherit2)
management.drop_table(UnindexedInherit3)
def test_non_conflicting_type_results_work(self):
p1, p2, p3 = self.p1, self.p2, self.p3
assert len(list(UnindexedInherit1.objects(partition=p1.partition, cluster=p1.cluster))) == 1
assert len(list(UnindexedInherit2.objects(partition=p1.partition, cluster=p2.cluster))) == 1
assert len(list(UnindexedInherit3.objects(partition=p1.partition, cluster=p3.cluster))) == 1
def test_subclassed_model_results_work_properly(self):
p1, p2, p3 = self.p1, self.p2, self.p3
assert len(list(UnindexedInherit2.objects(partition=p1.partition, cluster__in=[p2.cluster, p3.cluster]))) == 2
def test_conflicting_type_results(self):
with self.assertRaises(models.PolymorphicModelException):
list(UnindexedInherit1.objects(partition=self.p1.partition))
with self.assertRaises(models.PolymorphicModelException):
list(UnindexedInherit2.objects(partition=self.p1.partition))
class IndexedInheritBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
cluster = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(discriminator_column=True, index=True)
class IndexedInherit1(IndexedInheritBase):
__discriminator_value__ = 1
data1 = columns.Text()
class IndexedInherit2(IndexedInheritBase):
__discriminator_value__ = 2
data2 = columns.Text()
class TestIndexedInheritanceQuery(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestIndexedInheritanceQuery, cls).setUpClass()
management.sync_table(IndexedInherit1)
management.sync_table(IndexedInherit2)
cls.p1 = IndexedInherit1.create(data1='pickle')
cls.p2 = IndexedInherit2.create(partition=cls.p1.partition, data2='bacon')
@classmethod
def tearDownClass(cls):
super(TestIndexedInheritanceQuery, cls).tearDownClass()
management.drop_table(IndexedInherit1)
management.drop_table(IndexedInherit2)
def test_success_case(self):
assert len(list(IndexedInherit1.objects(partition=self.p1.partition))) == 1
assert len(list(IndexedInherit2.objects(partition=self.p1.partition))) == 1
| en | 0.929759 | # Copyright 2015 DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests that defining a model with more than one polymorphic key fails Tests that polymorphic_key attribute is not inherited Tests that the model meta class configures polymorphic models properly # make sure our polymorphic key isn't in the CQL # not sure how we would even get here if it was in there # since the CQL would fail. ######### # Repeated tests for 'discriminator' properties, following deprecation of polymorphic variants ######### Tests that defining a model with more than one discriminator column fails Tests that discriminator_column attribute is not inherited Tests that the model meta class configures inherited models properly # make sure our discriminator value isn't in the CQL # not sure how we would even get here if it was in there # since the CQL would fail. | 2.125828 | 2 |
tests/unit/test_utils.py | ag0n1k/notion-bot | 0 | 6632887 | <gh_stars>0
from nbot.utils import get_domain, get_omdb_id, create_buttons, MetaSingleton
def test_domain():
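    # get_domain is expected to strip the scheme, any leading "www.", and the path.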
assert get_domain("http://youtube.com/aw2asd/") == "youtube.com"
assert get_domain("http://www.youtube.com/aw2asd/") == "youtube.com"
def test_positive_omdb_id():
assert get_omdb_id("https://www.imdb.com/title/tt0993840/") == "tt0993840"
assert get_omdb_id("https://www.imdb.com/title/tt6079772/?ref_=nv_sr_srsg_0") == "tt6079772"
assert get_omdb_id("https://m.imdb.com/title/tt7888964/") == "tt7888964"
assert get_omdb_id("https://m.imdb.com/title/tt1865505/?ref_=fn_al_tt_0") == "tt1865505"
def test_negative_omdb_id():
assert get_omdb_id("https://www.imdb.com") is None
assert get_omdb_id("https://m.imdb.com/") is None
def test_buttons():
assert len(create_buttons([1, 2, 3])) == 2
assert len(create_buttons([1, 2, 3], 3)) == 2
assert len(create_buttons([1, 2, 3, 4, 5, 6])) == 3
assert len(create_buttons([1, 2, 3, 4, 5, 6, 7, 8, 9])) == 4
assert len(create_buttons([1, 2, 3])[0]) == 2
assert len(create_buttons([1, 2, 3], 3)[0]) == 2
assert len(create_buttons([1, 2, 3, 4, 5, 6])[0]) == 2
assert len(create_buttons([1, 2, 3, 4, 5, 6, 7, 8, 9])[0]) == 2
assert len(create_buttons([1, 2, 3, 4], 4)[0]) == 3
assert len(create_buttons([1, 2, 3, 4, 5, 6], 4)[0]) == 3
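# MetaSingleton is expected to hand back one shared instance for every TestMeta() call.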
class TestMeta(metaclass=MetaSingleton):
def __init__(self):
self.a = 5
def test_meta_singleton():
a = TestMeta()
b = TestMeta()
assert a == b
assert a.a == b.a
| from nbot.utils import get_domain, get_omdb_id, create_buttons, MetaSingleton
def test_domain():
assert get_domain("http://youtube.com/aw2asd/") == "youtube.com"
assert get_domain("http://www.youtube.com/aw2asd/") == "youtube.com"
def test_positive_omdb_id():
assert get_omdb_id("https://www.imdb.com/title/tt0993840/") == "tt0993840"
assert get_omdb_id("https://www.imdb.com/title/tt6079772/?ref_=nv_sr_srsg_0") == "tt6079772"
assert get_omdb_id("https://m.imdb.com/title/tt7888964/") == "tt7888964"
assert get_omdb_id("https://m.imdb.com/title/tt1865505/?ref_=fn_al_tt_0") == "tt1865505"
def test_negative_omdb_id():
assert get_omdb_id("https://www.imdb.com") is None
assert get_omdb_id("https://m.imdb.com/") is None
def test_buttons():
assert len(create_buttons([1, 2, 3])) == 2
assert len(create_buttons([1, 2, 3], 3)) == 2
assert len(create_buttons([1, 2, 3, 4, 5, 6])) == 3
assert len(create_buttons([1, 2, 3, 4, 5, 6, 7, 8, 9])) == 4
assert len(create_buttons([1, 2, 3])[0]) == 2
assert len(create_buttons([1, 2, 3], 3)[0]) == 2
assert len(create_buttons([1, 2, 3, 4, 5, 6])[0]) == 2
assert len(create_buttons([1, 2, 3, 4, 5, 6, 7, 8, 9])[0]) == 2
assert len(create_buttons([1, 2, 3, 4], 4)[0]) == 3
assert len(create_buttons([1, 2, 3, 4, 5, 6], 4)[0]) == 3
class TestMeta(metaclass=MetaSingleton):
def __init__(self):
self.a = 5
def test_meta_singleton():
a = TestMeta()
b = TestMeta()
assert a == b
assert a.a == b.a | none | 1 | 2.239746 | 2 |
|
Bindings/Python/examples/Moco/exampleHangingMuscle.py | gmiilp1318/opensim-core | 0 | 6632888 | # -------------------------------------------------------------------------- #
# OpenSim Moco: exampleHangingMuscle.py #
# -------------------------------------------------------------------------- #
# Copyright (c) 2020 Stanford University and the Authors #
# #
# Author(s): <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
# This example includes a point mass hanging by a muscle (+x is downward),
# and shows how to use MocoStudy with a model that includes a muscle.
# Additionally, this example shows how to use OpenSim's Analyses with a
# MocoSolution.
# The trajectory optimization problem is to lift the point mass by a small
# distance in minimum time.
import os
import opensim as osim
def createHangingMuscleModel(ignore_activation_dynamics,
ignore_tendon_compliance):
model = osim.Model()
model.setName("hanging_muscle")
model.set_gravity(osim.Vec3(9.81, 0, 0))
body = osim.Body("body", 0.5, osim.Vec3(0), osim.Inertia(1))
model.addComponent(body)
# Allows translation along x.
joint = osim.SliderJoint("joint", model.getGround(), body)
coord = joint.updCoordinate()
coord.setName("height")
model.addComponent(joint)
# The point mass is supported by a muscle.
# The DeGrooteFregly2016Muscle is the only muscle model in OpenSim that
# has been tested with Moco.
actu = osim.DeGrooteFregly2016Muscle()
actu.setName("muscle")
actu.set_max_isometric_force(30.0)
actu.set_optimal_fiber_length(0.10)
actu.set_tendon_slack_length(0.05)
actu.set_tendon_strain_at_one_norm_force(0.10)
actu.set_ignore_activation_dynamics(ignore_activation_dynamics)
actu.set_ignore_tendon_compliance(ignore_tendon_compliance)
actu.set_fiber_damping(0.01)
# The DeGrooteFregly2016Muscle is the only muscle model in OpenSim that
# can express its tendon compliance dynamics using an implicit
# differential equation.
actu.set_tendon_compliance_dynamics_mode("implicit")
actu.set_max_contraction_velocity(10)
actu.set_pennation_angle_at_optimal(0.10)
actu.addNewPathPoint("origin", model.updGround(), osim.Vec3(0))
actu.addNewPathPoint("insertion", body, osim.Vec3(0))
model.addForce(actu)
# Add metabolics probes: one for the total metabolic rate,
# and one for each term in the metabolics model.
probe = osim.Umberger2010MuscleMetabolicsProbe()
probe.setName("metabolics")
probe.addMuscle("muscle", 0.5)
model.addProbe(probe)
probe = osim.Umberger2010MuscleMetabolicsProbe()
probe.setName("activation_maintenance_rate")
probe.set_activation_maintenance_rate_on(True)
probe.set_shortening_rate_on(False)
probe.set_basal_rate_on(False)
probe.set_mechanical_work_rate_on(False)
probe.addMuscle("muscle", 0.5)
model.addProbe(probe)
probe = osim.Umberger2010MuscleMetabolicsProbe()
probe.setName("shortening_rate")
probe.set_activation_maintenance_rate_on(False)
probe.set_shortening_rate_on(True)
probe.set_basal_rate_on(False)
probe.set_mechanical_work_rate_on(False)
probe.addMuscle("muscle", 0.5)
    model.addProbe(probe)
probe = osim.Umberger2010MuscleMetabolicsProbe()
probe.setName("basal_rate")
probe.set_activation_maintenance_rate_on(False)
probe.set_shortening_rate_on(False)
probe.set_basal_rate_on(True)
probe.set_mechanical_work_rate_on(False)
probe.addMuscle("muscle", 0.5)
model.addProbe(probe)
probe = osim.Umberger2010MuscleMetabolicsProbe()
probe.setName("mechanical_work_rate")
probe.set_activation_maintenance_rate_on(False)
probe.set_shortening_rate_on(False)
probe.set_basal_rate_on(False)
probe.set_mechanical_work_rate_on(True)
probe.addMuscle("muscle", 0.5)
model.addProbe(probe)
body.attachGeometry(osim.Sphere(0.05))
model.finalizeConnections()
return model
ignore_activation_dynamics = False
ignore_tendon_compliance = False
model = createHangingMuscleModel(ignore_activation_dynamics,
ignore_tendon_compliance)
model.printToXML("hanging_muscle.osim")
study = osim.MocoStudy()
problem = study.updProblem()
problem.setModelAsCopy(model)
problem.setTimeBounds(0, [0.05, 1.0])
problem.setStateInfo("/joint/height/value", [0.14, 0.16], 0.15, 0.14)
problem.setStateInfo("/joint/height/speed", [-1, 1], 0, 0)
problem.setControlInfo("/forceset/muscle", [0.01, 1])
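# Task: lift the mass from x = 0.15 m to 0.14 m (gravity acts along +x) as quickly as possible.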
# Initial state constraints/costs.
if not ignore_activation_dynamics:
initial_activation = osim.MocoInitialActivationGoal()
problem.addGoal(initial_activation)
initial_activation.setName("initial_activation")
if not ignore_tendon_compliance:
initial_equilibrium = osim.MocoInitialVelocityEquilibriumDGFGoal()
problem.addGoal(initial_equilibrium)
initial_equilibrium.setName("initial_velocity_equilibrium")
# The problem converges in fewer iterations when this goal is in cost mode.
initial_equilibrium.setMode("cost")
initial_equilibrium.setWeight(0.001)
problem.addGoal(osim.MocoFinalTimeGoal())
solver = study.initCasADiSolver()
solver.set_num_mesh_intervals(25)
solver.set_multibody_dynamics_mode("implicit")
solver.set_optim_convergence_tolerance(1e-4)
solver.set_optim_constraint_tolerance(1e-4)
solution = study.solve()
osim.STOFileAdapter.write(solution.exportToStatesTable(),
"exampleHangingMuscle_states.sto")
osim.STOFileAdapter.write(solution.exportToControlsTable(),
"exampleHangingMuscle_controls.sto")
# Conduct an analysis using MuscleAnalysis and ProbeReporter.
# Create an AnalyzeTool setup file.
analyze = osim.AnalyzeTool()
analyze.setName("analyze")
analyze.setModelFilename("hanging_muscle.osim")
analyze.setStatesFileName("exampleHangingMuscle_states.sto")
analyze.updAnalysisSet().cloneAndAppend(osim.MuscleAnalysis())
analyze.updAnalysisSet().cloneAndAppend(osim.ProbeReporter())
analyze.updControllerSet().cloneAndAppend(
osim.PrescribedController("exampleHangingMuscle_controls.sto"))
analyze.printToXML("exampleHangingMuscle_AnalyzeTool_setup.xml")
# Run the analysis.
analyze = osim.AnalyzeTool("exampleHangingMuscle_AnalyzeTool_setup.xml")
analyze.run()
table_force = osim.TimeSeriesTable(
"analyze_MuscleAnalysis_ActiveFiberForce.sto")
table_velocity = osim.TimeSeriesTable(
"analyze_MuscleAnalysis_FiberVelocity.sto")
time = table_force.getIndependentColumn()
force = table_force.getDependentColumn("muscle").to_numpy()
velocity = table_velocity.getDependentColumn("muscle").to_numpy()
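# Fiber mechanical power from the MuscleAnalysis is active_fiber_force * (-fiber_velocity), plotted below.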
# Plot the terms of the metabolics model, and compare the metabolics model's
# mechanical work rate to the mechanical work rate computed using the
# MuscleAnalysis.
plot = False
# The following environment variable is set during automated testing.
if os.getenv('OPENSIM_USE_VISUALIZER') != '0':
try:
import pylab as pl
plot = True
except:
print('Skipping plotting')
if plot:
pl.plot(time, force * -velocity,
label='active_fiber_force * fiber_velocity', lw=4)
table_metabolics = osim.TimeSeriesTable("analyze_ProbeReporter_probes.sto")
time = table_metabolics.getIndependentColumn()
metabolics_total_rate = table_metabolics.getDependentColumn(
"metabolics_TOTAL").to_numpy()
pl.plot(time, metabolics_total_rate, label='total metabolic rate')
mech_work_rate = table_metabolics.getDependentColumn(
"mechanical_work_rate_TOTAL").to_numpy()
pl.plot(time, mech_work_rate, label='mechanical work rate')
activation_maintenance_rate = table_metabolics.getDependentColumn(
"activation_maintenance_rate_TOTAL").to_numpy()
pl.plot(time, activation_maintenance_rate,
label='activation maintenance rate')
shortening_rate = table_metabolics.getDependentColumn(
"shortening_rate_TOTAL").to_numpy()
pl.plot(time, shortening_rate, label='shortening rate')
basal_rate = table_metabolics.getDependentColumn(
"basal_rate_TOTAL").to_numpy()
pl.plot(time, basal_rate, label='basal rate')
pl.legend()
pl.show()
| # -------------------------------------------------------------------------- #
# OpenSim Moco: exampleHangingMuscle.py #
# -------------------------------------------------------------------------- #
# Copyright (c) 2020 Stanford University and the Authors #
# #
# Author(s): <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
# This example includes a point mass hanging by a muscle (+x is downward),
# and shows how to use MocoStudy with a model that includes a muscle.
# Additionally, this example shows how to use OpenSim's Analyses with a
# MocoSolution.
# The trajectory optimization problem is to lift the point mass by a small
# distance in minimum time.
import os
import opensim as osim
def createHangingMuscleModel(ignore_activation_dynamics,
ignore_tendon_compliance):
model = osim.Model()
model.setName("hanging_muscle")
model.set_gravity(osim.Vec3(9.81, 0, 0))
body = osim.Body("body", 0.5, osim.Vec3(0), osim.Inertia(1))
model.addComponent(body)
# Allows translation along x.
joint = osim.SliderJoint("joint", model.getGround(), body)
coord = joint.updCoordinate()
coord.setName("height")
model.addComponent(joint)
# The point mass is supported by a muscle.
# The DeGrooteFregly2016Muscle is the only muscle model in OpenSim that
# has been tested with Moco.
actu = osim.DeGrooteFregly2016Muscle()
actu.setName("muscle")
actu.set_max_isometric_force(30.0)
actu.set_optimal_fiber_length(0.10)
actu.set_tendon_slack_length(0.05)
actu.set_tendon_strain_at_one_norm_force(0.10)
actu.set_ignore_activation_dynamics(ignore_activation_dynamics)
actu.set_ignore_tendon_compliance(ignore_tendon_compliance)
actu.set_fiber_damping(0.01)
# The DeGrooteFregly2016Muscle is the only muscle model in OpenSim that
# can express its tendon compliance dynamics using an implicit
# differential equation.
actu.set_tendon_compliance_dynamics_mode("implicit")
actu.set_max_contraction_velocity(10)
actu.set_pennation_angle_at_optimal(0.10)
actu.addNewPathPoint("origin", model.updGround(), osim.Vec3(0))
actu.addNewPathPoint("insertion", body, osim.Vec3(0))
model.addForce(actu)
# Add metabolics probes: one for the total metabolic rate,
# and one for each term in the metabolics model.
probe = osim.Umberger2010MuscleMetabolicsProbe()
probe.setName("metabolics")
probe.addMuscle("muscle", 0.5)
model.addProbe(probe)
probe = osim.Umberger2010MuscleMetabolicsProbe()
probe.setName("activation_maintenance_rate")
probe.set_activation_maintenance_rate_on(True)
probe.set_shortening_rate_on(False)
probe.set_basal_rate_on(False)
probe.set_mechanical_work_rate_on(False)
probe.addMuscle("muscle", 0.5)
model.addProbe(probe)
probe = osim.Umberger2010MuscleMetabolicsProbe()
probe.setName("shortening_rate")
probe.set_activation_maintenance_rate_on(False)
probe.set_shortening_rate_on(True)
probe.set_basal_rate_on(False)
probe.set_mechanical_work_rate_on(False)
probe.addMuscle("muscle", 0.5)
model.addProbe(probe)
probe = osim.Umberger2010MuscleMetabolicsProbe()
probe.setName("basal_rate")
probe.set_activation_maintenance_rate_on(False)
probe.set_shortening_rate_on(False)
probe.set_basal_rate_on(True)
probe.set_mechanical_work_rate_on(False)
probe.addMuscle("muscle", 0.5)
model.addProbe(probe)
probe = osim.Umberger2010MuscleMetabolicsProbe()
probe.setName("mechanical_work_rate")
probe.set_activation_maintenance_rate_on(False)
probe.set_shortening_rate_on(False)
probe.set_basal_rate_on(False)
probe.set_mechanical_work_rate_on(True)
probe.addMuscle("muscle", 0.5)
model.addProbe(probe)
body.attachGeometry(osim.Sphere(0.05))
model.finalizeConnections()
return model
ignore_activation_dynamics = False
ignore_tendon_compliance = False
model = createHangingMuscleModel(ignore_activation_dynamics,
ignore_tendon_compliance)
model.printToXML("hanging_muscle.osim")
study = osim.MocoStudy()
problem = study.updProblem()
problem.setModelAsCopy(model)
problem.setTimeBounds(0, [0.05, 1.0])
problem.setStateInfo("/joint/height/value", [0.14, 0.16], 0.15, 0.14)
problem.setStateInfo("/joint/height/speed", [-1, 1], 0, 0)
problem.setControlInfo("/forceset/muscle", [0.01, 1])
# Initial state constraints/costs.
if not ignore_activation_dynamics:
initial_activation = osim.MocoInitialActivationGoal()
problem.addGoal(initial_activation)
initial_activation.setName("initial_activation")
if not ignore_tendon_compliance:
initial_equilibrium = osim.MocoInitialVelocityEquilibriumDGFGoal()
problem.addGoal(initial_equilibrium)
initial_equilibrium.setName("initial_velocity_equilibrium")
# The problem converges in fewer iterations when this goal is in cost mode.
initial_equilibrium.setMode("cost")
initial_equilibrium.setWeight(0.001)
problem.addGoal(osim.MocoFinalTimeGoal())
solver = study.initCasADiSolver()
solver.set_num_mesh_intervals(25)
solver.set_multibody_dynamics_mode("implicit")
solver.set_optim_convergence_tolerance(1e-4)
solver.set_optim_constraint_tolerance(1e-4)
solution = study.solve()
osim.STOFileAdapter.write(solution.exportToStatesTable(),
"exampleHangingMuscle_states.sto")
osim.STOFileAdapter.write(solution.exportToControlsTable(),
"exampleHangingMuscle_controls.sto")
# Conduct an analysis using MuscleAnalysis and ProbeReporter.
# Create an AnalyzeTool setup file.
analyze = osim.AnalyzeTool()
analyze.setName("analyze")
analyze.setModelFilename("hanging_muscle.osim")
analyze.setStatesFileName("exampleHangingMuscle_states.sto")
analyze.updAnalysisSet().cloneAndAppend(osim.MuscleAnalysis())
analyze.updAnalysisSet().cloneAndAppend(osim.ProbeReporter())
analyze.updControllerSet().cloneAndAppend(
osim.PrescribedController("exampleHangingMuscle_controls.sto"))
analyze.printToXML("exampleHangingMuscle_AnalyzeTool_setup.xml")
# Run the analysis.
analyze = osim.AnalyzeTool("exampleHangingMuscle_AnalyzeTool_setup.xml")
analyze.run()
table_force = osim.TimeSeriesTable(
"analyze_MuscleAnalysis_ActiveFiberForce.sto")
table_velocity = osim.TimeSeriesTable(
"analyze_MuscleAnalysis_FiberVelocity.sto")
time = table_force.getIndependentColumn()
force = table_force.getDependentColumn("muscle").to_numpy()
velocity = table_velocity.getDependentColumn("muscle").to_numpy()
# Plot the terms of the metabolics model, and compare the metabolics model's
# mechanical work rate to the mechanical work rate computed using the
# MuscleAnalysis.
plot = False
# The following environment variable is set during automated testing.
if os.getenv('OPENSIM_USE_VISUALIZER') != '0':
try:
import pylab as pl
plot = True
except ImportError:
print('Skipping plotting')
if plot:
pl.plot(time, force * -velocity,
label='active_fiber_force * fiber_velocity', lw=4)
table_metabolics = osim.TimeSeriesTable("analyze_ProbeReporter_probes.sto")
time = table_metabolics.getIndependentColumn()
metabolics_total_rate = table_metabolics.getDependentColumn(
"metabolics_TOTAL").to_numpy()
pl.plot(time, metabolics_total_rate, label='total metabolic rate')
mech_work_rate = table_metabolics.getDependentColumn(
"mechanical_work_rate_TOTAL").to_numpy()
pl.plot(time, mech_work_rate, label='mechanical work rate')
activation_maintenance_rate = table_metabolics.getDependentColumn(
"activation_maintenance_rate_TOTAL").to_numpy()
pl.plot(time, activation_maintenance_rate,
label='activation maintenance rate')
shortening_rate = table_metabolics.getDependentColumn(
"shortening_rate_TOTAL").to_numpy()
pl.plot(time, shortening_rate, label='shortening rate')
basal_rate = table_metabolics.getDependentColumn(
"basal_rate_TOTAL").to_numpy()
pl.plot(time, basal_rate, label='basal rate')
pl.legend()
pl.show()
| en | 0.848376 | # -------------------------------------------------------------------------- # # OpenSim Moco: exampleHangingMuscle.py # # -------------------------------------------------------------------------- # # Copyright (c) 2020 Stanford University and the Authors # # # # Author(s): <NAME> # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. You may obtain a # # copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # # -------------------------------------------------------------------------- # # This example includes a point mass hanging by a muscle (+x is downward), # and shows how to use MocoStudy with a model that includes a muscle. # Additionally, this example shows how to use OpenSim's Analyses with a # MocoSolution. # The trajectory optimization problem is to lift the point mass by a small # distance in minimum time. # Allows translation along x. # The point mass is supported by a muscle. # The DeGrooteFregly2016Muscle is the only muscle model in OpenSim that # has been tested with Moco. # The DeGrooteFregly2016Muscle is the only muscle model in OpenSim that # can express its tendon compliance dynamics using an implicit # differential equation. # Add metabolics probes: one for the total metabolic rate, # and one for each term in the metabolics model. # Initial state constraints/costs. # The problem converges in fewer iterations when this goal is in cost mode. # Conduct an analysis using MuscleAnalysis and ProbeReporter. # Create an AnalyzeTool setup file. # Run the analysis. # Plot the terms of the metabolics model, and compare the metabolics model's # mechanical work rate to the mechanical work rate computed using the # MuscleAnalysis. # The following environment variable is set during automated testing. | 2.3884 | 2 |
logic/Anomaly.py | futuristicmeme/Deep-Geospatial | 1 | 6632889 | from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, LeakyReLU, Dense
from keras.callbacks import TensorBoard
from keras.models import Model, load_model
import keras.backend as K
from keras.utils.vis_utils import plot_model
import tensorflow as tf
from skimage import io
import numpy as np
from sklearn.model_selection import train_test_split
from skimage.transform import rescale, resize, downscale_local_mean
from PIL import Image
import os
errs = ["Model already loaded! Please reset current model to load a new one!",""]
class Anomaly():
"""
A class which creates an autoencoder designed for detecting anomalies in
Google Maps imagery
"""
def __init__(self, res_x, res_y, code_dim, color=True):
"""
Anomaly Constructor
Args:
res_x: Image resolution along the first (row) axis, in pixels
res_y: Image resolution along the second (column) axis, in pixels
code_dim: Coded layer dimensions
color: Whether a RGB image is used
"""
self.res_x = res_x
self.res_y = res_y
self.code_dim = code_dim
self.model = None
self.encoder = None
self.decoder = None
self.x_train = None
self.x_test = None
self.encoded_input = None
self.decoder_layer = None
if(color==True):
self.color=3
else:
self.color=1
def loadTrainingData(self, trainpath, testpath=None, testpercent=None, rescaleSize=None):
"""
Loads and prepares training &/or test data for the model.
Args:
trainpath: The absolute path to the training folder
testpath: The absolute path to the testing folder - defaults to None
testpercent: Percentage of the training images to hold out as a test
split - defaults to None (provide testpath instead)
rescaleSize: Downscale amount. (Ex: 1.0/4.0) If your target resolution is lower than the
network input dimensions use this to rescale your photos.
"""
#convert all images in file to keras-readable array
train_images = []
test_images = []
for image_path in os.listdir(trainpath):
if not image_path.startswith('.'):
img = io.imread(trainpath+image_path , as_grey=(self.color!=3))
if(rescaleSize!=None):
img = rescale(img, rescaleSize)
img = img.reshape(self.res_x,self.res_y,self.color)
train_images.append(img)
if(testpercent != None):
x_all = np.array(train_images).astype('float32')
length = len(train_images)
# X% of the data for training, 100-X% of the data for testing
self.x_train, self.x_test = train_test_split(x_all, test_size=((testpercent*length)//100), random_state=24)
else:
for image_path in os.listdir(testpath):
if not image_path.startswith('.'):
img = io.imread(testpath+image_path , as_grey=(self.color!=3))
if(rescaleSize!=None):
img = rescale(img, rescaleSize)
img = img.reshape(self.res_x,self.res_y,self.color)
test_images.append(img)
self.x_train = np.array(train_images).astype('float32')
self.x_test = np.array(test_images).astype('float32')
print(self.x_train.shape)
print(self.x_test.shape)
return
def createModel(self):
"""
Generates the model structure for use in training and evaluating.
"""
input_img = Input(shape=(self.res_x,self.res_y,self.color))
encoded = MaxPooling2D((2, 2), padding='same', input_shape=(self.res_x,self.res_y,self.color))(input_img)
encoded = Conv2D(int(self.code_dim*2), (3, 3), activation='relu', padding='same')(encoded)
encoded = MaxPooling2D((2, 2), padding='same')(encoded)
encoded = Conv2D(self.code_dim*4, (3, 3), activation='relu', padding='same')(encoded)
encoded = MaxPooling2D((2, 2), padding='same')(encoded)
decoded = Conv2D(self.code_dim*4, (3, 3), activation='relu', padding='same')(encoded)
decoded = UpSampling2D((2, 2))(decoded)
decoded = Conv2D(int(self.code_dim*2), (3, 3), activation='relu', padding='same')(decoded)
decoded = UpSampling2D((2, 2))(decoded)
decoded = Conv2D(int(self.code_dim), (3, 3), activation='relu', padding='same')(decoded)
decoded = UpSampling2D((2, 2))(decoded)
decoded = Conv2D(self.color, (3, 3), activation='linear', padding='same')(decoded)
#decoded = LeakyReLU(alpha=.001)
# maps an input to its reconstruction
self.model = Model(input_img, decoded)
# maps an input to its encoded representation
self.encoder = Model(input_img, encoded)
self.encoded_input = Input(shape=(self.res_x,self.res_y,self.color))
self.decoder_layer = self.model.layers[-1] # last layer of the autoencoder model
self.model.compile(optimizer='adadelta', loss='mean_squared_logarithmic_error')
plot_model(self.model, to_file="model_plot.png", show_layer_names=True, show_shapes=True)
return
def loadModel(self, savePath):
"""
Loads a saved compiled model.
"""
if(self.model == None):
self.model = load_model(savePath)
return
print(errs[0])
return
def resetModel(self):
"""
Resets the current model in memory.
"""
if(self.model != None):
self.model = None
def train(self, savePath=None):
"""
Trains the generated model.
Args:
(Optional) savePath: location to save model file - defaults to None
"""
tbCallBack = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=False)
#tensorboard --logdir path_to_current_dir/Graph to see visual progress
self.model.fit(self.x_train, self.x_train,
epochs=100,
batch_size=32,
shuffle=True,
validation_data=(self.x_test, self.x_test),
callbacks=[tbCallBack])
if(savePath!=None):
self.model.save(savePath+'model.h5')
return
def eval(self, evalimgspath):
"""
Evaluates the trained model against a set of images.
Args:
evalimgspath: location of the eval image(s)
"""
#compare generated image to original
eval_images = []
for image_path in os.listdir(evalimgspath):
if not image_path.startswith('.'):
img = io.imread(evalimgspath+image_path , as_grey=(self.color!=3))
img = img.reshape(self.res_x,self.res_y,self.color)
eval_images.append(img)
x_eval = np.array(eval_images).astype('float32')
# detect anomalies
print(x_eval.shape)
decoded_imgs = self.model.predict(x_eval)
n = len(eval_images)
for i in range(n):
# display original
# original_img = Image.fromarray(eval_images[i], 'RGB')
# original_img.show()
# display reconstruction
# decoded_img = Image.fromarray(decoded_imgs[i], 'RGB')
# decoded_img.show()
print('Mean Squared Error of iteration {0} : {1}'.format(i,self.mse(eval_images[i], decoded_imgs[i])))
return decoded_imgs
def evalSingle(self,image_path):
"""
Evaluates a single image and returns its reconstruction.
Args:
image_path: location of the image to evaluate
"""
#compare generated image to original
image_paths = [image_path, image_path]
eval_images = []
for image_path in image_paths:
img = io.imread(image_path , as_grey=(self.color!=3))
img = img.reshape(self.res_x,self.res_y,self.color)
eval_images.append(img)
x_eval = np.array(eval_images).astype('float32')
# detect anomalies
print(x_eval.shape)
decoded_imgs = self.model.predict(x_eval)
print(decoded_imgs.shape)
return decoded_imgs[0]
def mse(self, imageA, imageB):
"""
'Mean Squared Error' between the two images
Args:
imageA: numpy matrix
imageB: numpy matrix
Returns:
Mean squared error
"""
err = np.sum((imageA - imageB) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
return err
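# Illustrative usage sketch (not from the original repository). The folder
# paths, the 256x256 input size and the code_dim value are placeholders chosen
# only to show the intended call order of the class above.
if __name__ == '__main__':
    detector = Anomaly(256, 256, code_dim=16, color=True)
    detector.loadTrainingData('data/train/', testpercent=20)
    detector.createModel()
    detector.train(savePath='models/')
    reconstructions = detector.eval('data/eval/')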
| from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, LeakyReLU, Dense
from keras.callbacks import TensorBoard
from keras.models import Model, load_model
import keras.backend as K
from keras.utils.vis_utils import plot_model
import tensorflow as tf
from skimage import io
import numpy as np
from sklearn.model_selection import train_test_split
from skimage.transform import rescale, resize, downscale_local_mean
from PIL import Image
import os
errs = ["Model already loaded! Please reset current model to load a new one!",""]
class Anomaly():
"""
A class which creates an autoencoder designed for detecting anomalies in
Google Maps imagery
"""
def __init__(self, res_x, res_y, code_dim, color=True):
"""
Anomaly Constructor
Args:
res_x: The latitude of the location required
res_y: The longitude of the location required
code_dim: Coded layer dimensions
color: Whether a RGB image is used
"""
self.res_x = res_x
self.res_y = res_y
self.code_dim = code_dim
self.model = None
self.encoder = None
self.decoder = None
self.x_train = None
self.x_test = None
self.encoded_input = None
self.decoder_layer = None
if(color==True):
self.color=3
else:
self.color=1
def loadTrainingData(self, trainpath, testpath=None, testpercent=None, rescaleSize=None):
"""
Loads and prepares training &/or test data for the model.
Args:
trainpath: The absolute path to the training folder
testpath: The absolute path to the testing folder - defaults to None
testpercent: The number of tiles wide the image should be -
defaults to None (if testpath is none as well)
rescaleSize: Downscale amount. (Ex: 1.0/4.0) If your target resolution is lower than the
network input dimensions use this to rescale your photos.
"""
#convert all images in file to keras-readable array
train_images = []
test_images = []
for image_path in os.listdir(trainpath):
if not image_path.startswith('.'):
img = io.imread(trainpath+image_path , as_grey=(self.color!=3))
if(rescaleSize!=None):
img = rescale(img, rescaleSize)
img = img.reshape(self.res_x,self.res_y,self.color)
train_images.append(img)
if(testpercent != None):
x_all = np.array(train_images).astype('float32')
length = len(train_images)
# X% of the data for training, 100-X% of the data for testing
self.x_train, self.x_test = train_test_split(x_all, test_size=((testpercent*length)//100), random_state=24)
else:
for image_path in os.listdir(testpath):
if not image_path.startswith('.'):
img = io.imread(testpath+image_path , as_grey=(self.color!=3))
if(rescaleSize!=None):
img = rescale(img, rescaleSize)
img = img.reshape(self.res_x,self.res_y,self.color)
test_images.append(img)
self.x_train = np.array(train_images).astype('float32')
self.x_test = np.array(test_images).astype('float32')
print(self.x_train.shape)
print(self.x_test.shape)
return
def createModel(self):
"""
Generates the model structure for use in training and evaluating.
"""
input_img = Input(shape=(self.res_x,self.res_y,self.color))
encoded = MaxPooling2D((2, 2), padding='same', input_shape=(self.res_x,self.res_y,self.color))(input_img)
encoded = Conv2D(int(self.code_dim*2), (3, 3), activation='relu', padding='same')(encoded)
encoded = MaxPooling2D((2, 2), padding='same')(encoded)
encoded = Conv2D(self.code_dim*4, (3, 3), activation='relu', padding='same')(encoded)
encoded = MaxPooling2D((2, 2), padding='same')(encoded)
decoded = Conv2D(self.code_dim*4, (3, 3), activation='relu', padding='same')(encoded)
decoded = UpSampling2D((2, 2))(decoded)
decoded = Conv2D(int(self.code_dim*2), (3, 3), activation='relu', padding='same')(decoded)
decoded = UpSampling2D((2, 2))(decoded)
decoded = Conv2D(int(self.code_dim), (3, 3), activation='relu', padding='same')(decoded)
decoded = UpSampling2D((2, 2))(decoded)
decoded = Conv2D(self.color, (3, 3), activation='linear', padding='same')(decoded)
#decoded = LeakyReLU(alpha=.001)
# maps an input to its reconstruction
self.model = Model(input_img, decoded)
# maps an input to its encoded representation
self.encoder = Model(input_img, encoded)
self.encoded_input = Input(shape=(self.res_x,self.res_y,self.color))
self.decoder_layer = self.model.layers[-1] # last layer of the autoencoder model
self.model.compile(optimizer='adadelta', loss='mean_squared_logarithmic_error')
plot_model(self.model, to_file="model_plot.png", show_layer_names=True, show_shapes=True)
return
def loadModel(self, savePath):
"""
Loads a saved compiled model.
"""
if(self.model == None):
self.model = load_model(savePath)
return
print(errs[0])
return
def resetModel(self):
"""
Resets the current model in memory.
"""
if(self.model != None):
self.model = None
def train(self, savePath=None):
"""
Trains the generated model.
Args:
(Optional) savePath: location to save model file - defaults to None
"""
tbCallBack = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=False)
#tensorboard --logdir path_to_current_dir/Graph to see visual progress
self.model.fit(self.x_train, self.x_train,
epochs=100,
batch_size=32,
shuffle=True,
validation_data=(self.x_test, self.x_test),
callbacks=[tbCallBack])
if(savePath!=None):
self.model.save(savePath+'model.h5')
return
def eval(self, evalimgspath):
"""
Evaluates the trained model against a set of images.
Args:
evalimgspath: location of the eval image(s)
"""
#compare generated image to original
eval_images = []
for image_path in os.listdir(evalimgspath):
if not image_path.startswith('.'):
img = io.imread(evalimgspath+image_path , as_grey=(self.color!=3))
img = img.reshape(self.res_x,self.res_y,self.color)
eval_images.append(img)
x_eval = np.array(eval_images).astype('float32')
#detect anomalys
print(x_eval.shape)
decoded_imgs = self.model.predict(x_eval)
n = len(eval_images)
for i in range(n):
# display original
# original_img = Image.fromarray(eval_images[i], 'RGB')
# original_img.show()
# display reconstruction
# decoded_img = Image.fromarray(decoded_imgs[i], 'RGB')
# decoded_img.show()
print('Mean Squared Error of iteration {0} : {1}'.format(i,self.mse(eval_images[i], decoded_imgs[i])))
return decoded_imgs
def evalSingle(self,image_path):
"""
Evaluates a single image and returns the mse
Args:
evalimgspath: location of the eval image(s)
"""
#compare generated image to original
image_paths = [image_path, image_path]
eval_images = []
for image_path in image_paths:
img = io.imread(image_path , as_grey=(self.color!=3))
img = img.reshape(self.res_x,self.res_y,self.color)
eval_images.append(img)
x_eval = np.array(eval_images).astype('float32')
#detect anomalys
print(x_eval.shape)
decoded_imgs = self.model.predict(x_eval)
print(decoded_imgs.shape)
return decoded_imgs[0]
def mse(self, imageA, imageB):
"""
'Mean Squared Error' between the two images
Args:
imageA: numpy matrix
imageB: numpy matrix
Returns:
Mean squared error
"""
err = np.sum((imageA - imageB) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
return err
| en | 0.714306 | A class which creates an autoencoder designed for detecting anomalies in Google Maps imagery Anomaly Constructor Args: res_x: The latitude of the location required res_y: The longitude of the location required code_dim: Coded layer dimensions color: Whether a RGB image is used Loads and prepares training &/or test data for the model. Args: trainpath: The absolute path to the training folder testpath: The absolute path to the testing folder - defaults to None testpercent: The number of tiles wide the image should be - defaults to None (if testpath is none as well) rescaleSize: Downscale amount. (Ex: 1.0/4.0) If your target resolution is lower than the network input dimensions use this to rescale your photos. #convert all images in file to keras-readable array # X% of the data for training, 100-X% of the data for testing Generates the model structure for use in training and evaluating. #decoded = LeakyReLU(alpha=.001) # maps an input to its reconstruction # maps an input to its encoded representation # last layer of the autoencoder model Loads a saved compiled model. Resets the current model in memory. Trains the generated model. Args: (Optional) savePath: location to save model file - defaults to None #tensorboard --logdir path_to_current_dir/Graph to see visual progress Evaluates the trained model against a set of images. Args: evalimgspath: location of the eval image(s) #compare generated image to original #detect anomalys # display original # original_img = Image.fromarray(eval_images[i], 'RGB') # original_img.show() # display reconstruction # decoded_img = Image.fromarray(decoded_imgs[i], 'RGB') # decoded_img.show() Evaluates a single image and returns the mse Args: evalimgspath: location of the eval image(s) #compare generated image to original #detect anomalys 'Mean Squared Error' between the two images Args: imageA: numpy matrix imageB: numpy matrix Returns: Mean squared error | 2.841525 | 3 |
data structures/linklist.py | iFun/Project-G | 0 | 6632890 | import unittest
class Node():
"""LinkedList node class"""
def __init__(self, value):
self.value = value
self.next = None
self.prev = None
def __str__(self):
"""Override the default print behavior"""
if self.next is not None:
return (str(self.value) + ' -->')
return str(self.value)
def __eq__(self, other):
"""Override the default Equals behavior"""
return self.value == other.value
class LinkedList():
"""docstring for LinkedList"""
def __init__(self, node):
self.head = node
self.tail = node
self.__length = 1
def __str__(self):
node = self.head
while node is not None:
print(node, end=" ")
node = node.next
return ''
def getLength(self) -> int:
return self.__length
def addToTail(self, node):
self.tail.next = node
node.prev = self.tail
self.tail = self.tail.next
self.__length += 1
def addToHead(self, node):
node.next = self.head
self.head.prev = node
self.head= node
self.__length += 1
def addToMid(self, index, node):
current = self.head
while index != 0:
current = current.next
index -= 1
prev = current.prev
node.next = current
node.prev = prev
current.prev = node
prev.next = node
def deleteNode(self, node):
# unlink the node and repair the neighbours' prev/next pointers
if node is self.tail:
self.tail = self.tail.prev
if self.tail is not None:
self.tail.next = None
elif node is self.head:
self.head = self.head.next
if self.head is not None:
self.head.prev = None
else:
node.prev.next = node.next
node.next.prev = node.prev
self.__length -= 1
def clear(self):
self.head = None
self.tail = None
self.__length = 0
def indexOf(self, value):
current = self.head
index = 0
while current is not None:
if current.value == value:
return index
else:
current = current.next
index += 1
print('value is not in LinkedList')
return None
def addToIndex(self, index, node):
if index > self.__length - 1 or index < 0 or node is None:
print('index is out of bound or node is null')
return
if index == 0:
return self.addToHead(node)
elif index == self.__length - 1:
return self.addToTail(node)
else:
return self.addToMid(index, node)
def set(self, index, newValue):
if index > self.__length - 1 or index < 0:
print('index is out of bound')
else:
current = self.head
while index != 0:
current = current.next
index -= 1
current.value = newValue
def reverse(self):
"""reverse the linkedlist"""
current = self.head
while current is not None:
nextNode = current.next
prev = current.prev
current.prev = current.next
current.next = prev
current = nextNode
newTail = self.head
self.head = self.tail
self.tail = newTail
def createNewLinkList(length):
for value in range(length):
if value == 0:
ll = LinkedList(Node(value))
else:
ll.addToTail(Node(value))
return ll
class TestLinkedList(unittest.TestCase):
def setUp(self):
"""create a new linklist before each unit test"""
self.LinkedListLength = 100
self.ll = createNewLinkList(self.LinkedListLength)
def test_init(self):
self.assertEqual(self.ll.getLength(),self.LinkedListLength)
self.assertEqual(self.ll.head.value,self.LinkedListLength - self.LinkedListLength)
self.assertEqual(self.ll.tail.value,self.LinkedListLength - 1)
def test_delete_head(self):
head = self.ll.head
nextHead = head.next
self.ll.deleteNode(head)
self.assertEqual(self.ll.head,nextHead)
self.assertEqual(self.ll.getLength(),self.LinkedListLength - 1)
self.assertEqual(self.ll.tail.value ,self.LinkedListLength - 1)
def test_delete_tail(self):
tail = self.ll.tail
nextTail = tail.prev
self.ll.deleteNode(tail)
self.assertEqual(self.ll.tail,nextTail)
self.assertEqual(self.ll.getLength(),self.LinkedListLength - 1)
def test_delete_middle(self):
mid = self.ll.head.next.next
nextmid = mid.next
self.ll.deleteNode(mid)
self.assertNotEqual(mid ,nextmid.prev)
self.assertEqual(self.ll.tail.value ,self.LinkedListLength - 1)
self.assertEqual(self.ll.head.value ,0)
self.assertEqual(self.ll.getLength(),self.LinkedListLength - 1)
def test_clear(self):
self.ll.clear()
self.assertEqual(self.ll.tail,None)
self.assertEqual(self.ll.head,None)
self.assertEqual(self.ll.getLength(),0)
def test_indexOf(self):
self.assertEqual(self.ll.indexOf(self.LinkedListLength - 1) ,self.LinkedListLength - 1)
self.assertEqual(self.ll.indexOf(0) ,0)
def test_add_to_Index(self):
self.ll.addToIndex(0, Node(100))
self.ll.addToIndex(self.ll.getLength() - 1, Node(200))
self.ll.addToIndex(self.ll.getLength() - 2, Node(300))
self.assertEqual(self.ll.head.value,100)
self.assertEqual(self.ll.tail.value,200)
self.assertEqual(self.ll.indexOf(300),self.ll.getLength() - 2)
self.assertEqual(self.ll.getLength(),self.LinkedListLength - 1 + 3)
def test_set(self):
self.ll.set(0,100)
self.ll.set(self.ll.getLength() - 1, 200)
self.ll.set(self.ll.getLength() - 2, 300)
self.assertEqual(self.ll.head.value,100)
self.assertEqual(self.ll.tail.value,200)
self.assertEqual(self.ll.indexOf(300),self.ll.getLength() - 2)
self.assertEqual(self.ll.getLength(),self.LinkedListLength)
def test_reverse(self):
self.ll.reverse()
self.assertEqual(self.ll.head.value,self.ll.getLength() - 1)
self.assertEqual(self.ll.tail.value,0)
self.assertEqual(self.ll.indexOf(self.ll.getLength() - 2),1)
if __name__ == '__main__':
unittest.main()
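# Illustrative usage sketch (not part of the original module); the values are
# arbitrary and only exercise the API defined above.
def _demo():
    ll = createNewLinkList(5)      # 0 --> 1 --> 2 --> 3 --> 4
    ll.addToHead(Node(-1))         # -1 --> 0 --> 1 --> 2 --> 3 --> 4
    ll.reverse()                   # 4 --> 3 --> 2 --> 1 --> 0 --> -1
    print('length:', ll.getLength(), 'index of 3:', ll.indexOf(3))  # 6, 1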
| import unittest
class Node():
"""LinkedList node class"""
def __init__(self, value):
self.value = value
self.next = None
self.prev = None
def __str__(self):
"""Override the default print behavior"""
if self.next is not None:
return (str(self.value) + ' -->')
return str(self.value)
def __eq__(self, other):
"""Override the default Equals behavior"""
return self.value == other.value
class LinkedList():
"""docstring for LinkedList"""
def __init__(self, node):
self.head = node
self.tail = node
self.__length = 1
def __str__(self):
node = self.head
while node is not None:
print(node,end =" ")
node = node.next
return ''
def getLength(self) -> int:
return self.__length
def addToTail(self, node):
self.tail.next = node
node.prev = self.tail
self.tail = self.tail.next
self.__length += 1
def addToHead(self, node):
node.next = self.head
self.head.prev = node
self.head= node
self.__length += 1
def addToMid(self, index, node):
current = self.head
while index != 0:
current = current.next
index -= 1
prev = current.prev
node.next = current
node.prev = prev
current.prev = node
prev.next = node
def deleteNode(self, node):
# unlink the node and repair the neighbours' prev/next pointers
if node is self.tail:
self.tail = self.tail.prev
if self.tail is not None:
self.tail.next = None
elif node is self.head:
self.head = self.head.next
if self.head is not None:
self.head.prev = None
else:
node.prev.next = node.next
node.next.prev = node.prev
self.__length -= 1
def clear(self):
self.head = None
self.tail = None
self.__length = 0
def indexOf(self, value):
current = self.head
index = 0
while current is not None:
if current.value == value:
return index
else:
current = current.next
index += 1
print('value is not in LinkedList')
return None
def addToIndex(self, index, node):
if index > self.__length - 1 or index < 0 or node is None:
print('index is out of bound or node is null')
return
if index == 0:
return self.addToHead(node)
elif index == self.__length - 1:
return self.addToTail(node)
else:
return self.addToMid(index, node)
def set(self, index, newValue):
if index > self.__length - 1 or index < 0:
print('index is out of bound')
else:
current = self.head
while index != 0:
current = current.next
index -= 1
current.value = newValue
def reverse(self):
"""reverse the linkedlist"""
current = self.head
while current is not None:
nextNode = current.next
prev = current.prev
current.prev = current.next
current.next = prev
current = nextNode
newTail = self.head
self.head = self.tail
self.tail = newTail
def createNewLinkList(length):
for value in range(length):
if value == 0:
ll = LinkedList(Node(value))
else:
ll.addToTail(Node(value))
return ll
class TestLinkedList(unittest.TestCase):
def setUp(self):
"""create a new linklist before each unit test"""
self.LinkedListLength = 100
self.ll = createNewLinkList(self.LinkedListLength)
def test_init(self):
self.assertEqual(self.ll.getLength(),self.LinkedListLength)
self.assertEqual(self.ll.head.value,self.LinkedListLength - self.LinkedListLength)
self.assertEqual(self.ll.tail.value,self.LinkedListLength - 1)
def test_delete_head(self):
head = self.ll.head
nextHead = head.next
self.ll.deleteNode(head)
self.assertEqual(self.ll.head,nextHead)
self.assertEqual(self.ll.getLength(),self.LinkedListLength - 1)
self.assertEqual(self.ll.tail.value ,self.LinkedListLength - 1)
def test_delete_tail(self):
tail = self.ll.tail
nextTail = tail.prev
self.ll.deleteNode(tail)
self.assertEqual(self.ll.tail,nextTail)
self.assertEqual(self.ll.getLength(),self.LinkedListLength - 1)
def test_delete_middle(self):
mid = self.ll.head.next.next
nextmid = mid.next
self.ll.deleteNode(mid)
self.assertNotEqual(mid ,nextmid.prev)
self.assertEqual(self.ll.tail.value ,self.LinkedListLength - 1)
self.assertEqual(self.ll.head.value ,0)
self.assertEqual(self.ll.getLength(),self.LinkedListLength - 1)
def test_clear(self):
self.ll.clear()
self.assertEqual(self.ll.tail,None)
self.assertEqual(self.ll.head,None)
self.assertEqual(self.ll.getLength(),0)
def test_indexOf(self):
self.assertEqual(self.ll.indexOf(self.LinkedListLength - 1) ,self.LinkedListLength - 1)
self.assertEqual(self.ll.indexOf(0) ,0)
def test_add_to_Index(self):
self.ll.addToIndex(0, Node(100))
self.ll.addToIndex(self.ll.getLength() - 1, Node(200))
self.ll.addToIndex(self.ll.getLength() - 2, Node(300))
self.assertEqual(self.ll.head.value,100)
self.assertEqual(self.ll.tail.value,200)
self.assertEqual(self.ll.indexOf(300),self.ll.getLength() - 2)
self.assertEqual(self.ll.getLength(),self.LinkedListLength - 1 + 3)
def test_set(self):
self.ll.set(0,100)
self.ll.set(self.ll.getLength() - 1, 200)
self.ll.set(self.ll.getLength() - 2, 300)
self.assertEqual(self.ll.head.value,100)
self.assertEqual(self.ll.tail.value,200)
self.assertEqual(self.ll.indexOf(300),self.ll.getLength() - 2)
self.assertEqual(self.ll.getLength(),self.LinkedListLength)
def test_reverse(self):
self.ll.reverse()
self.assertEqual(self.ll.head.value,self.ll.getLength() - 1)
self.assertEqual(self.ll.tail.value,0)
self.assertEqual(self.ll.indexOf(self.ll.getLength() - 2),1)
if __name__ == '__main__':
unittest.main()
| en | 0.408704 | LinkedList node class Override the default print behavior Override the default Equals behavior docstring for LinkedList reverse the linkedlist create a new linklist before each unit test | 4.028897 | 4 |
experiments/custom_classifier.py | FabioTomaz/msc | 0 | 6632891 | import os
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Dropout, Dense, Activation
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import imagenet_utils
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
from lesion_classifier import LesionClassifier
from base_model_param import BaseModelParam
class VanillaClassifier(LesionClassifier):
"""
NOT IMPLEMENTED!!! Model trained from scratch for skin lesion classification
"""
@property
def model(self):
return self._model
@property
def model_name(self):
return self._model_name
@staticmethod
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs) | import os
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Dropout, Dense, Activation
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import imagenet_utils
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
from lesion_classifier import LesionClassifier
from base_model_param import BaseModelParam
class VanillaClassifier(LesionClassifier):
"""
NOT IMPLEMENTED!!! Model trained from scratch for skin lesion classification
"""
@property
def model(self):
return self._model
@property
def model_name(self):
return self._model_name
@staticmethod
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs) | en | 0.587372 | NOT IMPLEMENTED!!! Model trained from scratch for skin lesion classification Preprocesses a numpy array encoding a batch of images. # Arguments x: a 4D numpy array consists of RGB values within [0, 255]. # Returns Preprocessed array. | 2.293874 | 2 |
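# Illustrative check (not from the original file) of what the 'tf'
# preprocessing mode used by VanillaClassifier.preprocess_input above does:
# it rescales raw pixel values from [0, 255] into [-1, 1]. The random 8x8
# image below is just a stand-in input.
import numpy as np
from tensorflow.keras.applications import imagenet_utils
batch = np.random.randint(0, 256, size=(1, 8, 8, 3)).astype('float32')
scaled = imagenet_utils.preprocess_input(batch, mode='tf')
print(scaled.min() >= -1.0, scaled.max() <= 1.0)  # expected: True True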
nominations/nominations/models.py | walshs33/sweng2018group | 0 | 6632892 | from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
#https://simpleisbetterthancomplex.com/tutorial/2016/07/22/how-to-extend-django-user-model.html#onetoone
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
first_name = models.CharField(max_length=30, help_text='Required')
last_name = models.CharField(max_length=30, help_text='Required')
email = models.EmailField(max_length=254, help_text='Required')
public_key = models.CharField(max_length=8192)
private_key = models.CharField(max_length=8192)
rank_id = models.IntegerField()
dept_id = models.IntegerField()
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
'''
class EncForm(models.Model):
encdata
dept_id
level
submitter
'''
class Post(models.Model):
post_title = models.CharField(max_length=100, null=True)
gender = models.CharField(max_length=100, null=True)
TITLE_CHOICES = (
('Mr','Mister'),
('Mrs','Missus'),
('Ms','Miss'),
('Prof','Professor'),
('Dr','Doctor'),
)
title = models.CharField(max_length=4, choices=TITLE_CHOICES, null=True)
first_name = models.CharField(max_length=100, null=True)
surname = models.CharField(max_length=100, null=True)
email = models.EmailField(max_length=254, null=True)
phone_number = models.CharField(max_length=20, null=True)
home_address = models.CharField(max_length=200, null=True)
dob = models.DateField(auto_now=False, auto_now_add=False, null=True)
discipline = models.CharField(max_length=1000, null=True)
IS_NEW_CHOICES = (
('N', 'New'),
('R', 'Replacement'),
)
new_or_replacement = models.CharField(max_length=11, choices = IS_NEW_CHOICES, null=True)
additional_remuneration = models.CharField(max_length=500, null=True)
hours_per_week = models.SmallIntegerField(null=True)
commencement_date = models.DateField(auto_now=False, auto_now_add=False, null=True)
school = models.CharField(max_length=100, null=True)
is_new_work_group = models.NullBooleanField()
work_group_title = models.CharField(max_length=100, null=True)
work_group_owner = models.CharField(max_length=100, null=True)
qual_title = models.CharField(max_length=100, null=True)
qual_awarding_body = models.CharField(max_length=100, null=True)
nationality = models.CharField(max_length=100, null=True)
is_permit_required = models.NullBooleanField()
CONTRACT_CHOICES = (
('Pe', 'Permanent'),
('SP', 'Specific Purpose'),
('FT', 'Fixed Term'),
)
contract_type = models.CharField(max_length=16, choices = CONTRACT_CHOICES, null=True)
salary = models.IntegerField(null=True)
first_increment_date = models.DateField(auto_now=False, auto_now_add=False, null=True)
increment_amount = models.SmallIntegerField(null=True)
is_NWA = models.NullBooleanField()
termination_date = models.DateField(auto_now=False, auto_now_add=False, null=True)
project_title = models.CharField(max_length=100, null=True)
grant_source = models.SmallIntegerField(null=True)
principal_investigator = models.CharField(max_length=100, null=True)
annual_leave = models.SmallIntegerField(null=True)
additional_comments = models.TextField(null=True)
def get_absolute_url(self):
return reverse('post_detail', args=[str(self.id)])
def __str__(self):
return self.post_title
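# Illustrative sketch (not part of this models.py): get_absolute_url() above
# reverses a route named 'post_detail', so the app's urls.py needs an entry
# along these lines; DetailView is just one possible choice of view.
# from django.urls import path
# from django.views.generic import DetailView
# urlpatterns = [
#     path('posts/<int:pk>/', DetailView.as_view(model=Post), name='post_detail'),
# ]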
| from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
#https://simpleisbetterthancomplex.com/tutorial/2016/07/22/how-to-extend-django-user-model.html#onetoone
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
first_name = models.CharField(max_length=30, help_text='Required')
last_name = models.CharField(max_length=30, help_text='Required')
email = models.EmailField(max_length=254, help_text='Required')
public_key = models.CharField(max_length=8192)
private_key = models.CharField(max_length=8192)
rank_id = models.IntegerField()
dept_id = models.IntegerField()
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
'''
class EncForm(models.Model):
encdata
dept_id
level
submitter
'''
class Post(models.Model):
post_title = models.CharField(max_length=100, null=True)
gender = models.CharField(max_length=100, null=True)
TITLE_CHOICES = (
('Mr','Mister'),
('Mrs','Missus'),
('Ms','Miss'),
('Prof','Professor'),
('Dr','Doctor'),
)
title = models.CharField(max_length=4, choices=TITLE_CHOICES, null=True)
first_name = models.CharField(max_length=100, null=True)
surname = models.CharField(max_length=100, null=True)
email = models.EmailField(max_length=254, null=True)
phone_number = models.CharField(max_length=20, null=True)
home_address = models.CharField(max_length=200, null=True)
dob = models.DateField(auto_now=False, auto_now_add=False, null=True)
discipline = models.CharField(max_length=1000, null=True)
IS_NEW_CHOICES = (
('N', 'New'),
('R', 'Replacement'),
)
new_or_replacement = models.CharField(max_length=11, choices = IS_NEW_CHOICES, null=True)
additional_remuneration = models.CharField(max_length=500, null=True)
hours_per_week = models.SmallIntegerField(null=True)
commencement_date = models.DateField(auto_now=False, auto_now_add=False, null=True)
school = models.CharField(max_length=100, null=True)
is_new_work_group = models.NullBooleanField()
work_group_title = models.CharField(max_length=100, null=True)
work_group_owner = models.CharField(max_length=100, null=True)
qual_title = models.CharField(max_length=100, null=True)
qual_awarding_body = models.CharField(max_length=100, null=True)
nationality = models.CharField(max_length=100, null=True)
is_permit_required = models.NullBooleanField()
CONTRACT_CHOICES = (
('Pe', 'Permanent'),
('SP', 'Specific Purpose'),
('FT', 'Fixed Term'),
)
contract_type = models.CharField(max_length=16, choices = CONTRACT_CHOICES, null=True)
salary = models.IntegerField(null=True)
first_increment_date = models.DateField(auto_now=False, auto_now_add=False, null=True)
increment_amount = models.SmallIntegerField(null=True)
is_NWA = models.NullBooleanField()
termination_date = models.DateField(auto_now=False, auto_now_add=False, null=True)
project_title = models.CharField(max_length=100, null=True)
grant_source = models.SmallIntegerField(null=True)
principal_investigator = models.CharField(max_length=100, null=True)
annual_leave = models.SmallIntegerField(null=True)
additional_comments = models.TextField(null=True)
def get_absolute_url(self):
return reverse('post_detail', args=[str(self.id)])
def __str__(self):
return self.title | en | 0.348374 | #https://simpleisbetterthancomplex.com/tutorial/2016/07/22/how-to-extend-django-user-model.html#onetoone class EncForm(models.Model): encdata dept_id level submitter | 2.404324 | 2 |
hallo/modules/dailys/field_sleep.py | joshcoales/Hallo | 1 | 6632893 | from datetime import timedelta
from hallo.events import EventMessage
import hallo.modules.dailys.dailys_field
class DailysSleepField(hallo.modules.dailys.dailys_field.DailysField):
# Does sleep and wake times, sleep notes, dream logs, shower?
type_name = "sleep"
WAKE_WORDS = ["morning", "wake", "woke"]
SLEEP_WORDS = ["goodnight", "sleep", "nini", "night"]
json_key_wake_time = "wake_time"
json_key_sleep_time = "sleep_time"
json_key_interruptions = "interruptions"
@staticmethod
def create_from_input(event, spreadsheet):
return DailysSleepField(spreadsheet)
@staticmethod
def passive_events():
return [EventMessage]
def passive_trigger(self, evt):
"""
:type evt: EventMessage
:rtype: None
"""
input_clean = evt.text.strip().lower()
now = evt.get_send_time()
time_str = now.isoformat()
sleep_date = evt.get_send_time().date()
current_data = self.load_data(sleep_date)
if current_data is None:
current_data = dict()
yesterday_date = sleep_date - timedelta(1)
yesterday_data = self.load_data(yesterday_date)
if yesterday_data is None:
yesterday_data = dict()
# If user is waking up
if input_clean in DailysSleepField.WAKE_WORDS:
# If today's data is blank, write in yesterday's sleep data
if len(current_data) == 0:
current_data = yesterday_data
sleep_date = yesterday_date
# If you already woke in this data, why are you waking again?
if self.json_key_wake_time in current_data:
self.message_channel("Didn't you already wake up?")
return
# If not, add a wake time to sleep data
else:
current_data[self.json_key_wake_time] = time_str
self.save_data(current_data, sleep_date)
self.message_channel("Good morning!")
return
# If user is going to sleep
if input_clean in DailysSleepField.SLEEP_WORDS:
# If it's before 4pm, it's probably yesterday's sleep.
if now.hour <= 16:
current_data = yesterday_data
sleep_date = yesterday_date
# Did they already go to sleep?
if self.json_key_sleep_time in current_data:
# Did they already wake? If not, they're updating their sleep time.
if self.json_key_wake_time not in current_data:
current_data[self.json_key_sleep_time] = time_str
self.save_data(current_data, sleep_date)
self.message_channel("Good night again!")
return
# Move the last wake time to interruptions
interruption = dict()
interruption[self.json_key_wake_time] = current_data.pop(
self.json_key_wake_time
)
interruption[self.json_key_sleep_time] = time_str
if self.json_key_interruptions not in current_data:
current_data[self.json_key_interruptions] = []
current_data[self.json_key_interruptions].append(interruption)
self.save_data(current_data, sleep_date)
self.message_channel("Oh, going back to sleep? Sleep well!")
return
# Otherwise they're headed to sleep
else:
current_data[self.json_key_sleep_time] = time_str
self.save_data(current_data, sleep_date)
self.message_channel("Goodnight!")
return
def to_json(self):
json_obj = dict()
json_obj["type_name"] = self.type_name
return json_obj
@staticmethod
def from_json(json_obj, spreadsheet):
return DailysSleepField(spreadsheet)
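# Illustrative sketch (not part of the original module): the per-day record
# that passive_trigger() above builds, for a night with one mid-sleep
# interruption. The timestamps are invented.
EXAMPLE_DAY = {
    "sleep_time": "2020-03-01T23:41:00",
    "interruptions": [
        {"wake_time": "2020-03-02T03:10:00",
         "sleep_time": "2020-03-02T03:25:00"},
    ],
    "wake_time": "2020-03-02T07:55:00",
}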
| from datetime import timedelta
from hallo.events import EventMessage
import hallo.modules.dailys.dailys_field
class DailysSleepField(hallo.modules.dailys.dailys_field.DailysField):
# Does sleep and wake times, sleep notes, dream logs, shower?
type_name = "sleep"
WAKE_WORDS = ["morning", "wake", "woke"]
SLEEP_WORDS = ["goodnight", "sleep", "nini", "night"]
json_key_wake_time = "wake_time"
json_key_sleep_time = "sleep_time"
json_key_interruptions = "interruptions"
@staticmethod
def create_from_input(event, spreadsheet):
return DailysSleepField(spreadsheet)
@staticmethod
def passive_events():
return [EventMessage]
def passive_trigger(self, evt):
"""
:type evt: EventMessage
:rtype: None
"""
input_clean = evt.text.strip().lower()
now = evt.get_send_time()
time_str = now.isoformat()
sleep_date = evt.get_send_time().date()
current_data = self.load_data(sleep_date)
if current_data is None:
current_data = dict()
yesterday_date = sleep_date - timedelta(1)
yesterday_data = self.load_data(yesterday_date)
if yesterday_data is None:
yesterday_data = dict()
# If user is waking up
if input_clean in DailysSleepField.WAKE_WORDS:
# If today's data is blank, write in yesterday's sleep data
if len(current_data) == 0:
current_data = yesterday_data
sleep_date = yesterday_date
# If you already woke in this data, why are you waking again?
if self.json_key_wake_time in current_data:
self.message_channel("Didn't you already wake up?")
return
# If not, add a wake time to sleep data
else:
current_data[self.json_key_wake_time] = time_str
self.save_data(current_data, sleep_date)
self.message_channel("Good morning!")
return
# If user is going to sleep
if input_clean in DailysSleepField.SLEEP_WORDS:
# If it's before 4pm, it's probably yesterday's sleep.
if now.hour <= 16:
current_data = yesterday_data
sleep_date = yesterday_date
# Did they already go to sleep?
if self.json_key_sleep_time in current_data:
# Did they already wake? If not, they're updating their sleep time.
if self.json_key_wake_time not in current_data:
current_data[self.json_key_sleep_time] = time_str
self.save_data(current_data, sleep_date)
self.message_channel("Good night again!")
return
# Move the last wake time to interruptions
interruption = dict()
interruption[self.json_key_wake_time] = current_data.pop(
self.json_key_wake_time
)
interruption[self.json_key_sleep_time] = time_str
if self.json_key_interruptions not in current_data:
current_data[self.json_key_interruptions] = []
current_data[self.json_key_interruptions].append(interruption)
self.save_data(current_data, sleep_date)
self.message_channel("Oh, going back to sleep? Sleep well!")
return
# Otherwise they're headed to sleep
else:
current_data[self.json_key_sleep_time] = time_str
self.save_data(current_data, sleep_date)
self.message_channel("Goodnight!")
return
def to_json(self):
json_obj = dict()
json_obj["type_name"] = self.type_name
return json_obj
@staticmethod
def from_json(json_obj, spreadsheet):
return DailysSleepField(spreadsheet)
| en | 0.925797 | # Does sleep and wake times, sleep notes, dream logs, shower? :type evt: EventMessage :rtype: None # If user is waking up # If today's data is blank, write in yesterday's sleep data # If you already woke in this data, why are you waking again? # If not, add a wake time to sleep data # If user is going to sleep # If it's before 4pm, it's probably yesterday's sleep. # Did they already go to sleep? # Did they already wake? If not, they're updating their sleep time. # Move the last wake time to interruptions # Otherwise they're headed to sleep | 2.995733 | 3 |
qmla/exploration_strategies/nv_centre_spin_characterisation/nature_physics_2021/varying_true_model.py | flynnbr11/QMD | 9 | 6632894 | from __future__ import absolute_import
import sys
import os
import random
import qmla.model_building_utilities
from qmla.exploration_strategies.nv_centre_spin_characterisation.nature_physics_2021 import (
FullAccessNVCentre,
TieredGreedySearchNVCentre,
)
__all__ = ["VariableTrueModelNVCentre"]
class VariableTrueModelNVCentre(
TieredGreedySearchNVCentre
# FullAccessNVCentre
):
def __init__(self, exploration_rules, **kwargs):
r"""
Cycle through target model.
"""
super().__init__(exploration_rules=exploration_rules, **kwargs)
true_models = [
"xTi+yTi+zTz",
"xTi+zTi+xTx+zTz",
"xTi+yTi+zTi+xTx",
"xTi+yTi+zTi+zTz",
"xTi+yTi+zTi+yTy",
"xTi+yTi+zTi+xTx+zTz",
"xTi+yTi+zTi+yTy+zTz",
"xTi+yTi+zTi+xTx+yTy+zTz",
"xTi+yTi+zTi+xTx+yTy+zTz+xTy",
"xTi+yTi+zTi+xTx+yTy+zTz+xTz",
]
self.true_model = true_models[self.qmla_id % len(true_models)]
self.true_model = qmla.model_building_utilities.alph(self.true_model)
self._shared_true_parameters = False
self.log_print(
["starting rotational ES; true model is {}".format(self.true_model)]
)
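# Illustrative sketch (not from the original file): the true-model rotation
# above is plain modular indexing on the QMLA instance id.
def _pick_true_model(qmla_id, candidates):
    return candidates[qmla_id % len(candidates)]
# e.g. _pick_true_model(12, ['a', 'b', 'c']) == 'a'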
| from __future__ import absolute_import
import sys
import os
import random
import qmla.model_building_utilities
from qmla.exploration_strategies.nv_centre_spin_characterisation.nature_physics_2021 import (
FullAccessNVCentre,
TieredGreedySearchNVCentre,
)
__all__ = ["VariableTrueModelNVCentre"]
class VariableTrueModelNVCentre(
TieredGreedySearchNVCentre
# FullAccessNVCentre
):
def __init__(self, exploration_rules, **kwargs):
r"""
Cycle through target model.
"""
super().__init__(exploration_rules=exploration_rules, **kwargs)
true_models = [
"xTi+yTi+zTz",
"xTi+zTi+xTx+zTz",
"xTi+yTi+zTi+xTx",
"xTi+yTi+zTi+zTz",
"xTi+yTi+zTi+yTy",
"xTi+yTi+zTi+xTx+zTz",
"xTi+yTi+zTi+yTy+zTz",
"xTi+yTi+zTi+xTx+yTy+zTz",
"xTi+yTi+zTi+xTx+yTy+zTz+xTy",
"xTi+yTi+zTi+xTx+yTy+zTz+xTz",
]
self.true_model = true_models[self.qmla_id % len(true_models)]
self.true_model = qmla.model_building_utilities.alph(self.true_model)
self._shared_true_parameters = False
self.log_print(
["starting rotational ES; true model is {}".format(self.true_model)]
) | en | 0.620562 | # FullAccessNVCentre Cycle through target model. | 2.275295 | 2 |
tests/factors/test_technical_factor.py | doncat99/zvt | 10 | 6632895 | # -*- coding: utf-8 -*-
from zvt.api.data_type import Region, Provider
from zvt.contract import IntervalLevel
from zvt.factors.algorithm import MaTransformer, MacdTransformer
from zvt.factors.ma.ma_factor import CrossMaFactor
from zvt.factors.technical_factor import TechnicalFactor
from ..context import init_test_context
init_test_context()
def test_ma():
factor = TechnicalFactor(codes=['000338'],
start_timestamp='2019-01-01',
end_timestamp='2019-06-10',
level=IntervalLevel.LEVEL_1DAY,
provider=Provider.JoinQuant,
computing_window=30,
transformer=MaTransformer(windows=[5, 10, 30]),
adjust_type='qfq')
print(factor.factor_df.tail())
# compare with east money manually
ma5 = factor.factor_df['ma5']
ma10 = factor.factor_df['ma10']
ma30 = factor.factor_df['ma30']
assert round(ma5.loc[('stock_sz_000338', '2019-06-10')], 2) <= 11.23
assert round(ma10.loc[('stock_sz_000338', '2019-06-10')], 2) <= 11.43
assert round(ma30.loc[('stock_sz_000338', '2019-06-10')], 2) <= 11.52
factor.move_on(to_timestamp='2019-06-17')
ma5 = factor.factor_df['ma5']
ma10 = factor.factor_df['ma10']
ma30 = factor.factor_df['ma30']
assert round(ma5.loc[('stock_sz_000338', '2019-06-17')], 2) <= 12.06
assert round(ma10.loc[('stock_sz_000338', '2019-06-17')], 2) <= 11.64
assert round(ma30.loc[('stock_sz_000338', '2019-06-17')], 2) <= 11.50
def test_macd():
factor = TechnicalFactor(codes=['000338'],
start_timestamp='2019-01-01',
end_timestamp='2019-06-10',
level=IntervalLevel.LEVEL_1DAY,
provider=Provider.JoinQuant,
computing_window=None,
transformer=MacdTransformer(),
adjust_type='qfq')
print(factor.factor_df.tail())
# compare with east money manually
diff = factor.factor_df['diff']
dea = factor.factor_df['dea']
macd = factor.factor_df['macd']
assert round(diff.loc[('stock_sz_000338', '2019-06-10')], 2) == -0.14
assert round(dea.loc[('stock_sz_000338', '2019-06-10')], 2) == -0.15
assert round(macd.loc[('stock_sz_000338', '2019-06-10')], 2) == 0.02
factor.move_on(to_timestamp='2019-06-17')
diff = factor.factor_df['diff']
dea = factor.factor_df['dea']
macd = factor.factor_df['macd']
assert round(diff.loc[('stock_sz_000338', '2019-06-17')], 2) == 0.06
assert round(dea.loc[('stock_sz_000338', '2019-06-17')], 2) == -0.03
assert round(macd.loc[('stock_sz_000338', '2019-06-17')], 2) <= 0.19
def test_cross_ma():
factor = CrossMaFactor(codes=['000338'],
start_timestamp='2019-01-01',
end_timestamp='2019-06-10',
level=IntervalLevel.LEVEL_1DAY,
provider=Provider.JoinQuant,
windows=[5, 10],
adjust_type='qfq')
print(factor.factor_df.tail())
print(factor.result_df.tail())
score = factor.result_df['score']
assert score[('stock_sz_000338', '2019-06-03')] == True
assert score[('stock_sz_000338', '2019-06-04')] == True
assert ('stock_sz_000338', '2019-06-05') not in score or score[('stock_sz_000338', '2019-06-05')] == False
assert ('stock_sz_000338', '2019-06-06') not in score or score[('stock_sz_000338', '2019-06-06')] == False
assert ('stock_sz_000338', '2019-06-10') not in score or score[('stock_sz_000338', '2019-06-10')] == False
factor.move_on()
score = factor.result_df['score']
assert score[('stock_sz_000338', '2019-06-17')] == True
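# Illustrative cross-check (not part of the original tests): the ma5 values
# asserted above are ordinary 5-period rolling means, so they can be sanity
# checked with pandas alone; the prices below are fabricated.
import pandas as pd
closes = pd.Series([11.1, 11.2, 11.3, 11.2, 11.4, 11.5])
print(round(closes.rolling(window=5).mean().iloc[-1], 2))  # 11.32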
| # -*- coding: utf-8 -*-
from zvt.api.data_type import Region, Provider
from zvt.contract import IntervalLevel
from zvt.factors.algorithm import MaTransformer, MacdTransformer
from zvt.factors.ma.ma_factor import CrossMaFactor
from zvt.factors.technical_factor import TechnicalFactor
from ..context import init_test_context
init_test_context()
def test_ma():
factor = TechnicalFactor(codes=['000338'],
start_timestamp='2019-01-01',
end_timestamp='2019-06-10',
level=IntervalLevel.LEVEL_1DAY,
provider=Provider.JoinQuant,
computing_window=30,
transformer=MaTransformer(windows=[5, 10, 30]),
adjust_type='qfq')
print(factor.factor_df.tail())
# compare with east money manually
ma5 = factor.factor_df['ma5']
ma10 = factor.factor_df['ma10']
ma30 = factor.factor_df['ma30']
assert round(ma5.loc[('stock_sz_000338', '2019-06-10')], 2) <= 11.23
assert round(ma10.loc[('stock_sz_000338', '2019-06-10')], 2) <= 11.43
assert round(ma30.loc[('stock_sz_000338', '2019-06-10')], 2) <= 11.52
factor.move_on(to_timestamp='2019-06-17')
ma5 = factor.factor_df['ma5']
ma10 = factor.factor_df['ma10']
ma30 = factor.factor_df['ma30']
assert round(ma5.loc[('stock_sz_000338', '2019-06-17')], 2) <= 12.06
assert round(ma10.loc[('stock_sz_000338', '2019-06-17')], 2) <= 11.64
assert round(ma30.loc[('stock_sz_000338', '2019-06-17')], 2) <= 11.50
def test_macd():
factor = TechnicalFactor(codes=['000338'],
start_timestamp='2019-01-01',
end_timestamp='2019-06-10',
level=IntervalLevel.LEVEL_1DAY,
provider=Provider.JoinQuant,
computing_window=None,
transformer=MacdTransformer(),
adjust_type='qfq')
print(factor.factor_df.tail())
# compare with east money manually
diff = factor.factor_df['diff']
dea = factor.factor_df['dea']
macd = factor.factor_df['macd']
assert round(diff.loc[('stock_sz_000338', '2019-06-10')], 2) == -0.14
assert round(dea.loc[('stock_sz_000338', '2019-06-10')], 2) == -0.15
assert round(macd.loc[('stock_sz_000338', '2019-06-10')], 2) == 0.02
factor.move_on(to_timestamp='2019-06-17')
diff = factor.factor_df['diff']
dea = factor.factor_df['dea']
macd = factor.factor_df['macd']
assert round(diff.loc[('stock_sz_000338', '2019-06-17')], 2) == 0.06
assert round(dea.loc[('stock_sz_000338', '2019-06-17')], 2) == -0.03
assert round(macd.loc[('stock_sz_000338', '2019-06-17')], 2) <= 0.19
def test_cross_ma():
factor = CrossMaFactor(codes=['000338'],
start_timestamp='2019-01-01',
end_timestamp='2019-06-10',
level=IntervalLevel.LEVEL_1DAY,
provider=Provider.JoinQuant,
windows=[5, 10],
adjust_type='qfq')
print(factor.factor_df.tail())
print(factor.result_df.tail())
score = factor.result_df['score']
assert score[('stock_sz_000338', '2019-06-03')] == True
assert score[('stock_sz_000338', '2019-06-04')] == True
assert ('stock_sz_000338', '2019-06-05') not in score or score[('stock_sz_000338', '2019-06-05')] == False
assert ('stock_sz_000338', '2019-06-06') not in score or score[('stock_sz_000338', '2019-06-06')] == False
assert ('stock_sz_000338', '2019-06-10') not in score or score[('stock_sz_000338', '2019-06-10')] == False
factor.move_on()
score = factor.result_df['score']
assert score[('stock_sz_000338', '2019-06-17')] == True | en | 0.924414 | # -*- coding: utf-8 -*- # compare with east money manually # compare with east money manually | 1.915077 | 2 |
ibis/backends/impala/__init__.py | GrapeBaBa/ibis | 0 | 6632896 | <filename>ibis/backends/impala/__init__.py
"""Impala backend"""
from __future__ import annotations
import contextlib
import io
import operator
import re
import weakref
from posixpath import join as pjoin
from typing import Any
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
import numpy as np
import pandas as pd
import ibis.common.exceptions as com
import ibis.config
import ibis.expr.datatypes as dt
import ibis.expr.rules as rlz
import ibis.expr.schema as sch
import ibis.util as util
from ibis.backends.base.sql import BaseSQLBackend
from ibis.backends.base.sql.ddl import (
CTAS,
CreateDatabase,
CreateTableWithSchema,
CreateView,
DropDatabase,
DropTable,
DropView,
TruncateTable,
fully_qualified_re,
is_fully_qualified,
)
from ibis.config import options
from . import ddl, udf
from .client import ImpalaConnection, ImpalaDatabase, ImpalaTable
from .compat import HS2Error, ImpylaError
from .compiler import ImpalaCompiler
from .hdfs import HDFS, WebHDFS, hdfs_connect
from .pandas_interop import DataFrameWriter
from .udf import ( # noqa F408
aggregate_function,
scalar_function,
wrap_uda,
wrap_udf,
)
_HS2_TTypeId_to_dtype = {
'BOOLEAN': 'bool',
'TINYINT': 'int8',
'SMALLINT': 'int16',
'INT': 'int32',
'BIGINT': 'int64',
'TIMESTAMP': 'datetime64[ns]',
'FLOAT': 'float32',
'DOUBLE': 'float64',
'STRING': 'object',
'DECIMAL': 'object',
'BINARY': 'object',
'VARCHAR': 'object',
'CHAR': 'object',
}
def _split_signature(x):
name, rest = x.split('(', 1)
return name, rest[:-1]
_arg_type = re.compile(r'(.*)\.\.\.|([^\.]*)')
class _type_parser:
NORMAL, IN_PAREN = 0, 1
def __init__(self, value):
self.value = value
self.state = self.NORMAL
self.buf = io.StringIO()
self.types = []
for c in value:
self._step(c)
self._push()
def _push(self):
val = self.buf.getvalue().strip()
if val:
self.types.append(val)
self.buf = io.StringIO()
def _step(self, c):
if self.state == self.NORMAL:
if c == '(':
self.state = self.IN_PAREN
elif c == ',':
self._push()
return
elif self.state == self.IN_PAREN:
if c == ')':
self.state = self.NORMAL
self.buf.write(c)
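# Illustrative behaviour: _type_parser splits a comma-separated Impala type
# signature while ignoring commas nested inside parentheses, e.g.
#   _type_parser('int, decimal(9,2), array<string>').types
# is expected to yield ['int', 'decimal(9,2)', 'array<string>'].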
def _chunks_to_pandas_array(chunks):
total_length = 0
have_nulls = False
for c in chunks:
total_length += len(c)
have_nulls = have_nulls or c.nulls.any()
type_ = chunks[0].data_type
numpy_type = _HS2_TTypeId_to_dtype[type_]
def fill_nonnull(target, chunks):
pos = 0
for c in chunks:
target[pos : pos + len(c)] = c.values
pos += len(c.values)
def fill(target, chunks, na_rep):
pos = 0
for c in chunks:
nulls = c.nulls.copy()
nulls.bytereverse()
bits = np.frombuffer(nulls.tobytes(), dtype='u1')
mask = np.unpackbits(bits).view(np.bool_)
k = len(c)
dest = target[pos : pos + k]
dest[:] = c.values
dest[mask[:k]] = na_rep
pos += k
if have_nulls:
if numpy_type in ('bool', 'datetime64[ns]'):
target = np.empty(total_length, dtype='O')
na_rep = np.nan
elif numpy_type.startswith('int'):
target = np.empty(total_length, dtype='f8')
na_rep = np.nan
else:
target = np.empty(total_length, dtype=numpy_type)
na_rep = np.nan
fill(target, chunks, na_rep)
else:
target = np.empty(total_length, dtype=numpy_type)
fill_nonnull(target, chunks)
return target
def _column_batches_to_dataframe(names, batches):
cols = {}
for name, chunks in zip(names, zip(*(b.columns for b in batches))):
cols[name] = _chunks_to_pandas_array(chunks)
return pd.DataFrame(cols, columns=names)
class Backend(BaseSQLBackend):
name = 'impala'
database_class = ImpalaDatabase
table_expr_class = ImpalaTable
HDFS = HDFS
WebHDFS = WebHDFS
compiler = ImpalaCompiler
def hdfs_connect(self, *args, **kwargs):
return hdfs_connect(*args, **kwargs)
def do_connect(
new_backend,
host='localhost',
port=21050,
database='default',
timeout=45,
use_ssl=False,
ca_cert=None,
user=None,
        password=None,
auth_mechanism='NOSASL',
kerberos_service_name='impala',
pool_size=8,
hdfs_client=None,
):
"""Create an Impala Backend for use with Ibis.
Parameters
----------
host
Host name of the impalad or HiveServer2 in Hive
port
Impala's HiveServer2 port
database
Default database when obtaining new cursors
timeout
Connection timeout in seconds when communicating with HiveServer2
use_ssl
Use SSL when connecting to HiveServer2
ca_cert
Local path to 3rd party CA certificate or copy of server
certificate for self-signed certificates. If SSL is enabled, but
this argument is ``None``, then certificate validation is skipped.
user
LDAP user to authenticate
password
            LDAP password to authenticate
auth_mechanism
{'NOSASL' <- default, 'PLAIN', 'GSSAPI', 'LDAP'}.
Use NOSASL for non-secured Impala connections. Use PLAIN for
non-secured Hive clusters. Use LDAP for LDAP authenticated
connections. Use GSSAPI for Kerberos-secured clusters.
kerberos_service_name
Specify particular impalad service principal.
Examples
--------
>>> import ibis
>>> import os
>>> hdfs_host = os.environ.get('IBIS_TEST_NN_HOST', 'localhost')
>>> hdfs_port = int(os.environ.get('IBIS_TEST_NN_PORT', 50070))
>>> impala_host = os.environ.get('IBIS_TEST_IMPALA_HOST', 'localhost')
>>> impala_port = int(os.environ.get('IBIS_TEST_IMPALA_PORT', 21050))
>>> hdfs = ibis.impala.hdfs_connect(host=hdfs_host, port=hdfs_port)
>>> hdfs # doctest: +ELLIPSIS
<ibis.filesystems.WebHDFS object at 0x...>
>>> client = ibis.impala.connect(
... host=impala_host,
... port=impala_port,
... hdfs_client=hdfs,
... )
>>> client # doctest: +ELLIPSIS
<ibis.backends.impala.Backend object at 0x...>
Returns
-------
Backend
Impala backend
"""
import hdfs
new_backend._temp_objects = set()
if hdfs_client is None or isinstance(hdfs_client, HDFS):
new_backend._hdfs = hdfs_client
elif isinstance(hdfs_client, hdfs.Client):
new_backend._hdfs = WebHDFS(hdfs_client)
else:
raise TypeError(hdfs_client)
params = {
'host': host,
'port': port,
'database': database,
'timeout': timeout,
'use_ssl': use_ssl,
'ca_cert': ca_cert,
'user': user,
'password': password,
'auth_mechanism': auth_mechanism,
'kerberos_service_name': kerberos_service_name,
}
new_backend.con = ImpalaConnection(pool_size=pool_size, **params)
new_backend._ensure_temp_db_exists()
@property
def version(self):
cursor = self.raw_sql('select version()')
result = cursor.fetchone()[0]
cursor.release()
return result
def register_options(self):
ibis.config.register_option(
'temp_db',
'__ibis_tmp',
            'Database to use for temporary tables, views, functions, etc.',
)
ibis.config.register_option(
'temp_hdfs_path',
'/tmp/ibis',
'HDFS path for storage of temporary data',
)
def list_databases(self, like=None):
cur = self.raw_sql('SHOW DATABASES')
databases = self._get_list(cur)
cur.release()
return self._filter_with_like(databases, like)
def list_tables(self, like=None, database=None):
statement = 'SHOW TABLES'
if database is not None:
statement += f' IN {database}'
if like:
m = fully_qualified_re.match(like)
if m:
database, quoted, unquoted = m.groups()
like = quoted or unquoted
return self.list_tables(like=like, database=database)
statement += f" LIKE '{like}'"
return self._filter_with_like(
[row[0] for row in self.raw_sql(statement).fetchall()]
)
def fetch_from_cursor(self, cursor, schema):
batches = cursor.fetchall(columnar=True)
names = [x[0] for x in cursor.description]
df = _column_batches_to_dataframe(names, batches)
if schema:
return schema.apply_to(df)
return df
def _get_hdfs(self):
if self._hdfs is None:
raise com.IbisError(
'No HDFS connection; must pass connection '
'using the hdfs_client argument to '
'ibis.impala.connect'
)
return self._hdfs
def _set_hdfs(self, hdfs):
if not isinstance(hdfs, HDFS):
raise TypeError('must be HDFS instance')
self._hdfs = hdfs
hdfs = property(fget=_get_hdfs, fset=_set_hdfs)
@property
def kudu(self):
raise NotImplementedError(
"kudu support using kudu-python is no longer supported; "
"use impala facilities to manage kudu tables; "
"see https://kudu.apache.org/docs/kudu_impala_integration.html"
)
def close(self):
"""Close the connection and drop temporary objects."""
while self._temp_objects:
finalizer = self._temp_objects.pop()
with contextlib.suppress(HS2Error):
finalizer()
self.con.close()
def disable_codegen(self, disabled=True):
"""Turn off or on LLVM codegen in Impala query execution.
Parameters
----------
disabled
To disable codegen, pass with no argument or True. To enable
codegen, pass False.
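        Examples
        --------
        Minimal sketch against an existing connection ``con``:
        >>> con.disable_codegen()  # doctest: +SKIP
        >>> con.disable_codegen(False)  # doctest: +SKIP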
"""
self.con.disable_codegen(disabled)
def _fully_qualified_name(self, name, database):
if is_fully_qualified(name):
return name
database = database or self.current_database
return f'{database}.`{name}`'
def _get_list(self, cur):
tuples = cur.fetchall()
return list(map(operator.itemgetter(0), tuples))
@util.deprecated(version='2.0', instead='a new connection to database')
def set_database(self, name):
# XXX The parent `Client` has a generic method that calls this same
# method in the backend. But for whatever reason calling this code from
# that method doesn't seem to work. Maybe `con` is a copy?
self.con.set_database(name)
@property
def current_database(self):
# XXX The parent `Client` has a generic method that calls this same
# method in the backend. But for whatever reason calling this code from
# that method doesn't seem to work. Maybe `con` is a copy?
return self.con.database
def create_database(self, name, path=None, force=False):
"""Create a new Impala database.
Parameters
----------
name
Database name
path
HDFS path where to store the database data; otherwise uses Impala
default
force
Forcibly create the database
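        Examples
        --------
        Sketch with placeholder names and paths:
        >>> con.create_database('ibis_scratch', force=True)  # doctest: +SKIP
        >>> con.create_database(
        ...     'ibis_scratch_hdfs', path='/tmp/ibis_scratch', force=True
        ... )  # doctest: +SKIP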
"""
if path:
            # explicit mkdir ensures the user owns the dir rather than impala,
# which is easier for manual cleanup, if necessary
self.hdfs.mkdir(path)
statement = CreateDatabase(name, path=path, can_exist=force)
return self.raw_sql(statement)
def drop_database(self, name, force=False):
"""Drop an Impala database.
Parameters
----------
name
Database name
force
If False and there are any tables in this database, raises an
IntegrityError
"""
if not force or name in self.list_databases():
tables = self.list_tables(database=name)
udfs = self.list_udfs(database=name)
udas = self.list_udas(database=name)
else:
tables = []
udfs = []
udas = []
if force:
for table in tables:
util.log('Dropping {}'.format(f'{name}.{table}'))
self.drop_table_or_view(table, database=name)
for func in udfs:
util.log(f'Dropping function {func.name}({func.inputs})')
self.drop_udf(
func.name,
input_types=func.inputs,
database=name,
force=True,
)
for func in udas:
util.log(
'Dropping aggregate function {}({})'.format(
func.name, func.inputs
)
)
self.drop_uda(
func.name,
input_types=func.inputs,
database=name,
force=True,
)
else:
if len(tables) > 0 or len(udfs) > 0 or len(udas) > 0:
raise com.IntegrityError(
'Database {} must be empty before '
'being dropped, or set '
'force=True'.format(name)
)
statement = DropDatabase(name, must_exist=not force)
return self.raw_sql(statement)
def get_schema(
self,
table_name: str,
database: str | None = None,
) -> sch.Schema:
"""Return a Schema object for the indicated table and database.
Parameters
----------
table_name
Table name
database
Database name
Returns
-------
Schema
Ibis schema
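        Examples
        --------
        Sketch with a hypothetical table name:
        >>> schema = con.get_schema('my_table', database='my_db')  # doctest: +SKIP
        >>> schema.names  # doctest: +SKIP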
"""
qualified_name = self._fully_qualified_name(table_name, database)
query = f'DESCRIBE {qualified_name}'
# only pull out the first two columns which are names and types
pairs = [row[:2] for row in self.con.fetchall(query)]
names, types = zip(*pairs)
ibis_types = [udf.parse_type(type.lower()) for type in types]
names = [name.lower() for name in names]
return sch.Schema(names, ibis_types)
@property
def client_options(self):
return self.con.options
def get_options(self):
"""Return current query options for the Impala session."""
return dict(row[:2] for row in self.con.fetchall("SET"))
def set_options(self, options):
self.con.set_options(options)
def reset_options(self):
# Must nuke all cursors
raise NotImplementedError
def set_compression_codec(self, codec):
if codec is None:
codec = 'none'
else:
codec = codec.lower()
if codec not in ('none', 'gzip', 'snappy'):
raise ValueError(f'Unknown codec: {codec}')
self.set_options({'COMPRESSION_CODEC': codec})
def create_view(self, name, expr, database=None):
"""Create an Impala view from a table expression.
Parameters
----------
name
View name
expr : ibis TableExpr
Ibis table expression
database
Database name
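        Examples
        --------
        Sketch assuming an existing table expression ``t``:
        >>> t = con.table('my_table')  # doctest: +SKIP
        >>> expr = t.group_by('key').size()  # doctest: +SKIP
        >>> con.create_view('my_view', expr, database='my_db')  # doctest: +SKIP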
"""
ast = self.compiler.to_ast(expr)
select = ast.queries[0]
statement = CreateView(name, select, database=database)
return self.raw_sql(statement)
def drop_view(self, name, database=None, force=False):
"""Drop an Impala view.
Parameters
----------
name
            View name
database
Database
force
Database may throw exception if table does not exist
"""
statement = DropView(name, database=database, must_exist=not force)
return self.raw_sql(statement)
@contextlib.contextmanager
def _setup_insert(self, obj):
if isinstance(obj, pd.DataFrame):
with DataFrameWriter(self, obj) as writer:
yield writer.delimited_table(writer.write_temp_csv())
else:
yield obj
def create_table(
self,
table_name,
obj=None,
schema=None,
database=None,
external=False,
force=False,
# HDFS options
format='parquet',
location=None,
partition=None,
like_parquet=None,
):
"""Create a new table in Impala using an Ibis table expression.
This is currently designed for tables whose data is stored in HDFS.
Parameters
----------
table_name
Table name
obj
If passed, creates table from select statement results
schema
Mutually exclusive with obj, creates an empty table with a
particular schema
database
Database name
force
Do not create table if table with indicated name already exists
external
Create an external table; Impala will not delete the underlying
data when the table is dropped
format
File format
location
Specify the directory location where Impala reads and writes files
for the table
partition
Must pass a schema to use this. Cannot partition from an
expression.
like_parquet
Can specify instead of a schema
Examples
--------
>>> con.create_table('new_table_name', table_expr) # doctest: +SKIP
"""
if like_parquet is not None:
raise NotImplementedError
if obj is not None:
with self._setup_insert(obj) as to_insert:
ast = self.compiler.to_ast(to_insert)
select = ast.queries[0]
self.raw_sql(
CTAS(
table_name,
select,
database=database,
can_exist=force,
format=format,
external=external,
partition=partition,
path=location,
)
)
elif schema is not None:
self.raw_sql(
CreateTableWithSchema(
table_name,
schema,
database=database,
format=format,
can_exist=force,
external=external,
path=location,
partition=partition,
)
)
else:
raise com.IbisError('Must pass obj or schema')
def avro_file(
self,
hdfs_dir,
avro_schema,
name=None,
database=None,
external=True,
persist=False,
):
"""Create a table to read a collection of Avro data.
Parameters
----------
hdfs_dir
Absolute HDFS path to directory containing avro files
avro_schema
The Avro schema for the data as a Python dict
name
Table name
database
Database name
external
Whether the table is external
persist
Persist the table
Returns
-------
ImpalaTable
Impala table expression
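        Examples
        --------
        Sketch with a made-up HDFS path and Avro schema:
        >>> avro_schema = {
        ...     "type": "record",
        ...     "name": "my_record",
        ...     "fields": [{"name": "id", "type": "int"}],
        ... }
        >>> table = con.avro_file('/path/to/avro_files', avro_schema)  # doctest: +SKIP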
"""
name, database = self._get_concrete_table_path(
name, database, persist=persist
)
stmt = ddl.CreateTableAvro(
name, hdfs_dir, avro_schema, database=database, external=external
)
self.raw_sql(stmt)
return self._wrap_new_table(name, database, persist)
def delimited_file(
self,
hdfs_dir,
schema,
name=None,
database=None,
delimiter=',',
na_rep=None,
escapechar=None,
lineterminator=None,
external=True,
persist=False,
):
"""Interpret delimited text files as an Ibis table expression.
See the `parquet_file` method for more details on what happens under
the hood.
Parameters
----------
hdfs_dir
HDFS directory containing delimited text files
schema
Ibis schema
name
Name for temporary or persistent table; otherwise random names are
generated
database
Database to create the table in
delimiter
Character used to delimit columns
escapechar
Character used to escape special characters
lineterminator
Character used to delimit lines
external
            Create table as EXTERNAL (data will not be deleted on drop). Note
that if persist=False and external=False, whatever data you
reference will be deleted
persist
If True, do not delete the table upon garbage collection of ibis
table object
Returns
-------
ImpalaTable
Impala table expression
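        Examples
        --------
        Sketch with a made-up HDFS path and schema:
        >>> import ibis  # doctest: +SKIP
        >>> schema = ibis.schema([('foo', 'string'), ('bar', 'double')])  # doctest: +SKIP
        >>> table = con.delimited_file('/path/to/csv_files', schema)  # doctest: +SKIP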
"""
name, database = self._get_concrete_table_path(
name, database, persist=persist
)
stmt = ddl.CreateTableDelimited(
name,
hdfs_dir,
schema,
database=database,
delimiter=delimiter,
external=external,
na_rep=na_rep,
lineterminator=lineterminator,
escapechar=escapechar,
)
self.raw_sql(stmt)
return self._wrap_new_table(name, database, persist)
def parquet_file(
self,
hdfs_dir,
schema=None,
name=None,
database=None,
external=True,
like_file=None,
like_table=None,
persist=False,
):
"""Make indicated parquet file in HDFS available as an Ibis table.
The table created can be optionally named and persisted, otherwise a
unique name will be generated. Temporarily, for any non-persistent
external table created by Ibis we will attempt to drop it when the
underlying object is garbage collected (or the Python interpreter shuts
down normally).
Parameters
----------
hdfs_dir
Path in HDFS
schema
            If no schema is provided and neither of the like_* arguments is
passed, one will be inferred from one of the parquet files in the
directory.
like_file
Absolute path to Parquet file in HDFS to use for schema
definitions. An alternative to having to supply an explicit schema
like_table
Fully scoped and escaped string to an Impala table whose schema we
will use for the newly created table.
name
Random unique name generated otherwise
database
Database to create the (possibly temporary) table in
external
If a table is external, the referenced data will not be deleted
when the table is dropped in Impala. Otherwise (external=False)
Impala takes ownership of the Parquet file.
persist
Do not drop the table during garbage collection
Returns
-------
ImpalaTable
Impala table expression
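        Examples
        --------
        Sketch with a made-up HDFS path:
        >>> table = con.parquet_file('/path/to/parquet_files')  # doctest: +SKIP
        >>> table.limit(10).execute()  # doctest: +SKIP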
"""
name, database = self._get_concrete_table_path(
name, database, persist=persist
)
# If no schema provided, need to find some absolute path to a file in
# the HDFS directory
if like_file is None and like_table is None and schema is None:
file_name = self.hdfs._find_any_file(hdfs_dir)
like_file = pjoin(hdfs_dir, file_name)
stmt = ddl.CreateTableParquet(
name,
hdfs_dir,
schema=schema,
database=database,
example_file=like_file,
example_table=like_table,
external=external,
can_exist=False,
)
self.raw_sql(stmt)
return self._wrap_new_table(name, database, persist)
def _get_concrete_table_path(self, name, database, persist=False):
if not persist:
if name is None:
name = f'__ibis_tmp_{util.guid()}'
if database is None:
self._ensure_temp_db_exists()
database = options.impala.temp_db
return name, database
else:
if name is None:
raise com.IbisError('Must pass table name if persist=True')
return name, database
def _ensure_temp_db_exists(self):
# TODO: session memoize to avoid unnecessary `SHOW DATABASES` calls
name, path = options.impala.temp_db, options.impala.temp_hdfs_path
if name not in self.list_databases():
if self._hdfs is None:
print(
'Without an HDFS connection, certain functionality'
' may be disabled'
)
else:
self.create_database(name, path=path, force=True)
def _drop_table(self, name: str) -> None:
# database might have been dropped, so we suppress the
# corresponding Exception
with contextlib.suppress(ImpylaError):
self.drop_table(name)
def _wrap_new_table(self, name, database, persist):
qualified_name = self._fully_qualified_name(name, database)
t = self.table(qualified_name)
if not persist:
self._temp_objects.add(
weakref.finalize(t, self._drop_table, qualified_name)
)
# Compute number of rows in table for better default query planning
cardinality = t.count().execute()
set_card = (
"alter table {} set tblproperties('numRows'='{}', "
"'STATS_GENERATED_VIA_STATS_TASK' = 'true')".format(
qualified_name, cardinality
)
)
self.raw_sql(set_card)
return t
def text_file(self, hdfs_path, column_name='value'):
"""Interpret text data as a table with a single string column."""
def insert(
self,
table_name,
obj=None,
database=None,
overwrite=False,
partition=None,
values=None,
validate=True,
):
"""Insert data into an existing table.
See
[`ImpalaTable.insert`][ibis.backends.impala.client.ImpalaTable.insert]
for parameters.
Examples
--------
>>> table = 'my_table'
>>> con.insert(table, table_expr) # doctest: +SKIP
Completely overwrite contents
>>> con.insert(table, table_expr, overwrite=True) # doctest: +SKIP
"""
table = self.table(table_name, database=database)
return table.insert(
obj=obj,
overwrite=overwrite,
partition=partition,
values=values,
validate=validate,
)
def load_data(
self,
table_name,
path,
database=None,
overwrite=False,
partition=None,
):
"""Loads data into an Impala table by physically moving data files."""
table = self.table(table_name, database=database)
return table.load_data(path, overwrite=overwrite, partition=partition)
def drop_table(self, table_name, database=None, force=False):
"""Drop an Impala table.
Parameters
----------
table_name
Table name
database
Database name
force
Database may throw exception if table does not exist
Examples
--------
>>> table = 'my_table'
>>> db = 'operations'
>>> con.drop_table(table, database=db, force=True) # doctest: +SKIP
"""
statement = DropTable(
table_name, database=database, must_exist=not force
)
self.raw_sql(statement)
def truncate_table(self, table_name, database=None):
"""Delete all rows from an existing table.
Parameters
----------
table_name
Table name
database
Database name
"""
statement = TruncateTable(table_name, database=database)
self.raw_sql(statement)
def drop_table_or_view(self, name, database=None, force=False):
"""Drop view or table."""
try:
self.drop_table(name, database=database)
except Exception as e:
try:
self.drop_view(name, database=database)
except Exception:
raise e
def cache_table(self, table_name, database=None, pool='default'):
"""Caches a table in cluster memory in the given pool.
Parameters
----------
table_name
Table name
database
Database name
pool
The name of the pool in which to cache the table
Examples
--------
>>> table = 'my_table'
>>> db = 'operations'
>>> pool = 'op_4GB_pool'
>>> con.cache_table('my_table', database=db, pool=pool) # noqa: E501 # doctest: +SKIP
"""
statement = ddl.CacheTable(table_name, database=database, pool=pool)
self.raw_sql(statement)
def _get_schema_using_query(self, query):
cur = self.raw_sql(query)
# resets the state of the cursor and closes operation
cur.fetchall()
names, ibis_types = self._adapt_types(cur.description)
cur.release()
# per #321; most Impala tables will be lower case already, but Avro
# data, depending on the version of Impala, might have field names in
# the metastore cased according to the explicit case in the declared
# avro schema. This is very annoying, so it's easier to just conform on
# all lowercase fields from Impala.
names = [x.lower() for x in names]
return sch.Schema(names, ibis_types)
def create_function(self, func, name=None, database=None):
"""Create a function within Impala.
Parameters
----------
func
UDF or UDAF
name
Function name
database
Database name
"""
if name is None:
name = func.name
database = database or self.current_database
if isinstance(func, udf.ImpalaUDF):
stmt = ddl.CreateUDF(func, name=name, database=database)
elif isinstance(func, udf.ImpalaUDA):
stmt = ddl.CreateUDA(func, name=name, database=database)
else:
raise TypeError(func)
self.raw_sql(stmt)
def drop_udf(
self,
name,
input_types=None,
database=None,
force=False,
aggregate=False,
):
"""Drop a UDF.
If only name is given, this will search for the relevant UDF and drop
it. To delete an overloaded UDF, give only a name and force=True
Parameters
----------
name
Function name
input_types
Input types
force
Must be set to `True` to drop overloaded UDFs
database
Database name
aggregate
Whether the function is an aggregate
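        Examples
        --------
        Sketch with a hypothetical function name:
        >>> con.drop_udf('my_udf', force=True)  # doctest: +SKIP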
"""
if not input_types:
if not database:
database = self.current_database
result = self.list_udfs(database=database, like=name)
if len(result) > 1:
if force:
for func in result:
self._drop_single_function(
func.name,
func.inputs,
database=database,
aggregate=aggregate,
)
return
else:
raise Exception(
"More than one function "
+ f"with {name} found."
+ "Please specify force=True"
)
elif len(result) == 1:
func = result.pop()
self._drop_single_function(
func.name,
func.inputs,
database=database,
aggregate=aggregate,
)
return
else:
raise Exception(f"No function found with name {name}")
self._drop_single_function(
name, input_types, database=database, aggregate=aggregate
)
def drop_uda(self, name, input_types=None, database=None, force=False):
"""Drop an aggregate function."""
return self.drop_udf(
name, input_types=input_types, database=database, force=force
)
def _drop_single_function(
self, name, input_types, database=None, aggregate=False
):
stmt = ddl.DropFunction(
name,
input_types,
must_exist=False,
aggregate=aggregate,
database=database,
)
self.raw_sql(stmt)
def _drop_all_functions(self, database):
udfs = self.list_udfs(database=database)
for fnct in udfs:
stmt = ddl.DropFunction(
fnct.name,
fnct.inputs,
must_exist=False,
aggregate=False,
database=database,
)
self.raw_sql(stmt)
udafs = self.list_udas(database=database)
for udaf in udafs:
stmt = ddl.DropFunction(
udaf.name,
udaf.inputs,
must_exist=False,
aggregate=True,
database=database,
)
self.raw_sql(stmt)
def list_udfs(self, database=None, like=None):
"""Lists all UDFs associated with given database."""
if not database:
database = self.current_database
statement = ddl.ListFunction(database, like=like, aggregate=False)
cur = self.raw_sql(statement)
result = self._get_udfs(cur, udf.ImpalaUDF)
cur.release()
return result
def list_udas(self, database=None, like=None):
"""Lists all UDAFs associated with a given database."""
if not database:
database = self.current_database
statement = ddl.ListFunction(database, like=like, aggregate=True)
cur = self.raw_sql(statement)
result = self._get_udfs(cur, udf.ImpalaUDA)
cur.release()
return result
def _get_udfs(self, cur, klass):
def _to_type(x):
ibis_type = udf._impala_type_to_ibis(x.lower())
return dt.dtype(ibis_type)
tuples = cur.fetchall()
if len(tuples) > 0:
result = []
for tup in tuples:
out_type, sig = tup[:2]
name, types = _split_signature(sig)
types = _type_parser(types).types
inputs = []
for arg in types:
argm = _arg_type.match(arg)
var, simple = argm.groups()
if simple:
t = _to_type(simple)
inputs.append(t)
else:
t = _to_type(var)
inputs = rlz.listof(t)
# TODO
# inputs.append(varargs(t))
break
output = udf._impala_type_to_ibis(out_type.lower())
result.append(klass(inputs, output, name=name))
return result
else:
return []
def exists_udf(self, name: str, database: str | None = None) -> bool:
"""Checks if a given UDF exists within a specified database"""
return bool(self.list_udfs(database=database, like=name))
def exists_uda(self, name: str, database: str | None = None) -> bool:
"""Checks if a given UDAF exists within a specified database."""
return bool(self.list_udas(database=database, like=name))
def compute_stats(
self,
name: str,
database: str | None = None,
incremental: bool = False,
) -> None:
"""Issue a `COMPUTE STATS` command for a given table.
Parameters
----------
name
Can be fully qualified (with database name)
database
Database name
incremental
If True, issue COMPUTE INCREMENTAL STATS
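        Examples
        --------
        Sketch with a hypothetical table name:
        >>> con.compute_stats('my_db.my_table')  # doctest: +SKIP
        >>> con.compute_stats('my_table', database='my_db', incremental=True)  # noqa: E501 # doctest: +SKIP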
"""
maybe_inc = 'INCREMENTAL ' if incremental else ''
cmd = f'COMPUTE {maybe_inc}STATS'
stmt = self._table_command(cmd, name, database=database)
self.raw_sql(stmt)
def invalidate_metadata(
self,
name: str | None = None,
database: str | None = None,
) -> None:
"""Issue an `INVALIDATE METADATA` command.
Optionally this applies to a specific table. See Impala documentation.
Parameters
----------
name
Table name. Can be fully qualified (with database)
database
Database name
"""
stmt = 'INVALIDATE METADATA'
if name is not None:
stmt = self._table_command(stmt, name, database=database)
self.raw_sql(stmt)
def refresh(self, name: str, database: str | None = None) -> None:
"""Reload HDFS block location metadata for a table.
This can be useful after ingesting data as part of an ETL pipeline, for
example.
Related to `INVALIDATE METADATA`. See Impala documentation for more.
Parameters
----------
name
Table name. Can be fully qualified (with database)
database
Database name
"""
# TODO(wesm): can this statement be cancelled?
stmt = self._table_command('REFRESH', name, database=database)
self.raw_sql(stmt)
def describe_formatted(
self,
name: str,
database: str | None = None,
) -> pd.DataFrame:
"""Retrieve the results of a `DESCRIBE FORMATTED` command.
See Impala documentation for more.
Parameters
----------
name
Table name. Can be fully qualified (with database)
database
Database name
"""
from .metadata import parse_metadata
stmt = self._table_command(
'DESCRIBE FORMATTED', name, database=database
)
result = self._exec_statement(stmt)
# Leave formatting to pandas
for c in result.columns:
result[c] = result[c].str.strip()
return parse_metadata(result)
def show_files(
self,
name: str,
database: str | None = None,
) -> pd.DataFrame:
"""Retrieve results of a `SHOW FILES` command for a table.
See Impala documentation for more.
Parameters
----------
name
Table name. Can be fully qualified (with database)
database
Database name
"""
stmt = self._table_command('SHOW FILES IN', name, database=database)
return self._exec_statement(stmt)
def list_partitions(self, name, database=None):
stmt = self._table_command('SHOW PARTITIONS', name, database=database)
return self._exec_statement(stmt)
def table_stats(self, name, database=None):
"""Return results of `SHOW TABLE STATS` for the table `name`."""
stmt = self._table_command('SHOW TABLE STATS', name, database=database)
return self._exec_statement(stmt)
def column_stats(self, name, database=None):
"""Return results of `SHOW COLUMN STATS` for the table `name`."""
stmt = self._table_command(
'SHOW COLUMN STATS', name, database=database
)
return self._exec_statement(stmt)
def _exec_statement(self, stmt):
return self.fetch_from_cursor(
self.raw_sql(stmt, results=True), schema=None
)
def _table_command(self, cmd, name, database=None):
qualified_name = self._fully_qualified_name(name, database)
return f'{cmd} {qualified_name}'
def _adapt_types(self, descr):
names = []
adapted_types = []
for col in descr:
names.append(col[0])
impala_typename = col[1]
typename = udf._impala_to_ibis_type[impala_typename.lower()]
if typename == 'decimal':
precision, scale = col[4:6]
adapted_types.append(dt.Decimal(precision, scale))
else:
adapted_types.append(typename)
return names, adapted_types
def write_dataframe(
self,
df: pd.DataFrame,
path: str,
format: Literal['csv'] = 'csv',
) -> Any:
"""Write a pandas DataFrame to indicated file path.
Parameters
----------
df
Pandas DataFrame
path
Absolute file path
format
File format
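        Examples
        --------
        Sketch with a hypothetical path:
        >>> import pandas as pd  # doctest: +SKIP
        >>> df = pd.DataFrame({'a': [1, 2, 3]})  # doctest: +SKIP
        >>> con.write_dataframe(df, '/tmp/ibis/df.csv')  # doctest: +SKIP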
"""
writer = DataFrameWriter(self, df)
return writer.write_csv(path)
| <filename>ibis/backends/impala/__init__.py
"""Impala backend"""
from __future__ import annotations
import contextlib
import io
import operator
import re
import weakref
from posixpath import join as pjoin
from typing import Any
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
import numpy as np
import pandas as pd
import ibis.common.exceptions as com
import ibis.config
import ibis.expr.datatypes as dt
import ibis.expr.rules as rlz
import ibis.expr.schema as sch
import ibis.util as util
from ibis.backends.base.sql import BaseSQLBackend
from ibis.backends.base.sql.ddl import (
CTAS,
CreateDatabase,
CreateTableWithSchema,
CreateView,
DropDatabase,
DropTable,
DropView,
TruncateTable,
fully_qualified_re,
is_fully_qualified,
)
from ibis.config import options
from . import ddl, udf
from .client import ImpalaConnection, ImpalaDatabase, ImpalaTable
from .compat import HS2Error, ImpylaError
from .compiler import ImpalaCompiler
from .hdfs import HDFS, WebHDFS, hdfs_connect
from .pandas_interop import DataFrameWriter
from .udf import ( # noqa F408
aggregate_function,
scalar_function,
wrap_uda,
wrap_udf,
)
_HS2_TTypeId_to_dtype = {
'BOOLEAN': 'bool',
'TINYINT': 'int8',
'SMALLINT': 'int16',
'INT': 'int32',
'BIGINT': 'int64',
'TIMESTAMP': 'datetime64[ns]',
'FLOAT': 'float32',
'DOUBLE': 'float64',
'STRING': 'object',
'DECIMAL': 'object',
'BINARY': 'object',
'VARCHAR': 'object',
'CHAR': 'object',
}
def _split_signature(x):
name, rest = x.split('(', 1)
return name, rest[:-1]
_arg_type = re.compile(r'(.*)\.\.\.|([^\.]*)')
class _type_parser:
NORMAL, IN_PAREN = 0, 1
def __init__(self, value):
self.value = value
self.state = self.NORMAL
self.buf = io.StringIO()
self.types = []
for c in value:
self._step(c)
self._push()
def _push(self):
val = self.buf.getvalue().strip()
if val:
self.types.append(val)
self.buf = io.StringIO()
def _step(self, c):
if self.state == self.NORMAL:
if c == '(':
self.state = self.IN_PAREN
elif c == ',':
self._push()
return
elif self.state == self.IN_PAREN:
if c == ')':
self.state = self.NORMAL
self.buf.write(c)
def _chunks_to_pandas_array(chunks):
total_length = 0
have_nulls = False
for c in chunks:
total_length += len(c)
have_nulls = have_nulls or c.nulls.any()
type_ = chunks[0].data_type
numpy_type = _HS2_TTypeId_to_dtype[type_]
def fill_nonnull(target, chunks):
pos = 0
for c in chunks:
target[pos : pos + len(c)] = c.values
pos += len(c.values)
def fill(target, chunks, na_rep):
pos = 0
for c in chunks:
nulls = c.nulls.copy()
nulls.bytereverse()
bits = np.frombuffer(nulls.tobytes(), dtype='u1')
mask = np.unpackbits(bits).view(np.bool_)
k = len(c)
dest = target[pos : pos + k]
dest[:] = c.values
dest[mask[:k]] = na_rep
pos += k
if have_nulls:
if numpy_type in ('bool', 'datetime64[ns]'):
target = np.empty(total_length, dtype='O')
na_rep = np.nan
elif numpy_type.startswith('int'):
target = np.empty(total_length, dtype='f8')
na_rep = np.nan
else:
target = np.empty(total_length, dtype=numpy_type)
na_rep = np.nan
fill(target, chunks, na_rep)
else:
target = np.empty(total_length, dtype=numpy_type)
fill_nonnull(target, chunks)
return target
def _column_batches_to_dataframe(names, batches):
cols = {}
for name, chunks in zip(names, zip(*(b.columns for b in batches))):
cols[name] = _chunks_to_pandas_array(chunks)
return pd.DataFrame(cols, columns=names)
class Backend(BaseSQLBackend):
name = 'impala'
database_class = ImpalaDatabase
table_expr_class = ImpalaTable
HDFS = HDFS
WebHDFS = WebHDFS
compiler = ImpalaCompiler
def hdfs_connect(self, *args, **kwargs):
return hdfs_connect(*args, **kwargs)
def do_connect(
new_backend,
host='localhost',
port=21050,
database='default',
timeout=45,
use_ssl=False,
ca_cert=None,
user=None,
        password=None,
auth_mechanism='NOSASL',
kerberos_service_name='impala',
pool_size=8,
hdfs_client=None,
):
"""Create an Impala Backend for use with Ibis.
Parameters
----------
host
Host name of the impalad or HiveServer2 in Hive
port
Impala's HiveServer2 port
database
Default database when obtaining new cursors
timeout
Connection timeout in seconds when communicating with HiveServer2
use_ssl
Use SSL when connecting to HiveServer2
ca_cert
Local path to 3rd party CA certificate or copy of server
certificate for self-signed certificates. If SSL is enabled, but
this argument is ``None``, then certificate validation is skipped.
user
LDAP user to authenticate
password
            LDAP password to authenticate
auth_mechanism
{'NOSASL' <- default, 'PLAIN', 'GSSAPI', 'LDAP'}.
Use NOSASL for non-secured Impala connections. Use PLAIN for
non-secured Hive clusters. Use LDAP for LDAP authenticated
connections. Use GSSAPI for Kerberos-secured clusters.
kerberos_service_name
Specify particular impalad service principal.
Examples
--------
>>> import ibis
>>> import os
>>> hdfs_host = os.environ.get('IBIS_TEST_NN_HOST', 'localhost')
>>> hdfs_port = int(os.environ.get('IBIS_TEST_NN_PORT', 50070))
>>> impala_host = os.environ.get('IBIS_TEST_IMPALA_HOST', 'localhost')
>>> impala_port = int(os.environ.get('IBIS_TEST_IMPALA_PORT', 21050))
>>> hdfs = ibis.impala.hdfs_connect(host=hdfs_host, port=hdfs_port)
>>> hdfs # doctest: +ELLIPSIS
<ibis.filesystems.WebHDFS object at 0x...>
>>> client = ibis.impala.connect(
... host=impala_host,
... port=impala_port,
... hdfs_client=hdfs,
... )
>>> client # doctest: +ELLIPSIS
<ibis.backends.impala.Backend object at 0x...>
Returns
-------
Backend
Impala backend
"""
import hdfs
new_backend._temp_objects = set()
if hdfs_client is None or isinstance(hdfs_client, HDFS):
new_backend._hdfs = hdfs_client
elif isinstance(hdfs_client, hdfs.Client):
new_backend._hdfs = WebHDFS(hdfs_client)
else:
raise TypeError(hdfs_client)
params = {
'host': host,
'port': port,
'database': database,
'timeout': timeout,
'use_ssl': use_ssl,
'ca_cert': ca_cert,
'user': user,
'password': password,
'auth_mechanism': auth_mechanism,
'kerberos_service_name': kerberos_service_name,
}
new_backend.con = ImpalaConnection(pool_size=pool_size, **params)
new_backend._ensure_temp_db_exists()
@property
def version(self):
cursor = self.raw_sql('select version()')
result = cursor.fetchone()[0]
cursor.release()
return result
def register_options(self):
ibis.config.register_option(
'temp_db',
'__ibis_tmp',
            'Database to use for temporary tables, views, functions, etc.',
)
ibis.config.register_option(
'temp_hdfs_path',
'/tmp/ibis',
'HDFS path for storage of temporary data',
)
def list_databases(self, like=None):
cur = self.raw_sql('SHOW DATABASES')
databases = self._get_list(cur)
cur.release()
return self._filter_with_like(databases, like)
def list_tables(self, like=None, database=None):
statement = 'SHOW TABLES'
if database is not None:
statement += f' IN {database}'
if like:
m = fully_qualified_re.match(like)
if m:
database, quoted, unquoted = m.groups()
like = quoted or unquoted
return self.list_tables(like=like, database=database)
statement += f" LIKE '{like}'"
return self._filter_with_like(
[row[0] for row in self.raw_sql(statement).fetchall()]
)
def fetch_from_cursor(self, cursor, schema):
batches = cursor.fetchall(columnar=True)
names = [x[0] for x in cursor.description]
df = _column_batches_to_dataframe(names, batches)
if schema:
return schema.apply_to(df)
return df
def _get_hdfs(self):
if self._hdfs is None:
raise com.IbisError(
'No HDFS connection; must pass connection '
'using the hdfs_client argument to '
'ibis.impala.connect'
)
return self._hdfs
def _set_hdfs(self, hdfs):
if not isinstance(hdfs, HDFS):
raise TypeError('must be HDFS instance')
self._hdfs = hdfs
hdfs = property(fget=_get_hdfs, fset=_set_hdfs)
@property
def kudu(self):
raise NotImplementedError(
"kudu support using kudu-python is no longer supported; "
"use impala facilities to manage kudu tables; "
"see https://kudu.apache.org/docs/kudu_impala_integration.html"
)
def close(self):
"""Close the connection and drop temporary objects."""
while self._temp_objects:
finalizer = self._temp_objects.pop()
with contextlib.suppress(HS2Error):
finalizer()
self.con.close()
def disable_codegen(self, disabled=True):
"""Turn off or on LLVM codegen in Impala query execution.
Parameters
----------
disabled
To disable codegen, pass with no argument or True. To enable
codegen, pass False.
"""
self.con.disable_codegen(disabled)
def _fully_qualified_name(self, name, database):
if is_fully_qualified(name):
return name
database = database or self.current_database
return f'{database}.`{name}`'
def _get_list(self, cur):
tuples = cur.fetchall()
return list(map(operator.itemgetter(0), tuples))
@util.deprecated(version='2.0', instead='a new connection to database')
def set_database(self, name):
# XXX The parent `Client` has a generic method that calls this same
# method in the backend. But for whatever reason calling this code from
# that method doesn't seem to work. Maybe `con` is a copy?
self.con.set_database(name)
@property
def current_database(self):
# XXX The parent `Client` has a generic method that calls this same
# method in the backend. But for whatever reason calling this code from
# that method doesn't seem to work. Maybe `con` is a copy?
return self.con.database
def create_database(self, name, path=None, force=False):
"""Create a new Impala database.
Parameters
----------
name
Database name
path
HDFS path where to store the database data; otherwise uses Impala
default
force
Forcibly create the database
"""
if path:
            # explicit mkdir ensures the user owns the dir rather than impala,
# which is easier for manual cleanup, if necessary
self.hdfs.mkdir(path)
statement = CreateDatabase(name, path=path, can_exist=force)
return self.raw_sql(statement)
def drop_database(self, name, force=False):
"""Drop an Impala database.
Parameters
----------
name
Database name
force
If False and there are any tables in this database, raises an
IntegrityError
"""
if not force or name in self.list_databases():
tables = self.list_tables(database=name)
udfs = self.list_udfs(database=name)
udas = self.list_udas(database=name)
else:
tables = []
udfs = []
udas = []
if force:
for table in tables:
util.log('Dropping {}'.format(f'{name}.{table}'))
self.drop_table_or_view(table, database=name)
for func in udfs:
util.log(f'Dropping function {func.name}({func.inputs})')
self.drop_udf(
func.name,
input_types=func.inputs,
database=name,
force=True,
)
for func in udas:
util.log(
'Dropping aggregate function {}({})'.format(
func.name, func.inputs
)
)
self.drop_uda(
func.name,
input_types=func.inputs,
database=name,
force=True,
)
else:
if len(tables) > 0 or len(udfs) > 0 or len(udas) > 0:
raise com.IntegrityError(
'Database {} must be empty before '
'being dropped, or set '
'force=True'.format(name)
)
statement = DropDatabase(name, must_exist=not force)
return self.raw_sql(statement)
def get_schema(
self,
table_name: str,
database: str | None = None,
) -> sch.Schema:
"""Return a Schema object for the indicated table and database.
Parameters
----------
table_name
Table name
database
Database name
Returns
-------
Schema
Ibis schema
"""
qualified_name = self._fully_qualified_name(table_name, database)
query = f'DESCRIBE {qualified_name}'
# only pull out the first two columns which are names and types
pairs = [row[:2] for row in self.con.fetchall(query)]
names, types = zip(*pairs)
ibis_types = [udf.parse_type(type.lower()) for type in types]
names = [name.lower() for name in names]
return sch.Schema(names, ibis_types)
@property
def client_options(self):
return self.con.options
def get_options(self):
"""Return current query options for the Impala session."""
return dict(row[:2] for row in self.con.fetchall("SET"))
def set_options(self, options):
self.con.set_options(options)
def reset_options(self):
# Must nuke all cursors
raise NotImplementedError
def set_compression_codec(self, codec):
if codec is None:
codec = 'none'
else:
codec = codec.lower()
if codec not in ('none', 'gzip', 'snappy'):
raise ValueError(f'Unknown codec: {codec}')
self.set_options({'COMPRESSION_CODEC': codec})
def create_view(self, name, expr, database=None):
"""Create an Impala view from a table expression.
Parameters
----------
name
View name
expr : ibis TableExpr
Ibis table expression
database
Database name
"""
ast = self.compiler.to_ast(expr)
select = ast.queries[0]
statement = CreateView(name, select, database=database)
return self.raw_sql(statement)
def drop_view(self, name, database=None, force=False):
"""Drop an Impala view.
Parameters
----------
name
            View name
database
Database
force
Database may throw exception if table does not exist
"""
statement = DropView(name, database=database, must_exist=not force)
return self.raw_sql(statement)
@contextlib.contextmanager
def _setup_insert(self, obj):
if isinstance(obj, pd.DataFrame):
with DataFrameWriter(self, obj) as writer:
yield writer.delimited_table(writer.write_temp_csv())
else:
yield obj
def create_table(
self,
table_name,
obj=None,
schema=None,
database=None,
external=False,
force=False,
# HDFS options
format='parquet',
location=None,
partition=None,
like_parquet=None,
):
"""Create a new table in Impala using an Ibis table expression.
This is currently designed for tables whose data is stored in HDFS.
Parameters
----------
table_name
Table name
obj
If passed, creates table from select statement results
schema
Mutually exclusive with obj, creates an empty table with a
particular schema
database
Database name
force
Do not create table if table with indicated name already exists
external
Create an external table; Impala will not delete the underlying
data when the table is dropped
format
File format
location
Specify the directory location where Impala reads and writes files
for the table
partition
Must pass a schema to use this. Cannot partition from an
expression.
like_parquet
Can specify instead of a schema
Examples
--------
>>> con.create_table('new_table_name', table_expr) # doctest: +SKIP
"""
if like_parquet is not None:
raise NotImplementedError
if obj is not None:
with self._setup_insert(obj) as to_insert:
ast = self.compiler.to_ast(to_insert)
select = ast.queries[0]
self.raw_sql(
CTAS(
table_name,
select,
database=database,
can_exist=force,
format=format,
external=external,
partition=partition,
path=location,
)
)
elif schema is not None:
self.raw_sql(
CreateTableWithSchema(
table_name,
schema,
database=database,
format=format,
can_exist=force,
external=external,
path=location,
partition=partition,
)
)
else:
raise com.IbisError('Must pass obj or schema')
def avro_file(
self,
hdfs_dir,
avro_schema,
name=None,
database=None,
external=True,
persist=False,
):
"""Create a table to read a collection of Avro data.
Parameters
----------
hdfs_dir
Absolute HDFS path to directory containing avro files
avro_schema
The Avro schema for the data as a Python dict
name
Table name
database
Database name
external
Whether the table is external
persist
Persist the table
Returns
-------
ImpalaTable
Impala table expression
"""
name, database = self._get_concrete_table_path(
name, database, persist=persist
)
stmt = ddl.CreateTableAvro(
name, hdfs_dir, avro_schema, database=database, external=external
)
self.raw_sql(stmt)
return self._wrap_new_table(name, database, persist)
def delimited_file(
self,
hdfs_dir,
schema,
name=None,
database=None,
delimiter=',',
na_rep=None,
escapechar=None,
lineterminator=None,
external=True,
persist=False,
):
"""Interpret delimited text files as an Ibis table expression.
See the `parquet_file` method for more details on what happens under
the hood.
Parameters
----------
hdfs_dir
HDFS directory containing delimited text files
schema
Ibis schema
name
Name for temporary or persistent table; otherwise random names are
generated
database
Database to create the table in
delimiter
Character used to delimit columns
escapechar
Character used to escape special characters
lineterminator
Character used to delimit lines
external
            Create table as EXTERNAL (data will not be deleted on drop). Note
that if persist=False and external=False, whatever data you
reference will be deleted
persist
If True, do not delete the table upon garbage collection of ibis
table object
Returns
-------
ImpalaTable
Impala table expression
"""
name, database = self._get_concrete_table_path(
name, database, persist=persist
)
stmt = ddl.CreateTableDelimited(
name,
hdfs_dir,
schema,
database=database,
delimiter=delimiter,
external=external,
na_rep=na_rep,
lineterminator=lineterminator,
escapechar=escapechar,
)
self.raw_sql(stmt)
return self._wrap_new_table(name, database, persist)
def parquet_file(
self,
hdfs_dir,
schema=None,
name=None,
database=None,
external=True,
like_file=None,
like_table=None,
persist=False,
):
"""Make indicated parquet file in HDFS available as an Ibis table.
The table created can be optionally named and persisted, otherwise a
unique name will be generated. Temporarily, for any non-persistent
external table created by Ibis we will attempt to drop it when the
underlying object is garbage collected (or the Python interpreter shuts
down normally).
Parameters
----------
hdfs_dir
Path in HDFS
schema
            If no schema is provided and neither of the like_* arguments is
passed, one will be inferred from one of the parquet files in the
directory.
like_file
Absolute path to Parquet file in HDFS to use for schema
definitions. An alternative to having to supply an explicit schema
like_table
Fully scoped and escaped string to an Impala table whose schema we
will use for the newly created table.
name
Random unique name generated otherwise
database
Database to create the (possibly temporary) table in
external
If a table is external, the referenced data will not be deleted
when the table is dropped in Impala. Otherwise (external=False)
Impala takes ownership of the Parquet file.
persist
Do not drop the table during garbage collection
Returns
-------
ImpalaTable
Impala table expression
"""
name, database = self._get_concrete_table_path(
name, database, persist=persist
)
# If no schema provided, need to find some absolute path to a file in
# the HDFS directory
if like_file is None and like_table is None and schema is None:
file_name = self.hdfs._find_any_file(hdfs_dir)
like_file = pjoin(hdfs_dir, file_name)
stmt = ddl.CreateTableParquet(
name,
hdfs_dir,
schema=schema,
database=database,
example_file=like_file,
example_table=like_table,
external=external,
can_exist=False,
)
self.raw_sql(stmt)
return self._wrap_new_table(name, database, persist)
def _get_concrete_table_path(self, name, database, persist=False):
if not persist:
if name is None:
name = f'__ibis_tmp_{util.guid()}'
if database is None:
self._ensure_temp_db_exists()
database = options.impala.temp_db
return name, database
else:
if name is None:
raise com.IbisError('Must pass table name if persist=True')
return name, database
def _ensure_temp_db_exists(self):
# TODO: session memoize to avoid unnecessary `SHOW DATABASES` calls
name, path = options.impala.temp_db, options.impala.temp_hdfs_path
if name not in self.list_databases():
if self._hdfs is None:
print(
'Without an HDFS connection, certain functionality'
' may be disabled'
)
else:
self.create_database(name, path=path, force=True)
def _drop_table(self, name: str) -> None:
# database might have been dropped, so we suppress the
# corresponding Exception
with contextlib.suppress(ImpylaError):
self.drop_table(name)
def _wrap_new_table(self, name, database, persist):
qualified_name = self._fully_qualified_name(name, database)
t = self.table(qualified_name)
if not persist:
self._temp_objects.add(
weakref.finalize(t, self._drop_table, qualified_name)
)
# Compute number of rows in table for better default query planning
cardinality = t.count().execute()
set_card = (
"alter table {} set tblproperties('numRows'='{}', "
"'STATS_GENERATED_VIA_STATS_TASK' = 'true')".format(
qualified_name, cardinality
)
)
self.raw_sql(set_card)
return t
def text_file(self, hdfs_path, column_name='value'):
"""Interpret text data as a table with a single string column."""
def insert(
self,
table_name,
obj=None,
database=None,
overwrite=False,
partition=None,
values=None,
validate=True,
):
"""Insert data into an existing table.
See
[`ImpalaTable.insert`][ibis.backends.impala.client.ImpalaTable.insert]
for parameters.
Examples
--------
>>> table = 'my_table'
>>> con.insert(table, table_expr) # doctest: +SKIP
Completely overwrite contents
>>> con.insert(table, table_expr, overwrite=True) # doctest: +SKIP
"""
table = self.table(table_name, database=database)
return table.insert(
obj=obj,
overwrite=overwrite,
partition=partition,
values=values,
validate=validate,
)
def load_data(
self,
table_name,
path,
database=None,
overwrite=False,
partition=None,
):
"""Loads data into an Impala table by physically moving data files."""
table = self.table(table_name, database=database)
return table.load_data(path, overwrite=overwrite, partition=partition)
def drop_table(self, table_name, database=None, force=False):
"""Drop an Impala table.
Parameters
----------
table_name
Table name
database
Database name
force
Database may throw exception if table does not exist
Examples
--------
>>> table = 'my_table'
>>> db = 'operations'
>>> con.drop_table(table, database=db, force=True) # doctest: +SKIP
"""
statement = DropTable(
table_name, database=database, must_exist=not force
)
self.raw_sql(statement)
def truncate_table(self, table_name, database=None):
"""Delete all rows from an existing table.
Parameters
----------
table_name
Table name
database
Database name
"""
statement = TruncateTable(table_name, database=database)
self.raw_sql(statement)
def drop_table_or_view(self, name, database=None, force=False):
"""Drop view or table."""
try:
self.drop_table(name, database=database)
except Exception as e:
try:
self.drop_view(name, database=database)
except Exception:
raise e
def cache_table(self, table_name, database=None, pool='default'):
"""Caches a table in cluster memory in the given pool.
Parameters
----------
table_name
Table name
database
Database name
pool
The name of the pool in which to cache the table
Examples
--------
>>> table = 'my_table'
>>> db = 'operations'
>>> pool = 'op_4GB_pool'
>>> con.cache_table('my_table', database=db, pool=pool) # noqa: E501 # doctest: +SKIP
"""
statement = ddl.CacheTable(table_name, database=database, pool=pool)
self.raw_sql(statement)
def _get_schema_using_query(self, query):
cur = self.raw_sql(query)
# resets the state of the cursor and closes operation
cur.fetchall()
names, ibis_types = self._adapt_types(cur.description)
cur.release()
# per #321; most Impala tables will be lower case already, but Avro
# data, depending on the version of Impala, might have field names in
# the metastore cased according to the explicit case in the declared
# avro schema. This is very annoying, so it's easier to just conform on
# all lowercase fields from Impala.
names = [x.lower() for x in names]
return sch.Schema(names, ibis_types)
def create_function(self, func, name=None, database=None):
"""Create a function within Impala.
Parameters
----------
func
UDF or UDAF
name
Function name
database
Database name
"""
if name is None:
name = func.name
database = database or self.current_database
if isinstance(func, udf.ImpalaUDF):
stmt = ddl.CreateUDF(func, name=name, database=database)
elif isinstance(func, udf.ImpalaUDA):
stmt = ddl.CreateUDA(func, name=name, database=database)
else:
raise TypeError(func)
self.raw_sql(stmt)
def drop_udf(
self,
name,
input_types=None,
database=None,
force=False,
aggregate=False,
):
"""Drop a UDF.
If only name is given, this will search for the relevant UDF and drop
it. To delete an overloaded UDF, give only a name and force=True
Parameters
----------
name
Function name
input_types
Input types
force
Must be set to `True` to drop overloaded UDFs
database
Database name
aggregate
Whether the function is an aggregate
"""
if not input_types:
if not database:
database = self.current_database
result = self.list_udfs(database=database, like=name)
if len(result) > 1:
if force:
for func in result:
self._drop_single_function(
func.name,
func.inputs,
database=database,
aggregate=aggregate,
)
return
else:
                    raise Exception(
                        f"More than one function with {name} found. "
                        "Please specify force=True"
                    )
elif len(result) == 1:
func = result.pop()
self._drop_single_function(
func.name,
func.inputs,
database=database,
aggregate=aggregate,
)
return
else:
raise Exception(f"No function found with name {name}")
self._drop_single_function(
name, input_types, database=database, aggregate=aggregate
)
def drop_uda(self, name, input_types=None, database=None, force=False):
"""Drop an aggregate function."""
return self.drop_udf(
name, input_types=input_types, database=database, force=force
)
def _drop_single_function(
self, name, input_types, database=None, aggregate=False
):
stmt = ddl.DropFunction(
name,
input_types,
must_exist=False,
aggregate=aggregate,
database=database,
)
self.raw_sql(stmt)
def _drop_all_functions(self, database):
udfs = self.list_udfs(database=database)
for fnct in udfs:
stmt = ddl.DropFunction(
fnct.name,
fnct.inputs,
must_exist=False,
aggregate=False,
database=database,
)
self.raw_sql(stmt)
udafs = self.list_udas(database=database)
for udaf in udafs:
stmt = ddl.DropFunction(
udaf.name,
udaf.inputs,
must_exist=False,
aggregate=True,
database=database,
)
self.raw_sql(stmt)
def list_udfs(self, database=None, like=None):
"""Lists all UDFs associated with given database."""
if not database:
database = self.current_database
statement = ddl.ListFunction(database, like=like, aggregate=False)
cur = self.raw_sql(statement)
result = self._get_udfs(cur, udf.ImpalaUDF)
cur.release()
return result
def list_udas(self, database=None, like=None):
"""Lists all UDAFs associated with a given database."""
if not database:
database = self.current_database
statement = ddl.ListFunction(database, like=like, aggregate=True)
cur = self.raw_sql(statement)
result = self._get_udfs(cur, udf.ImpalaUDA)
cur.release()
return result
def _get_udfs(self, cur, klass):
def _to_type(x):
ibis_type = udf._impala_type_to_ibis(x.lower())
return dt.dtype(ibis_type)
tuples = cur.fetchall()
if len(tuples) > 0:
result = []
for tup in tuples:
out_type, sig = tup[:2]
name, types = _split_signature(sig)
types = _type_parser(types).types
inputs = []
for arg in types:
argm = _arg_type.match(arg)
var, simple = argm.groups()
if simple:
t = _to_type(simple)
inputs.append(t)
else:
t = _to_type(var)
inputs = rlz.listof(t)
# TODO
# inputs.append(varargs(t))
break
output = udf._impala_type_to_ibis(out_type.lower())
result.append(klass(inputs, output, name=name))
return result
else:
return []
def exists_udf(self, name: str, database: str | None = None) -> bool:
"""Checks if a given UDF exists within a specified database"""
return bool(self.list_udfs(database=database, like=name))
def exists_uda(self, name: str, database: str | None = None) -> bool:
"""Checks if a given UDAF exists within a specified database."""
return bool(self.list_udas(database=database, like=name))
def compute_stats(
self,
name: str,
database: str | None = None,
incremental: bool = False,
) -> None:
"""Issue a `COMPUTE STATS` command for a given table.
Parameters
----------
name
Can be fully qualified (with database name)
database
Database name
incremental
If True, issue COMPUTE INCREMENTAL STATS
"""
maybe_inc = 'INCREMENTAL ' if incremental else ''
cmd = f'COMPUTE {maybe_inc}STATS'
stmt = self._table_command(cmd, name, database=database)
self.raw_sql(stmt)
def invalidate_metadata(
self,
name: str | None = None,
database: str | None = None,
) -> None:
"""Issue an `INVALIDATE METADATA` command.
Optionally this applies to a specific table. See Impala documentation.
Parameters
----------
name
Table name. Can be fully qualified (with database)
database
Database name
"""
stmt = 'INVALIDATE METADATA'
if name is not None:
stmt = self._table_command(stmt, name, database=database)
self.raw_sql(stmt)
def refresh(self, name: str, database: str | None = None) -> None:
"""Reload HDFS block location metadata for a table.
This can be useful after ingesting data as part of an ETL pipeline, for
example.
Related to `INVALIDATE METADATA`. See Impala documentation for more.
Parameters
----------
name
Table name. Can be fully qualified (with database)
database
Database name
"""
# TODO(wesm): can this statement be cancelled?
stmt = self._table_command('REFRESH', name, database=database)
self.raw_sql(stmt)
def describe_formatted(
self,
name: str,
database: str | None = None,
) -> pd.DataFrame:
"""Retrieve the results of a `DESCRIBE FORMATTED` command.
See Impala documentation for more.
Parameters
----------
name
Table name. Can be fully qualified (with database)
database
Database name
"""
from .metadata import parse_metadata
stmt = self._table_command(
'DESCRIBE FORMATTED', name, database=database
)
result = self._exec_statement(stmt)
# Leave formatting to pandas
for c in result.columns:
result[c] = result[c].str.strip()
return parse_metadata(result)
def show_files(
self,
name: str,
database: str | None = None,
) -> pd.DataFrame:
"""Retrieve results of a `SHOW FILES` command for a table.
See Impala documentation for more.
Parameters
----------
name
Table name. Can be fully qualified (with database)
database
Database name
"""
stmt = self._table_command('SHOW FILES IN', name, database=database)
return self._exec_statement(stmt)
def list_partitions(self, name, database=None):
stmt = self._table_command('SHOW PARTITIONS', name, database=database)
return self._exec_statement(stmt)
def table_stats(self, name, database=None):
"""Return results of `SHOW TABLE STATS` for the table `name`."""
stmt = self._table_command('SHOW TABLE STATS', name, database=database)
return self._exec_statement(stmt)
def column_stats(self, name, database=None):
"""Return results of `SHOW COLUMN STATS` for the table `name`."""
stmt = self._table_command(
'SHOW COLUMN STATS', name, database=database
)
return self._exec_statement(stmt)
def _exec_statement(self, stmt):
return self.fetch_from_cursor(
self.raw_sql(stmt, results=True), schema=None
)
def _table_command(self, cmd, name, database=None):
qualified_name = self._fully_qualified_name(name, database)
return f'{cmd} {qualified_name}'
def _adapt_types(self, descr):
names = []
adapted_types = []
for col in descr:
names.append(col[0])
impala_typename = col[1]
typename = udf._impala_to_ibis_type[impala_typename.lower()]
if typename == 'decimal':
precision, scale = col[4:6]
adapted_types.append(dt.Decimal(precision, scale))
else:
adapted_types.append(typename)
return names, adapted_types
def write_dataframe(
self,
df: pd.DataFrame,
path: str,
format: Literal['csv'] = 'csv',
) -> Any:
"""Write a pandas DataFrame to indicated file path.
Parameters
----------
df
Pandas DataFrame
path
Absolute file path
format
File format
"""
writer = DataFrameWriter(self, df)
return writer.write_csv(path)
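# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# reachable Impala cluster; the host, port, database and table names below are
# placeholders, and a real deployment may also pass an HDFS client as shown in
# the connect() docstring. Only methods defined on the Backend above are used.
# ---------------------------------------------------------------------------
import ibis

client = ibis.impala.connect(host='impala.example.com', port=21050)

# Refresh catalog metadata and gather statistics for an existing table.
client.invalidate_metadata('my_table', database='analytics')
client.compute_stats('my_table', database='analytics', incremental=True)

# Inspect the table's physical files and per-column statistics.
files = client.show_files('my_table', database='analytics')
col_stats = client.column_stats('my_table', database='analytics')

# Drop the table when done; force=True tolerates a missing table.
client.drop_table('my_table', database='analytics', force=True)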
cvxpy/atoms/affine/diag.py | NunoEdgarGFlowHub/cvxpy | 0 | 6632897 |
"""
Copyright 2013 <NAME>
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms.affine.affine_atom import AffAtom
from cvxpy.atoms.affine.vec import vec
import cvxpy.lin_ops.lin_utils as lu
import numpy as np
def diag(expr):
"""Extracts the diagonal from a matrix or makes a vector a diagonal matrix.
Parameters
----------
expr : Expression or numeric constant
A vector or square matrix.
Returns
-------
Expression
An Expression representing the diagonal vector/matrix.
"""
expr = AffAtom.cast_to_const(expr)
if expr.is_vector():
return diag_vec(vec(expr))
elif expr.ndim == 2 and expr.shape[0] == expr.shape[1]:
return diag_mat(expr)
else:
raise ValueError("Argument to diag must be a vector or square matrix.")
class diag_vec(AffAtom):
"""Converts a vector into a diagonal matrix.
"""
def __init__(self, expr):
super(diag_vec, self).__init__(expr)
def numeric(self, values):
"""Convert the vector constant into a diagonal matrix.
"""
return np.diag(values[0])
def shape_from_args(self):
"""A square matrix.
"""
rows = self.args[0].shape[0]
return (rows, rows)
def is_symmetric(self):
"""Is the expression symmetric?
"""
return True
def is_hermitian(self):
"""Is the expression symmetric?
"""
return True
@staticmethod
def graph_implementation(arg_objs, shape, data=None):
"""Convolve two vectors.
Parameters
----------
arg_objs : list
LinExpr for each argument.
shape : tuple
The shape of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
return (lu.diag_vec(arg_objs[0]), [])
class diag_mat(AffAtom):
"""Extracts the diagonal from a square matrix.
"""
def __init__(self, expr):
super(diag_mat, self).__init__(expr)
@AffAtom.numpy_numeric
def numeric(self, values):
"""Extract the diagonal from a square matrix constant.
"""
# The return type in numpy versions < 1.10 was ndarray.
v = np.diag(values[0])
if isinstance(v, np.matrix):
v = v.A[0]
return v
def shape_from_args(self):
"""A column vector.
"""
rows, _ = self.args[0].shape
return (rows,)
@staticmethod
def graph_implementation(arg_objs, shape, data=None):
"""Extracts the diagonal of a matrix.
Parameters
----------
arg_objs : list
LinExpr for each argument.
shape : tuple
The shape of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
return (lu.diag_mat(arg_objs[0]), [])
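# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module), exercising both
# behaviours above through the public cvxpy API. The toy problem and target
# vector are arbitrary.
# ---------------------------------------------------------------------------
import cvxpy as cp
import numpy as np

x = cp.Variable(3)
D = cp.diag(x)                                  # diag_vec: vector -> 3x3 diagonal matrix
d = cp.diag(np.arange(9.0).reshape(3, 3))       # diag_mat: square matrix -> diagonal vector

target = np.array([1.0, 2.0, 3.0])
prob = cp.Problem(cp.Minimize(cp.sum_squares(x - target)))
prob.solve()
print(D.shape, d.shape)                         # (3, 3) (3,)
print(np.round(x.value, 3))                     # approximately [1. 2. 3.]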
| """
Copyright 2013 <NAME>
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms.affine.affine_atom import AffAtom
from cvxpy.atoms.affine.vec import vec
import cvxpy.lin_ops.lin_utils as lu
import numpy as np
def diag(expr):
"""Extracts the diagonal from a matrix or makes a vector a diagonal matrix.
Parameters
----------
expr : Expression or numeric constant
A vector or square matrix.
Returns
-------
Expression
An Expression representing the diagonal vector/matrix.
"""
expr = AffAtom.cast_to_const(expr)
if expr.is_vector():
return diag_vec(vec(expr))
elif expr.ndim == 2 and expr.shape[0] == expr.shape[1]:
return diag_mat(expr)
else:
raise ValueError("Argument to diag must be a vector or square matrix.")
class diag_vec(AffAtom):
"""Converts a vector into a diagonal matrix.
"""
def __init__(self, expr):
super(diag_vec, self).__init__(expr)
def numeric(self, values):
"""Convert the vector constant into a diagonal matrix.
"""
return np.diag(values[0])
def shape_from_args(self):
"""A square matrix.
"""
rows = self.args[0].shape[0]
return (rows, rows)
def is_symmetric(self):
"""Is the expression symmetric?
"""
return True
def is_hermitian(self):
"""Is the expression symmetric?
"""
return True
@staticmethod
def graph_implementation(arg_objs, shape, data=None):
"""Convolve two vectors.
Parameters
----------
arg_objs : list
LinExpr for each argument.
shape : tuple
The shape of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
return (lu.diag_vec(arg_objs[0]), [])
class diag_mat(AffAtom):
"""Extracts the diagonal from a square matrix.
"""
def __init__(self, expr):
super(diag_mat, self).__init__(expr)
@AffAtom.numpy_numeric
def numeric(self, values):
"""Extract the diagonal from a square matrix constant.
"""
# The return type in numpy versions < 1.10 was ndarray.
v = np.diag(values[0])
if isinstance(v, np.matrix):
v = v.A[0]
return v
def shape_from_args(self):
"""A column vector.
"""
rows, _ = self.args[0].shape
return (rows,)
@staticmethod
def graph_implementation(arg_objs, shape, data=None):
"""Extracts the diagonal of a matrix.
Parameters
----------
arg_objs : list
LinExpr for each argument.
shape : tuple
The shape of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
return (lu.diag_mat(arg_objs[0]), []) | en | 0.790343 | Copyright 2013 <NAME> This file is part of CVXPY. CVXPY is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. CVXPY is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with CVXPY. If not, see <http://www.gnu.org/licenses/>. Extracts the diagonal from a matrix or makes a vector a diagonal matrix. Parameters ---------- expr : Expression or numeric constant A vector or square matrix. Returns ------- Expression An Expression representing the diagonal vector/matrix. Converts a vector into a diagonal matrix. Convert the vector constant into a diagonal matrix. A square matrix. Is the expression symmetric? Is the expression symmetric? Convolve two vectors. Parameters ---------- arg_objs : list LinExpr for each argument. shape : tuple The shape of the resulting expression. data : Additional data required by the atom. Returns ------- tuple (LinOp for objective, list of constraints) Extracts the diagonal from a square matrix. Extract the diagonal from a square matrix constant. # The return type in numpy versions < 1.10 was ndarray. A column vector. Extracts the diagonal of a matrix. Parameters ---------- arg_objs : list LinExpr for each argument. shape : tuple The shape of the resulting expression. data : Additional data required by the atom. Returns ------- tuple (LinOp for objective, list of constraints) | 2.988561 | 3 |
pydle/client.py | benshu/pydle | 0 | 6632898 |
## client.py
# Basic IRC client implementation.
import time
import datetime
import itertools
import logging
from . import async
from . import connection
from . import protocol
__all__ = [ 'Error', 'AlreadyInChannel', 'NotInChannel', 'BasicClient' ]
PING_TIMEOUT = 300
DEFAULT_NICKNAME = '<unregistered>'
class Error(Exception):
""" Base class for all pydle errors. """
pass
class NotInChannel(Error):
def __init__(self, channel):
super().__init__('Not in channel: {}'.format(channel))
self.channel = channel
class AlreadyInChannel(Error):
def __init__(self, channel):
super().__init__('Already in channel: {}'.format(channel))
self.channel = channel
class BasicClient:
"""
Base IRC client class.
This class on its own is not complete: in order to be able to run properly, _has_message, _parse_message and _create_message have to be overloaded.
"""
RECONNECT_ON_ERROR = True
RECONNECT_MAX_ATTEMPTS = 3
RECONNECT_DELAYED = True
RECONNECT_DELAYS = [0, 5, 10, 30, 120, 600]
def __init__(self, nickname, fallback_nicknames=[], username=None, realname=None, **kwargs):
""" Create a client. """
self._nicknames = [nickname] + fallback_nicknames
self.username = username or nickname.lower()
self.realname = realname or nickname
self.eventloop = None
self.own_eventloop = True
self._reset_connection_attributes()
self._reset_attributes()
if kwargs:
self.logger.warning('Unused arguments: %s', ', '.join(kwargs.keys()))
def _reset_attributes(self):
""" Reset attributes. """
# Record-keeping.
self.channels = {}
self.users = {}
# Low-level data stuff.
self._receive_buffer = b''
self._pending = {}
self._handler_top_level = False
self._ping_checker_handle = None
# Misc.
self.logger = logging.getLogger(__name__)
# Public connection attributes.
self.nickname = DEFAULT_NICKNAME
self.network = None
def _reset_connection_attributes(self):
""" Reset connection attributes. """
self.connection = None
self.encoding = None
self._autojoin_channels = []
self._reconnect_attempts = 0
## Connection.
def connect(self, hostname=None, port=None, reconnect=False, eventloop=None, **kwargs):
""" Connect to IRC server. """
if (not hostname or not port) and not reconnect:
raise ValueError('Have to specify hostname and port if not reconnecting.')
# Disconnect from current connection.
if self.connected:
self.disconnect(expected=True)
# Set event loop.
if eventloop:
self.eventloop = eventloop
elif not self.eventloop:
self.eventloop = async.EventLoop()
self.own_eventloop = not eventloop
# Reset attributes and connect.
if not reconnect:
self._reset_connection_attributes()
try:
self._connect(hostname=hostname, port=port, reconnect=reconnect, **kwargs)
except OSError:
self.on_disconnect(expected=False)
# Set logger name.
if self.server_tag:
self.logger = logging.getLogger(self.__class__.__name__ + ':' + self.server_tag)
def disconnect(self, expected=True):
""" Disconnect from server. """
if self.connected:
# Unschedule ping checker.
self.eventloop.unschedule(self._ping_checker_handle)
# Schedule disconnect.
self.eventloop.schedule(self._disconnect, expected)
def _disconnect(self, expected):
# Shutdown connection.
if self.connection.handles('read', self.on_data):
self.connection.off('read', self.on_data)
if self.connection.handles('error', self.on_data_error):
self.connection.off('error', self.on_data_error)
self.connection.disconnect()
# Callback.
self.on_disconnect(expected)
# Shut down event loop.
if expected and self.own_eventloop:
self.connection.stop()
# Reset any attributes.
self._reset_attributes()
def _connect(self, hostname, port, reconnect=False, channels=[], encoding=protocol.DEFAULT_ENCODING, source_address=None):
""" Connect to IRC host. """
# Create connection if we can't reuse it.
if not reconnect or not self.connection:
self._autojoin_channels = channels
self.connection = connection.Connection(hostname, port, source_address=source_address, eventloop=self.eventloop)
self.encoding = encoding
# Connect.
self.connection.connect()
# Add handlers.
self.connection.on('read', self.on_data)
self.connection.on('error', self.on_data_error)
def _reconnect_delay(self):
""" Calculate reconnection delay. """
if self.RECONNECT_ON_ERROR and self.RECONNECT_DELAYED:
if self._reconnect_attempts >= len(self.RECONNECT_DELAYS):
return self.RECONNECT_DELAYS[-1]
else:
return self.RECONNECT_DELAYS[self._reconnect_attempts]
else:
return 0
def _perform_ping_timeout(self):
""" Handle timeout gracefully. """
error = TimeoutError('Ping timeout: no data received from server in {timeout} seconds.'.format(timeout=PING_TIMEOUT))
self.on_data_error(error)
## Internal database management.
def _create_channel(self, channel):
self.channels[channel] = {
'users': set(),
}
def _destroy_channel(self, channel):
# Copy set to prevent a runtime error when destroying the user.
for user in set(self.channels[channel]['users']):
self._destroy_user(user, channel)
del self.channels[channel]
def _create_user(self, nickname):
# Servers are NOT users.
if not nickname or '.' in nickname:
return
self.users[nickname] = {
'nickname': nickname,
'username': None,
'realname': None,
'hostname': None
}
def _sync_user(self, nick, metadata):
# Create user in database.
if nick not in self.users:
self._create_user(nick)
if nick not in self.users:
return
self.users[nick].update(metadata)
def _rename_user(self, user, new):
if user in self.users:
self.users[new] = self.users[user]
self.users[new]['nickname'] = new
del self.users[user]
else:
self._create_user(new)
if new not in self.users:
return
for ch in self.channels.values():
# Rename user in channel list.
if user in ch['users']:
ch['users'].discard(user)
ch['users'].add(new)
def _destroy_user(self, nickname, channel=None):
if channel:
channels = [ self.channels[channel] ]
else:
channels = self.channels.values()
for ch in channels:
# Remove from nicklist.
ch['users'].discard(nickname)
# If we're not in any common channels with the user anymore, we have no reliable way to keep their info up-to-date.
# Remove the user.
if not channel or not any(nickname in ch['users'] for ch in self.channels.values()):
del self.users[nickname]
def _parse_user(self, data):
""" Parse user and return nickname, metadata tuple. """
raise NotImplementedError()
def _format_user_mask(self, nickname):
user = self.users.get(nickname, { "nickname": nickname, "username": "*", "hostname": "*" })
return self._format_host_mask(user['nickname'], user['username'] or '*', user['hostname'] or '*')
def _format_host_mask(self, nick, user, host):
return '{n}!{u}@{h}'.format(n=nick, u=user, h=host)
## IRC helpers.
def is_channel(self, chan):
""" Check if given argument is a channel name or not. """
return True
def in_channel(self, channel):
""" Check if we are currently in the given channel. """
return channel in self.channels.keys()
def is_same_nick(self, left, right):
""" Check if given nicknames are equal. """
return left == right
def is_same_channel(self, left, right):
""" Check if given channel names are equal. """
return left == right
## IRC attributes.
@property
def connected(self):
""" Whether or not we are connected. """
return self.connection and self.connection.connected
@property
def server_tag(self):
if self.connected and self.connection.hostname:
if self.network:
tag = self.network.lower()
else:
tag = self.connection.hostname.lower()
# Remove hostname prefix.
if tag.startswith('irc.'):
tag = tag[4:]
# Check if host is either an FQDN or IPv4.
if '.' in tag:
# Attempt to cut off TLD.
host, suffix = tag.rsplit('.', 1)
# Make sure we aren't cutting off the last octet of an IPv4.
try:
int(suffix)
except ValueError:
tag = host
return tag
else:
return None
## IRC API.
def raw(self, message):
""" Send raw command. """
self._send(message)
def rawmsg(self, command, *args, **kwargs):
""" Send raw message. """
message = str(self._create_message(command, *args, **kwargs))
self._send(message)
## Overloadable callbacks.
def on_connect(self):
""" Callback called when the client has connected successfully. """
# Reset reconnect attempts.
self._reconnect_attempts = 0
def on_disconnect(self, expected):
if not expected:
# Unexpected disconnect. Reconnect?
if self.RECONNECT_ON_ERROR and (self.RECONNECT_MAX_ATTEMPTS is None or self._reconnect_attempts < self.RECONNECT_MAX_ATTEMPTS):
# Calculate reconnect delay.
delay = self._reconnect_delay()
self._reconnect_attempts += 1
if delay > 0:
self.logger.error('Unexpected disconnect. Attempting to reconnect within %s seconds.', delay)
else:
self.logger.error('Unexpected disconnect. Attempting to reconnect.')
# Wait and reconnect.
self.eventloop.schedule_in(delay, self.connect, reconnect=True)
else:
self.logger.error('Unexpected disconnect. Giving up.')
## Message dispatch.
def _has_message(self):
""" Whether or not we have messages available for processing. """
raise NotImplementedError()
def _create_message(self, command, *params, **kwargs):
raise NotImplementedError()
def _parse_message(self):
raise NotImplementedError()
def _send(self, input):
if not isinstance(input, (bytes, str)):
input = str(input)
if isinstance(input, str):
input = input.encode(self.encoding)
self.logger.debug('>> %s', input.decode(self.encoding))
self.connection.send(input)
def handle_forever(self):
""" Handle data forever. """
self.connection.run_forever()
## Raw message handlers.
def on_data(self, data):
""" Handle received data. """
self._receive_buffer += data
# Schedule new timeout event.
self.eventloop.unschedule(self._ping_checker_handle)
self._ping_checker_handle = self.eventloop.schedule_in(PING_TIMEOUT, self._perform_ping_timeout)
while self._has_message():
message = self._parse_message()
self.on_raw(message)
def on_data_error(self, exception):
""" Handle error. """
self.logger.error('Encountered error on socket.', exc_info=(type(exception), exception, None))
self.disconnect(expected=False)
def on_raw(self, message):
""" Handle a single message. """
self.logger.debug('<< %s', message._raw)
if not message._valid:
self.logger.warning('Encountered strictly invalid IRC message from server: %s', message._raw)
if isinstance(message.command, int):
cmd = str(message.command).zfill(3)
else:
cmd = message.command
# Invoke dispatcher, if we have one.
method = 'on_raw_' + cmd.lower()
try:
# Set _top_level so __getattr__() can decide whether to return on_unknown or _ignored for unknown handlers.
# The reason for this is that features can always call super().on_raw_* safely and thus don't need to care for other features,
# while unknown messages for which no handlers exist at all are still logged.
self._handler_top_level = True
handler = getattr(self, method)
self._handler_top_level = False
self.eventloop.schedule(handler, message)
except:
self.logger.exception('Failed to execute %s handler.', method)
def on_unknown(self, message):
""" Unknown command. """
self.logger.warning('Unknown command: [%s] %s %s', message.source, message.command, message.params)
def _ignored(self, message):
""" Ignore message. """
pass
def __getattr__(self, attr):
""" Return on_unknown or _ignored for unknown handlers, depending on the invocation type. """
# Is this a raw handler?
if attr.startswith('on_raw_'):
# Are we in on_raw() trying to find any message handler?
if self._handler_top_level:
# In that case, return the method that logs and possibly acts on unknown messages.
return self.on_unknown
# Are we in an existing handler calling super()?
else:
# Just ignore it, then.
return self._ignored
# This isn't a handler, just raise an error.
raise AttributeError(attr)
class ClientPool:
""" A pool of clients that are ran and handled in parallel. """
def __init__(self, clients=None, eventloop=None):
if not eventloop:
self.eventloop = async.EventLoop()
else:
self.eventloop = eventloop
if not clients:
self.clients = set()
else:
self.clients = set(clients)
def add(self, client):
""" Add client to pool. """
self.clients.add(client)
def remove(self, client):
""" Remove client from pool. """
self.clients.remove(client)
def __contains__(self, item):
return item in self.clients
## High-level.
def connect(self, client, *args, eventloop=None, **kwargs):
"""
Add client to pool and connect it using the given argument.
Refer to the connect() method of the added client for details on parameters.
"""
if client not in self:
self.add(client)
client.connect(*args, eventloop=self.eventloop, **kwargs)
def disconnect(self, client, *args, **kwargs):
"""
Disconnect client from pool and remove it.
Refer to the disconnect() method of the removed client for details on parameters.
"""
if client not in self:
return
client.disconnect(*args, **kwargs)
self.remove(client)
def handle_forever(self):
""" Main loop of the pool: handle clients forever, until the event loop is stopped. """
for client in self.clients:
client.connection.setup_handlers()
self.eventloop.run()
for client in self.clients:
client.connection.remove_handlers()
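# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). BasicClient is
# abstract (the _has_message/_parse_message/_create_message hooks above are
# unimplemented), so this assumes the featurized pydle.Client class exported
# elsewhere in the package; the server, nickname and channel are placeholders.
# ---------------------------------------------------------------------------
import pydle


class GreeterBot(pydle.Client):
    def on_connect(self):
        super().on_connect()
        self.join('#example')


bot = GreeterBot('GreeterBot', realname='pydle example bot')
bot.connect('irc.example.org', 6667)
bot.handle_forever()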
dassl/data/datasets/da/digit5.py | Fyy10/Dassl.pytorch | 563 | 6632899 | import random
import os.path as osp
from dassl.utils import listdir_nohidden
from ..build import DATASET_REGISTRY
from ..base_dataset import Datum, DatasetBase
# Folder names for train and test sets
MNIST = {"train": "train_images", "test": "test_images"}
MNIST_M = {"train": "train_images", "test": "test_images"}
SVHN = {"train": "train_images", "test": "test_images"}
SYN = {"train": "train_images", "test": "test_images"}
USPS = {"train": "train_images", "test": "test_images"}
def read_image_list(im_dir, n_max=None, n_repeat=None):
items = []
for imname in listdir_nohidden(im_dir):
imname_noext = osp.splitext(imname)[0]
label = int(imname_noext.split("_")[1])
impath = osp.join(im_dir, imname)
items.append((impath, label))
if n_max is not None:
items = random.sample(items, n_max)
if n_repeat is not None:
items *= n_repeat
return items
def load_mnist(dataset_dir, split="train"):
data_dir = osp.join(dataset_dir, MNIST[split])
n_max = 25000 if split == "train" else 9000
return read_image_list(data_dir, n_max=n_max)
def load_mnist_m(dataset_dir, split="train"):
data_dir = osp.join(dataset_dir, MNIST_M[split])
n_max = 25000 if split == "train" else 9000
return read_image_list(data_dir, n_max=n_max)
def load_svhn(dataset_dir, split="train"):
data_dir = osp.join(dataset_dir, SVHN[split])
n_max = 25000 if split == "train" else 9000
return read_image_list(data_dir, n_max=n_max)
def load_syn(dataset_dir, split="train"):
data_dir = osp.join(dataset_dir, SYN[split])
n_max = 25000 if split == "train" else 9000
return read_image_list(data_dir, n_max=n_max)
def load_usps(dataset_dir, split="train"):
data_dir = osp.join(dataset_dir, USPS[split])
n_repeat = 3 if split == "train" else None
return read_image_list(data_dir, n_repeat=n_repeat)
@DATASET_REGISTRY.register()
class Digit5(DatasetBase):
"""Five digit datasets.
It contains:
- MNIST: hand-written digits.
- MNIST-M: variant of MNIST with blended background.
- SVHN: street view house number.
- SYN: synthetic digits.
- USPS: hand-written digits, slightly different from MNIST.
For MNIST, MNIST-M, SVHN and SYN, we randomly sample 25,000 images from
    the training set and 9,000 images from the test set. For USPS, which has
    only 9,298 images in total, we use the entire dataset but replicate its
    training set 3 times to match the training set size of the other domains.
Reference:
- Lecun et al. Gradient-based learning applied to document
recognition. IEEE 1998.
- Ganin et al. Domain-adversarial training of neural networks.
JMLR 2016.
- Netzer et al. Reading digits in natural images with unsupervised
feature learning. NIPS-W 2011.
"""
dataset_dir = "digit5"
domains = ["mnist", "mnist_m", "svhn", "syn", "usps"]
def __init__(self, cfg):
root = osp.abspath(osp.expanduser(cfg.DATASET.ROOT))
self.dataset_dir = osp.join(root, self.dataset_dir)
self.check_input_domains(
cfg.DATASET.SOURCE_DOMAINS, cfg.DATASET.TARGET_DOMAINS
)
train_x = self._read_data(cfg.DATASET.SOURCE_DOMAINS, split="train")
train_u = self._read_data(cfg.DATASET.TARGET_DOMAINS, split="train")
test = self._read_data(cfg.DATASET.TARGET_DOMAINS, split="test")
super().__init__(train_x=train_x, train_u=train_u, test=test)
def _read_data(self, input_domains, split="train"):
items = []
for domain, dname in enumerate(input_domains):
func = "load_" + dname
domain_dir = osp.join(self.dataset_dir, dname)
items_d = eval(func)(domain_dir, split=split)
for impath, label in items_d:
item = Datum(
impath=impath,
label=label,
domain=domain,
classname=str(label)
)
items.append(item)
return items
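# Illustrative usage sketch (assumes a yacs-style cfg exposing the keys that __init__
# reads; the concrete values below are placeholders, not part of this dataset's API):
#   cfg.DATASET.ROOT = "~/data"                     # expects ~/data/digit5/<domain>/...
#   cfg.DATASET.SOURCE_DOMAINS = ["mnist", "svhn"]  # labeled sources -> train_x
#   cfg.DATASET.TARGET_DOMAINS = ["usps"]           # unlabeled target -> train_u, test
#   dataset = Digit5(cfg)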
| import random
import os.path as osp
from dassl.utils import listdir_nohidden
from ..build import DATASET_REGISTRY
from ..base_dataset import Datum, DatasetBase
# Folder names for train and test sets
MNIST = {"train": "train_images", "test": "test_images"}
MNIST_M = {"train": "train_images", "test": "test_images"}
SVHN = {"train": "train_images", "test": "test_images"}
SYN = {"train": "train_images", "test": "test_images"}
USPS = {"train": "train_images", "test": "test_images"}
def read_image_list(im_dir, n_max=None, n_repeat=None):
items = []
for imname in listdir_nohidden(im_dir):
imname_noext = osp.splitext(imname)[0]
label = int(imname_noext.split("_")[1])
impath = osp.join(im_dir, imname)
items.append((impath, label))
if n_max is not None:
items = random.sample(items, n_max)
if n_repeat is not None:
items *= n_repeat
return items
def load_mnist(dataset_dir, split="train"):
data_dir = osp.join(dataset_dir, MNIST[split])
n_max = 25000 if split == "train" else 9000
return read_image_list(data_dir, n_max=n_max)
def load_mnist_m(dataset_dir, split="train"):
data_dir = osp.join(dataset_dir, MNIST_M[split])
n_max = 25000 if split == "train" else 9000
return read_image_list(data_dir, n_max=n_max)
def load_svhn(dataset_dir, split="train"):
data_dir = osp.join(dataset_dir, SVHN[split])
n_max = 25000 if split == "train" else 9000
return read_image_list(data_dir, n_max=n_max)
def load_syn(dataset_dir, split="train"):
data_dir = osp.join(dataset_dir, SYN[split])
n_max = 25000 if split == "train" else 9000
return read_image_list(data_dir, n_max=n_max)
def load_usps(dataset_dir, split="train"):
data_dir = osp.join(dataset_dir, USPS[split])
n_repeat = 3 if split == "train" else None
return read_image_list(data_dir, n_repeat=n_repeat)
@DATASET_REGISTRY.register()
class Digit5(DatasetBase):
"""Five digit datasets.
It contains:
- MNIST: hand-written digits.
- MNIST-M: variant of MNIST with blended background.
- SVHN: street view house number.
- SYN: synthetic digits.
- USPS: hand-written digits, slightly different from MNIST.
For MNIST, MNIST-M, SVHN and SYN, we randomly sample 25,000 images from
the training set and 9,000 images from the test set. For USPS which has only
9,298 images in total, we use the entire dataset but replicate its training
set for 3 times so as to match the training set size of other domains.
Reference:
- Lecun et al. Gradient-based learning applied to document
recognition. IEEE 1998.
- Ganin et al. Domain-adversarial training of neural networks.
JMLR 2016.
- Netzer et al. Reading digits in natural images with unsupervised
feature learning. NIPS-W 2011.
"""
dataset_dir = "digit5"
domains = ["mnist", "mnist_m", "svhn", "syn", "usps"]
def __init__(self, cfg):
root = osp.abspath(osp.expanduser(cfg.DATASET.ROOT))
self.dataset_dir = osp.join(root, self.dataset_dir)
self.check_input_domains(
cfg.DATASET.SOURCE_DOMAINS, cfg.DATASET.TARGET_DOMAINS
)
train_x = self._read_data(cfg.DATASET.SOURCE_DOMAINS, split="train")
train_u = self._read_data(cfg.DATASET.TARGET_DOMAINS, split="train")
test = self._read_data(cfg.DATASET.TARGET_DOMAINS, split="test")
super().__init__(train_x=train_x, train_u=train_u, test=test)
def _read_data(self, input_domains, split="train"):
items = []
for domain, dname in enumerate(input_domains):
func = "load_" + dname
domain_dir = osp.join(self.dataset_dir, dname)
items_d = eval(func)(domain_dir, split=split)
for impath, label in items_d:
item = Datum(
impath=impath,
label=label,
domain=domain,
classname=str(label)
)
items.append(item)
return items
| en | 0.856902 | # Folder names for train and test sets Five digit datasets. It contains: - MNIST: hand-written digits. - MNIST-M: variant of MNIST with blended background. - SVHN: street view house number. - SYN: synthetic digits. - USPS: hand-written digits, slightly different from MNIST. For MNIST, MNIST-M, SVHN and SYN, we randomly sample 25,000 images from the training set and 9,000 images from the test set. For USPS which has only 9,298 images in total, we use the entire dataset but replicate its training set for 3 times so as to match the training set size of other domains. Reference: - Lecun et al. Gradient-based learning applied to document recognition. IEEE 1998. - Ganin et al. Domain-adversarial training of neural networks. JMLR 2016. - Netzer et al. Reading digits in natural images with unsupervised feature learning. NIPS-W 2011. | 2.313712 | 2 |
for.py | brauliorivas/basico_python | 0 | 6632900 | def run():
for contador in range(1, 1001):
print(contador)
    # if range is not given two parameters, counting starts at zero; it is easier to write the for loop with ranges
    # range is a data type
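    # for example: range(5) yields 0, 1, 2, 3, 4, while range(1, 6) yields 1, 2, 3, 4, 5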
if __name__ == '__main__':
run() | def run():
for contador in range(1, 1001):
print(contador)
    # if range is not given two parameters, counting starts at zero; it is easier to write the for loop with ranges
    # range is a data type
if __name__ == '__main__':
    run() | es | 0.816112 | # if range is not given two parameters, counting starts at zero; it is easier to write the for loop with ranges # range is a data type | 3.503028 | 4
deprecated/dataloaders/deprecated_examples/multimedia/mmimdb_MVAE_mixed.py | kapikantzari/MultiBench | 148 | 6632901 | from objective_functions.recon import elbo_loss, sigmloss1dcentercrop
from unimodals.MVAE import LeNetEncoder, DeLeNet
from training_structures.MVAE_mixed import train_MVAE, test_MVAE
from datasets.avmnist.get_data import get_dataloader
import torch
from torch import nn
from unimodals.common_models import MLP
from fusions.MVAE import ProductOfExperts
import sys
import os
sys.path.append(os.getcwd())
traindata, validdata, testdata = get_dataloader(
'/data/yiwei/avmnist/_MFAS/avmnist')
classes = 10
n_latent = 200
fuse = ProductOfExperts((1, 40, n_latent))
# TODO
# channels=6
# encoders=[LeNetEncoder(1,channels,3,n_latent).cuda(),LeNetEncoder(1,channels,5,n_latent).cuda()]
# decoders=[DeLeNet(1,channels,3,n_latent).cuda(),DeLeNet(1,channels,5,n_latent).cuda()]
# head=MLP(n_latent,40,classes).cuda()
# elbo=elbo_loss([sigmloss1dcentercrop(28,34),sigmloss1dcentercrop(112,130)],[1.0,1.0],0.0)
# train_MVAE(encoders,decoders,head,fuse,traindata,validdata,elbo,20)
# mvae=torch.load('best1.pt')
# head=torch.load('best2.pt')
# test_MVAE(mvae,head,testdata)
| from objective_functions.recon import elbo_loss, sigmloss1dcentercrop
from unimodals.MVAE import LeNetEncoder, DeLeNet
from training_structures.MVAE_mixed import train_MVAE, test_MVAE
from datasets.avmnist.get_data import get_dataloader
import torch
from torch import nn
from unimodals.common_models import MLP
from fusions.MVAE import ProductOfExperts
import sys
import os
sys.path.append(os.getcwd())
traindata, validdata, testdata = get_dataloader(
'/data/yiwei/avmnist/_MFAS/avmnist')
classes = 10
n_latent = 200
fuse = ProductOfExperts((1, 40, n_latent))
# TODO
# channels=6
# encoders=[LeNetEncoder(1,channels,3,n_latent).cuda(),LeNetEncoder(1,channels,5,n_latent).cuda()]
# decoders=[DeLeNet(1,channels,3,n_latent).cuda(),DeLeNet(1,channels,5,n_latent).cuda()]
# head=MLP(n_latent,40,classes).cuda()
# elbo=elbo_loss([sigmloss1dcentercrop(28,34),sigmloss1dcentercrop(112,130)],[1.0,1.0],0.0)
# train_MVAE(encoders,decoders,head,fuse,traindata,validdata,elbo,20)
# mvae=torch.load('best1.pt')
# head=torch.load('best2.pt')
# test_MVAE(mvae,head,testdata)
| en | 0.336171 | # TODO # channels=6 # encoders=[LeNetEncoder(1,channels,3,n_latent).cuda(),LeNetEncoder(1,channels,5,n_latent).cuda()] # decoders=[DeLeNet(1,channels,3,n_latent).cuda(),DeLeNet(1,channels,5,n_latent).cuda()] # head=MLP(n_latent,40,classes).cuda() # elbo=elbo_loss([sigmloss1dcentercrop(28,34),sigmloss1dcentercrop(112,130)],[1.0,1.0],0.0) # train_MVAE(encoders,decoders,head,fuse,traindata,validdata,elbo,20) # mvae=torch.load('best1.pt') # head=torch.load('best2.pt') # test_MVAE(mvae,head,testdata) | 1.871719 | 2 |
tests/test_utils.py | CNugteren/keras-onnx | 1 | 6632902 | ###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import sys
import onnx
import numpy as np
import keras2onnx
from keras2onnx.proto import keras, is_keras_older_than
from keras2onnx.common.onnx_ops import apply_identity, OnnxOperatorBuilder
working_path = os.path.abspath(os.path.dirname(__file__))
tmp_path = os.path.join(working_path, 'temp')
def convert_tf_crop_and_resize(scope, operator, container):
if operator.target_opset < 11:
raise ValueError("CropAndResize op is not supported for opset < 11")
oopb = OnnxOperatorBuilder(container, scope)
node = operator.raw_operator
mode_value = node.get_attr('method')
transpose_node = oopb.apply_transpose(operator.inputs[0].full_name,
name=operator.full_name + '_transpose_1',
perm=[0, 3, 1, 2])
cropandresize = oopb.add_node('CropAndResize',
transpose_node + operator.input_full_names[1:],
operator.full_name + '_crop_and_resize',
op_domain='com.microsoft',
op_version=1,
mode=mode_value)
oopb.apply_op_with_output("apply_transpose",
cropandresize,
operator.output_full_names,
name=operator.full_name + '_transpose_final',
perm=[0, 2, 3, 1])
# convert keras_contrib.layers.InstanceNormalization
def convert_InstanceNormalizationLayer(scope, operator, container):
from keras2onnx.common.onnx_ops import OnnxOperatorBuilder
op = operator.raw_operator
params = op.get_weights()
assert len(op.input_shape) == 4
beta = params[0].reshape(1, 1, 1, 1).astype('float32')
gamma = params[1].reshape(1, 1, 1, 1).astype('float32')
oopb = OnnxOperatorBuilder(container, scope)
reducemean_1 = oopb.add_node('ReduceMean',
[operator.inputs[0].full_name],
operator.inputs[0].full_name + '_reduce_mean_1',
axes=[1, 2, 3], keepdims=1)
sub_1 = oopb.add_node('Sub',
[operator.inputs[0].full_name, reducemean_1],
operator.inputs[0].full_name + '_sub_1')
mul = oopb.add_node('Mul',
[sub_1, sub_1],
operator.inputs[0].full_name + '_mul')
reducemean_2 = oopb.add_node('ReduceMean',
[mul],
operator.inputs[0].full_name + '_reduce_mean_2',
axes=[1, 2, 3], keepdims=1)
sqrt = oopb.add_node('Sqrt',
[reducemean_2],
operator.inputs[0].full_name + '_sqrt')
add = oopb.add_node('Add',
[sqrt,
('_start', oopb.float, np.array([op.epsilon], dtype='float32'))],
operator.inputs[0].full_name + '_add')
div = oopb.add_node('Div',
[sub_1, add],
operator.inputs[0].full_name + '_div')
mul_scale = oopb.add_node('Mul',
[div,
('_start', oopb.float, beta)],
operator.inputs[0].full_name + '_mul_scale')
add_bias = oopb.add_node('Add',
[mul_scale,
('_start', oopb.float, gamma)],
operator.inputs[0].full_name + '_add_bias')
apply_identity(scope, add_bias, operator.outputs[0].full_name, container)
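# Note: the ONNX subgraph assembled above computes, per sample,
#   out = ((x - mean(x)) / (sqrt(mean((x - mean(x))**2)) + epsilon)) * params[0] + params[1]
# with both means taken over axes [1, 2, 3], i.e. an instance/layer-style normalization
# followed by the scale and shift taken from the Keras layer's weights.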
def print_mismatches(case_name, list_idx, expected_list, actual_list, rtol=1.e-3, atol=1.e-6):
diff_list = abs(expected_list - actual_list)
count_total = len(expected_list)
count_error = 0
count_current = 0
for e_, a_, d_ in zip(expected_list, actual_list, diff_list):
if d_ > atol + rtol * abs(a_):
if count_error < 10: # print the first 10 mismatches
print(
"case = " + case_name + ", result mismatch for expected = " + str(e_) +
", actual = " + str(a_) + " at location " + str(count_current), file=sys.stderr)
count_error = count_error + 1
count_current += 1
print("case = " + case_name + ", " +
str(count_error) + " mismatches out of " + str(count_total) + " for list " + str(list_idx),
file=sys.stderr)
def run_onnx_runtime(case_name, onnx_model, data, expected, model_files, rtol=1.e-3, atol=1.e-6):
if not os.path.exists(tmp_path):
os.mkdir(tmp_path)
temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
onnx.save_model(onnx_model, temp_model_file)
try:
import onnxruntime
sess = onnxruntime.InferenceSession(temp_model_file)
except ImportError:
keras2onnx.common.k2o_logger().warning("Cannot import ONNXRuntime!")
return True
if isinstance(data, dict):
feed_input = data
else:
data = data if isinstance(data, list) else [data]
input_names = sess.get_inputs()
# to avoid too complicated test code, we restrict the input name in Keras test cases must be
# in alphabetical order. It's always true unless there is any trick preventing that.
feed = zip(sorted(i_.name for i_ in input_names), data)
feed_input = dict(feed)
actual = sess.run(None, feed_input)
if expected is None:
return
if isinstance(expected, tuple):
expected = list(expected)
elif not isinstance(expected, list):
expected = [expected]
res = all(np.allclose(expected[n_], actual[n_], rtol=rtol, atol=atol) for n_ in range(len(expected)))
if res and temp_model_file not in model_files: # still keep the failed case files for the diagnosis.
model_files.append(temp_model_file)
if not res:
for n_ in range(len(expected)):
expected_list = expected[n_].flatten()
actual_list = actual[n_].flatten()
print_mismatches(case_name, n_, expected_list, actual_list, rtol, atol)
return res
def run_image(model, model_files, img_path, model_name='onnx_conversion', rtol=1.e-3, atol=1.e-5, color_mode="rgb",
target_size=224, tf_v2=False):
if tf_v2:
preprocess_input = keras.applications.imagenet_utils.preprocess_input
else:
preprocess_input = keras.applications.resnet50.preprocess_input
image = keras.preprocessing.image
try:
if not isinstance(target_size, tuple):
target_size = (target_size, target_size)
if is_keras_older_than("2.2.3"):
# color_mode is not supported in old keras version
img = image.load_img(img_path, target_size=target_size)
else:
img = image.load_img(img_path, color_mode=color_mode, target_size=target_size)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
if color_mode == "rgb":
x = preprocess_input(x)
except FileNotFoundError:
return False, 'The image data does not exist.'
msg = ''
preds = None
try:
preds = model.predict(x)
except RuntimeError:
msg = 'keras prediction throws an exception for model ' + model.name + ', skip comparison.'
onnx_model = keras2onnx.convert_keras(model, model.name)
res = run_onnx_runtime(model_name, onnx_model, x, preds, model_files, rtol=rtol, atol=atol)
return res, msg
| ###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import sys
import onnx
import numpy as np
import keras2onnx
from keras2onnx.proto import keras, is_keras_older_than
from keras2onnx.common.onnx_ops import apply_identity, OnnxOperatorBuilder
working_path = os.path.abspath(os.path.dirname(__file__))
tmp_path = os.path.join(working_path, 'temp')
def convert_tf_crop_and_resize(scope, operator, container):
if operator.target_opset < 11:
raise ValueError("CropAndResize op is not supported for opset < 11")
oopb = OnnxOperatorBuilder(container, scope)
node = operator.raw_operator
mode_value = node.get_attr('method')
transpose_node = oopb.apply_transpose(operator.inputs[0].full_name,
name=operator.full_name + '_transpose_1',
perm=[0, 3, 1, 2])
cropandresize = oopb.add_node('CropAndResize',
transpose_node + operator.input_full_names[1:],
operator.full_name + '_crop_and_resize',
op_domain='com.microsoft',
op_version=1,
mode=mode_value)
oopb.apply_op_with_output("apply_transpose",
cropandresize,
operator.output_full_names,
name=operator.full_name + '_transpose_final',
perm=[0, 2, 3, 1])
# convert keras_contrib.layers.InstanceNormalization
def convert_InstanceNormalizationLayer(scope, operator, container):
from keras2onnx.common.onnx_ops import OnnxOperatorBuilder
op = operator.raw_operator
params = op.get_weights()
assert len(op.input_shape) == 4
beta = params[0].reshape(1, 1, 1, 1).astype('float32')
gamma = params[1].reshape(1, 1, 1, 1).astype('float32')
oopb = OnnxOperatorBuilder(container, scope)
reducemean_1 = oopb.add_node('ReduceMean',
[operator.inputs[0].full_name],
operator.inputs[0].full_name + '_reduce_mean_1',
axes=[1, 2, 3], keepdims=1)
sub_1 = oopb.add_node('Sub',
[operator.inputs[0].full_name, reducemean_1],
operator.inputs[0].full_name + '_sub_1')
mul = oopb.add_node('Mul',
[sub_1, sub_1],
operator.inputs[0].full_name + '_mul')
reducemean_2 = oopb.add_node('ReduceMean',
[mul],
operator.inputs[0].full_name + '_reduce_mean_2',
axes=[1, 2, 3], keepdims=1)
sqrt = oopb.add_node('Sqrt',
[reducemean_2],
operator.inputs[0].full_name + '_sqrt')
add = oopb.add_node('Add',
[sqrt,
('_start', oopb.float, np.array([op.epsilon], dtype='float32'))],
operator.inputs[0].full_name + '_add')
div = oopb.add_node('Div',
[sub_1, add],
operator.inputs[0].full_name + '_div')
mul_scale = oopb.add_node('Mul',
[div,
('_start', oopb.float, beta)],
operator.inputs[0].full_name + '_mul_scale')
add_bias = oopb.add_node('Add',
[mul_scale,
('_start', oopb.float, gamma)],
operator.inputs[0].full_name + '_add_bias')
apply_identity(scope, add_bias, operator.outputs[0].full_name, container)
def print_mismatches(case_name, list_idx, expected_list, actual_list, rtol=1.e-3, atol=1.e-6):
diff_list = abs(expected_list - actual_list)
count_total = len(expected_list)
count_error = 0
count_current = 0
for e_, a_, d_ in zip(expected_list, actual_list, diff_list):
if d_ > atol + rtol * abs(a_):
if count_error < 10: # print the first 10 mismatches
print(
"case = " + case_name + ", result mismatch for expected = " + str(e_) +
", actual = " + str(a_) + " at location " + str(count_current), file=sys.stderr)
count_error = count_error + 1
count_current += 1
print("case = " + case_name + ", " +
str(count_error) + " mismatches out of " + str(count_total) + " for list " + str(list_idx),
file=sys.stderr)
def run_onnx_runtime(case_name, onnx_model, data, expected, model_files, rtol=1.e-3, atol=1.e-6):
if not os.path.exists(tmp_path):
os.mkdir(tmp_path)
temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
onnx.save_model(onnx_model, temp_model_file)
try:
import onnxruntime
sess = onnxruntime.InferenceSession(temp_model_file)
except ImportError:
keras2onnx.common.k2o_logger().warning("Cannot import ONNXRuntime!")
return True
if isinstance(data, dict):
feed_input = data
else:
data = data if isinstance(data, list) else [data]
input_names = sess.get_inputs()
# to avoid too complicated test code, we restrict the input name in Keras test cases must be
# in alphabetical order. It's always true unless there is any trick preventing that.
feed = zip(sorted(i_.name for i_ in input_names), data)
feed_input = dict(feed)
actual = sess.run(None, feed_input)
if expected is None:
return
if isinstance(expected, tuple):
expected = list(expected)
elif not isinstance(expected, list):
expected = [expected]
res = all(np.allclose(expected[n_], actual[n_], rtol=rtol, atol=atol) for n_ in range(len(expected)))
if res and temp_model_file not in model_files: # still keep the failed case files for the diagnosis.
model_files.append(temp_model_file)
if not res:
for n_ in range(len(expected)):
expected_list = expected[n_].flatten()
actual_list = actual[n_].flatten()
print_mismatches(case_name, n_, expected_list, actual_list, rtol, atol)
return res
def run_image(model, model_files, img_path, model_name='onnx_conversion', rtol=1.e-3, atol=1.e-5, color_mode="rgb",
target_size=224, tf_v2=False):
if tf_v2:
preprocess_input = keras.applications.imagenet_utils.preprocess_input
else:
preprocess_input = keras.applications.resnet50.preprocess_input
image = keras.preprocessing.image
try:
if not isinstance(target_size, tuple):
target_size = (target_size, target_size)
if is_keras_older_than("2.2.3"):
# color_mode is not supported in old keras version
img = image.load_img(img_path, target_size=target_size)
else:
img = image.load_img(img_path, color_mode=color_mode, target_size=target_size)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
if color_mode == "rgb":
x = preprocess_input(x)
except FileNotFoundError:
return False, 'The image data does not exist.'
msg = ''
preds = None
try:
preds = model.predict(x)
except RuntimeError:
msg = 'keras prediction throws an exception for model ' + model.name + ', skip comparison.'
onnx_model = keras2onnx.convert_keras(model, model.name)
res = run_onnx_runtime(model_name, onnx_model, x, preds, model_files, rtol=rtol, atol=atol)
return res, msg
| en | 0.470766 | ############################################################################### # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. ############################################################################### # convert keras_contrib.layers.InstanceNormalization # print the first 10 mismatches # to avoid too complicated test code, we restrict the input name in Keras test cases must be # in alphabetical order. It's always true unless there is any trick preventing that. # still keep the failed case files for the diagnosis. # color_mode is not supported in old keras version | 2.234944 | 2 |
h2o-py/tests/testdir_algos/glrm/pyunit_arrests_missing_glrm.py | ahmedengu/h2o-3 | 6,098 | 6632903 | from __future__ import print_function
from builtins import str
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator
import numpy as np
def glrm_arrests_miss():
missing_ratios = np.arange(0.1, 1, 0.1).tolist()
print("Importing USArrests.csv data and saving for validation...")
arrests_full = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))
arrests_full.describe()
totobs = arrests_full.nrow * arrests_full.ncol
train_err = [0]*len(missing_ratios)
valid_err = [0]*len(missing_ratios)
for i in range(len(missing_ratios)):
ratio = missing_ratios[i]
print("Importing USArrests.csv and inserting {0}% missing entries".format(100*ratio))
arrests_miss = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))
arrests_miss = arrests_miss.insert_missing_values(fraction=ratio)
arrests_miss.describe()
print("H2O GLRM with {0}% missing entries".format(100*ratio))
arrests_glrm = H2OGeneralizedLowRankEstimator(k=4,
ignore_const_cols=False,
loss="Quadratic",
regularization_x="None",
regularization_y="None",
init="PlusPlus",
max_iterations=10,
min_step_size=1e-6)
arrests_glrm.train(x=arrests_miss.names,
training_frame=arrests_miss,
validation_frame=arrests_full)
arrests_glrm.show()
# Check imputed data and error metrics
glrm_obj = arrests_glrm._model_json['output']['objective']
train_numerr = arrests_glrm._model_json['output']['training_metrics']._metric_json['numerr']
train_caterr = arrests_glrm._model_json['output']['training_metrics']._metric_json['caterr']
valid_numerr = arrests_glrm._model_json['output']['validation_metrics']._metric_json['numerr']
valid_caterr = arrests_glrm._model_json['output']['validation_metrics']._metric_json['caterr']
assert abs(train_numerr - glrm_obj) < 1e-3, "Numeric error on training data was " + str(train_numerr) + " but should equal final objective " + str(glrm_obj)
assert train_caterr == 0, "Categorical error on training data was " + str(train_caterr) + " but should be zero"
assert valid_caterr == 0, "Categorical error on validation data was " + str(valid_caterr) + " but should be zero"
train_numcnt = arrests_glrm._model_json['output']['training_metrics']._metric_json['numcnt']
valid_numcnt = arrests_glrm._model_json['output']['validation_metrics']._metric_json['numcnt']
assert valid_numcnt > train_numcnt, "Number of non-missing numerical entries in training data should be less than validation data"
assert valid_numcnt == totobs, "Number of non-missing numerical entries in validation data was " + str(valid_numcnt) + " but should be " + str(totobs)
train_err[i] = train_numerr
valid_err[i] = valid_numerr
# h2o.remove(arrests_glrm._model_json['output']['loading_key']['name'])
for i in range(len(missing_ratios)):
print("Missing ratio: {0}% --> Training error: {1}\tValidation error: {2}".format(missing_ratios[i]*100, train_err[i], valid_err[i]))
if __name__ == "__main__":
pyunit_utils.standalone_test(glrm_arrests_miss)
else:
glrm_arrests_miss()
| from __future__ import print_function
from builtins import str
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator
import numpy as np
def glrm_arrests_miss():
missing_ratios = np.arange(0.1, 1, 0.1).tolist()
print("Importing USArrests.csv data and saving for validation...")
arrests_full = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))
arrests_full.describe()
totobs = arrests_full.nrow * arrests_full.ncol
train_err = [0]*len(missing_ratios)
valid_err = [0]*len(missing_ratios)
for i in range(len(missing_ratios)):
ratio = missing_ratios[i]
print("Importing USArrests.csv and inserting {0}% missing entries".format(100*ratio))
arrests_miss = h2o.upload_file(pyunit_utils.locate("smalldata/pca_test/USArrests.csv"))
arrests_miss = arrests_miss.insert_missing_values(fraction=ratio)
arrests_miss.describe()
print("H2O GLRM with {0}% missing entries".format(100*ratio))
arrests_glrm = H2OGeneralizedLowRankEstimator(k=4,
ignore_const_cols=False,
loss="Quadratic",
regularization_x="None",
regularization_y="None",
init="PlusPlus",
max_iterations=10,
min_step_size=1e-6)
arrests_glrm.train(x=arrests_miss.names,
training_frame=arrests_miss,
validation_frame=arrests_full)
arrests_glrm.show()
# Check imputed data and error metrics
glrm_obj = arrests_glrm._model_json['output']['objective']
train_numerr = arrests_glrm._model_json['output']['training_metrics']._metric_json['numerr']
train_caterr = arrests_glrm._model_json['output']['training_metrics']._metric_json['caterr']
valid_numerr = arrests_glrm._model_json['output']['validation_metrics']._metric_json['numerr']
valid_caterr = arrests_glrm._model_json['output']['validation_metrics']._metric_json['caterr']
assert abs(train_numerr - glrm_obj) < 1e-3, "Numeric error on training data was " + str(train_numerr) + " but should equal final objective " + str(glrm_obj)
assert train_caterr == 0, "Categorical error on training data was " + str(train_caterr) + " but should be zero"
assert valid_caterr == 0, "Categorical error on validation data was " + str(valid_caterr) + " but should be zero"
train_numcnt = arrests_glrm._model_json['output']['training_metrics']._metric_json['numcnt']
valid_numcnt = arrests_glrm._model_json['output']['validation_metrics']._metric_json['numcnt']
assert valid_numcnt > train_numcnt, "Number of non-missing numerical entries in training data should be less than validation data"
assert valid_numcnt == totobs, "Number of non-missing numerical entries in validation data was " + str(valid_numcnt) + " but should be " + str(totobs)
train_err[i] = train_numerr
valid_err[i] = valid_numerr
# h2o.remove(arrests_glrm._model_json['output']['loading_key']['name'])
for i in range(len(missing_ratios)):
print("Missing ratio: {0}% --> Training error: {1}\tValidation error: {2}".format(missing_ratios[i]*100, train_err[i], valid_err[i]))
if __name__ == "__main__":
pyunit_utils.standalone_test(glrm_arrests_miss)
else:
glrm_arrests_miss() | en | 0.332776 | # Check imputed data and error metrics # h2o.remove(arrests_glrm._model_json['output']['loading_key']['name']) | 2.393313 | 2 |
Advent of Code 2019/aoc_day8_part1.py | markbrown314/toy_problems | 0 | 6632904 | """
🎅
--- Day 8: Space Image Format ---
The Elves' spirits are lifted when they realize you have an opportunity to reboot one of their Mars
rovers, and so they are curious if you would spend a brief sojourn on Mars. You land your ship near
the rover.
When you reach the rover, you discover that it's already in the process of rebooting! It's just
waiting for someone to enter a BIOS password. The Elf responsible for the rover takes a picture of
the password (your puzzle input) and sends it to you via the Digital Sending Network.
Unfortunately, images sent via the Digital Sending Network aren't encoded with any normal encoding;
instead, they're encoded in a special Space Image Format. None of the Elves seem to remember why
this is the case. They send you the instructions to decode it.
Images are sent as a series of digits that each represent the color of a single pixel. The digits
fill each row of the image left-to-right, then move downward to the next row, filling rows
top-to-bottom until every pixel of the image is filled.
Each image actually consists of a series of identically-sized layers that are filled in this way.
So, the first digit corresponds to the top-left pixel of the first layer, the second digit
corresponds to the pixel to the right of that on the same layer, and so on until the last digit,
which corresponds to the bottom-right pixel of the last layer.
For example, given an image 3 pixels wide and 2 pixels tall, the image data 123456789012 corresponds
to the following image layers:
Layer 1: 123
456
Layer 2: 789
012
The image you received is 25 pixels wide and 6 pixels tall.
To make sure the image wasn't corrupted during transmission, the Elves would like you to find the
layer that contains the fewest 0 digits. On that layer, what is the number of 1 digits multiplied
by the number of 2 digits?
"""
import re
puzzle_input = ""
with open("aoc_day8_input_data.txt") as file_input:
for line in file_input:
puzzle_input += line
layer_array = []
dimensions = (25,6)
layer = []
for pixel in puzzle_input:
if re.match('[0-9]', pixel) is None:
continue
layer.append(int(pixel))
if len(layer) == dimensions[0] * dimensions[1]:
layer_array.append(layer)
layer = []
def count_pixel_by_value(layer, value):
count = 0
for pixel in layer:
if pixel == value:
count += 1
return count
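# Worked example using the sample from the puzzle text above: a 3x2 image with data
# "123456789012" has layers [1,2,3,4,5,6] and [7,8,9,0,1,2]; the first layer has no
# zeros, so it is chosen, and it contains one 1 and one 2, giving an answer of 1 * 1 = 1.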
min_zero_count = (dimensions[0] * dimensions[1], 0) # count and layer
for i, layer in enumerate(layer_array):
count = count_pixel_by_value(layer, 0)
if count < min_zero_count[0]:
min_zero_count = (count, i)
i = min_zero_count[1]
ones = count_pixel_by_value(layer_array[i], 1)
twos = count_pixel_by_value(layer_array[i], 2)
print(ones * twos) |
"""
🎅
--- Day 8: Space Image Format ---
The Elves' spirits are lifted when they realize you have an opportunity to reboot one of their Mars
rovers, and so they are curious if you would spend a brief sojourn on Mars. You land your ship near
the rover.
When you reach the rover, you discover that it's already in the process of rebooting! It's just
waiting for someone to enter a BIOS password. The Elf responsible for the rover takes a picture of
the password (your puzzle input) and sends it to you via the Digital Sending Network.
Unfortunately, images sent via the Digital Sending Network aren't encoded with any normal encoding;
instead, they're encoded in a special Space Image Format. None of the Elves seem to remember why
this is the case. They send you the instructions to decode it.
Images are sent as a series of digits that each represent the color of a single pixel. The digits
fill each row of the image left-to-right, then move downward to the next row, filling rows
top-to-bottom until every pixel of the image is filled.
Each image actually consists of a series of identically-sized layers that are filled in this way.
So, the first digit corresponds to the top-left pixel of the first layer, the second digit
corresponds to the pixel to the right of that on the same layer, and so on until the last digit,
which corresponds to the bottom-right pixel of the last layer.
For example, given an image 3 pixels wide and 2 pixels tall, the image data 123456789012 corresponds
to the following image layers:
Layer 1: 123
456
Layer 2: 789
012
The image you received is 25 pixels wide and 6 pixels tall.
To make sure the image wasn't corrupted during transmission, the Elves would like you to find the
layer that contains the fewest 0 digits. On that layer, what is the number of 1 digits multiplied
by the number of 2 digits?
"""
import re
puzzle_input = ""
with open("aoc_day8_input_data.txt") as file_input:
for line in file_input:
puzzle_input += line
layer_array = []
dimensions = (25,6)
layer = []
for pixel in puzzle_input:
if re.match('[0-9]', pixel) is None:
continue
layer.append(int(pixel))
if len(layer) == dimensions[0] * dimensions[1]:
layer_array.append(layer)
layer = []
def count_pixel_by_value(layer, value):
count = 0
for pixel in layer:
if pixel == value:
count += 1
return count
min_zero_count = (dimensions[0] * dimensions[1], 0) # count and layer
for i, layer in enumerate(layer_array):
count = count_pixel_by_value(layer, 0)
if count < min_zero_count[0]:
min_zero_count = (count, i)
i = min_zero_count[1]
ones = count_pixel_by_value(layer_array[i], 1)
twos = count_pixel_by_value(layer_array[i], 2)
print(ones * twos) | en | 0.931754 | 🎅 --- Day 8: Space Image Format --- The Elves' spirits are lifted when they realize you have an opportunity to reboot one of their Mars rovers, and so they are curious if you would spend a brief sojourn on Mars. You land your ship near the rover. When you reach the rover, you discover that it's already in the process of rebooting! It's just waiting for someone to enter a BIOS password. The Elf responsible for the rover takes a picture of the password (your puzzle input) and sends it to you via the Digital Sending Network. Unfortunately, images sent via the Digital Sending Network aren't encoded with any normal encoding; instead, they're encoded in a special Space Image Format. None of the Elves seem to remember why this is the case. They send you the instructions to decode it. Images are sent as a series of digits that each represent the color of a single pixel. The digits fill each row of the image left-to-right, then move downward to the next row, filling rows top-to-bottom until every pixel of the image is filled. Each image actually consists of a series of identically-sized layers that are filled in this way. So, the first digit corresponds to the top-left pixel of the first layer, the second digit corresponds to the pixel to the right of that on the same layer, and so on until the last digit, which corresponds to the bottom-right pixel of the last layer. For example, given an image 3 pixels wide and 2 pixels tall, the image data 123456789012 corresponds to the following image layers: Layer 1: 123 456 Layer 2: 789 012 The image you received is 25 pixels wide and 6 pixels tall. To make sure the image wasn't corrupted during transmission, the Elves would like you to find the layer that contains the fewest 0 digits. On that layer, what is the number of 1 digits multiplied by the number of 2 digits? # count and layer | 3.982855 | 4 |
cubes/server/browser.py | devvmh/cubes | 2 | 6632905 | # -*- coding: utf-8 -*-
import json
import logging
from ..logging import get_logger
from ..browser import *
class SlicerBrowser(AggregationBrowser):
"""Aggregation browser for Cubes Slicer OLAP server."""
def __init__(self, cube, store, locale=None, **options):
"""Browser for another Slicer server.
"""
super(SlicerBrowser, self).__init__(cube, store, locale)
self.logger = get_logger()
self.cube = cube
self.locale = locale
self.store = store
def features(self):
# Get the original features as provided by the Slicer server.
# They are stored in browser_options in the Slicer model provider's
# cube().
features = dict(self.cube.browser_options.get("features", {}))
# Replace only the actions, as we are not just a simple proxy.
features["actions"] = ["aggregate", "facts", "fact", "cell", "members"]
return features
def provide_aggregate(self, cell, aggregates, drilldown, split, order,
page, page_size, **options):
params = {}
if cell:
params["cut"] = string_from_cuts(cell.cuts)
if drilldown:
params["drilldown"] = ",".join(drilldown.items_as_strings())
if split:
params["split"] = str(split)
if aggregates:
names = [a.name for a in aggregates]
params["aggregates"] = ",".join(names)
if order:
params["order"] = self._order_param(order)
if page is not None:
params["page"] = str(page)
if page_size is not None:
params["page_size"] = str(page_size)
response = self.store.cube_request("aggregate",
self.cube.basename, params)
result = AggregationResult()
result.cells = response.get('cells', [])
if "summary" in response:
result.summary = response.get('summary')
result.levels = response.get('levels', {})
result.labels = response.get('labels', [])
result.cell = cell
result.aggregates = response.get('aggregates', [])
return result
def facts(self, cell=None, fields=None, order=None, page=None,
page_size=None):
cell = cell or Cell(self.cube)
if fields:
attributes = self.cube.get_attributes(fields)
else:
attributes = []
order = self.prepare_order(order, is_aggregate=False)
params = {}
if cell:
params["cut"] = string_from_cuts(cell.cuts)
if order:
params["order"] = self._order_param(order)
if page is not None:
params["page"] = str(page)
if page_size is not None:
params["page_size"] = str(page_size)
if attributes:
params["fields"] = ",".join(str(attr) for attr in attributes)
params["format"] = "json_lines"
response = self.store.cube_request("facts", self.cube.basename, params,
is_lines=True)
return Facts(response, attributes)
def provide_members(self, cell=None, dimension=None, levels=None,
hierarchy=None, attributes=None, page=None,
page_size=None, order=None, **options):
params = {}
if cell:
params["cut"] = string_from_cuts(cell.cuts)
if order:
params["order"] = self._order_param(order)
if levels:
params["level"] = str(levels[-1])
if hierarchy:
params["hierarchy"] = str(hierarchy)
if page is not None:
params["page"] = str(page)
if page_size is not None:
params["page_size"] = str(page_size)
if attributes:
params["fields"] = ",".join(str(attr) for attr in attributes)
params["format"] = "json_lines"
action = "/cube/%s/members/%s" % (self.cube.basename, str(dimension))
response = self.store.request(action, params, is_lines=True)
return response
def cell_details(self, cell, dimension=None):
cell = cell or Cell(self.cube)
params = {}
if cell:
params["cut"] = string_from_cuts(cell.cuts)
if dimension:
params["dimension"] = str(dimension)
response = self.store.cube_request("cell", self.cube.basename, params)
return response
def fact(self, fact_id):
action = "/cube/%s/fact/%s" % (self.cube.basename, str(fact_id))
response = self.store.request(action)
return response
def is_builtin_function(self, name, aggregate):
return True
def _order_param(self, order):
"""Prepare an order string in form: ``attribute:direction``"""
string = ",".join("%s:%s" % (o[0], o[1]) for o in order)
return string
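    # Example of the format produced above (attribute names are illustrative):
    # [("amount", "asc"), ("year", "desc")] -> "amount:asc,year:desc"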
 | # -*- coding: utf-8 -*-
import json
import logging
from ..logging import get_logger
from ..browser import *
class SlicerBrowser(AggregationBrowser):
"""Aggregation browser for Cubes Slicer OLAP server."""
def __init__(self, cube, store, locale=None, **options):
"""Browser for another Slicer server.
"""
super(SlicerBrowser, self).__init__(cube, store, locale)
self.logger = get_logger()
self.cube = cube
self.locale = locale
self.store = store
def features(self):
# Get the original features as provided by the Slicer server.
# They are stored in browser_options in the Slicer model provider's
# cube().
features = dict(self.cube.browser_options.get("features", {}))
# Replace only the actions, as we are not just a simple proxy.
features["actions"] = ["aggregate", "facts", "fact", "cell", "members"]
return features
def provide_aggregate(self, cell, aggregates, drilldown, split, order,
page, page_size, **options):
params = {}
if cell:
params["cut"] = string_from_cuts(cell.cuts)
if drilldown:
params["drilldown"] = ",".join(drilldown.items_as_strings())
if split:
params["split"] = str(split)
if aggregates:
names = [a.name for a in aggregates]
params["aggregates"] = ",".join(names)
if order:
params["order"] = self._order_param(order)
if page is not None:
params["page"] = str(page)
if page_size is not None:
params["page_size"] = str(page_size)
response = self.store.cube_request("aggregate",
self.cube.basename, params)
result = AggregationResult()
result.cells = response.get('cells', [])
if "summary" in response:
result.summary = response.get('summary')
result.levels = response.get('levels', {})
result.labels = response.get('labels', [])
result.cell = cell
result.aggregates = response.get('aggregates', [])
return result
def facts(self, cell=None, fields=None, order=None, page=None,
page_size=None):
cell = cell or Cell(self.cube)
if fields:
attributes = self.cube.get_attributes(fields)
else:
attributes = []
order = self.prepare_order(order, is_aggregate=False)
params = {}
if cell:
params["cut"] = string_from_cuts(cell.cuts)
if order:
params["order"] = self._order_param(order)
if page is not None:
params["page"] = str(page)
if page_size is not None:
params["page_size"] = str(page_size)
if attributes:
params["fields"] = ",".join(str(attr) for attr in attributes)
params["format"] = "json_lines"
response = self.store.cube_request("facts", self.cube.basename, params,
is_lines=True)
return Facts(response, attributes)
def provide_members(self, cell=None, dimension=None, levels=None,
hierarchy=None, attributes=None, page=None,
page_size=None, order=None, **options):
params = {}
if cell:
params["cut"] = string_from_cuts(cell.cuts)
if order:
params["order"] = self._order_param(order)
if levels:
params["level"] = str(levels[-1])
if hierarchy:
params["hierarchy"] = str(hierarchy)
if page is not None:
params["page"] = str(page)
if page_size is not None:
params["page_size"] = str(page_size)
if attributes:
params["fields"] = ",".join(str(attr) for attr in attributes)
params["format"] = "json_lines"
action = "/cube/%s/members/%s" % (self.cube.basename, str(dimension))
response = self.store.request(action, params, is_lines=True)
return response
def cell_details(self, cell, dimension=None):
cell = cell or Cell(self.cube)
params = {}
if cell:
params["cut"] = string_from_cuts(cell.cuts)
if dimension:
params["dimension"] = str(dimension)
response = self.store.cube_request("cell", self.cube.basename, params)
return response
def fact(self, fact_id):
action = "/cube/%s/fact/%s" % (self.cube.basename, str(fact_id))
response = self.store.request(action)
return response
def is_builtin_function(self, name, aggregate):
return True
def _order_param(self, order):
"""Prepare an order string in form: ``attribute:direction``"""
string = ",".join("%s:%s" % (o[0], o[1]) for o in order)
return string
| en | 0.849854 | # -*- coding=utf -*- Aggregation browser for Cubes Slicer OLAP server. Browser for another Slicer server. # Get the original features as provided by the Slicer server. # They are stored in browser_options in the Slicer model provider's # cube(). # Replace only the actions, as we are not just a simple proxy. Prepare an order string in form: ``attribute:direction`` | 2.446311 | 2 |
diofant/functions/special/beta_functions.py | rajkk1/diofant | 57 | 6632906 | from ...core.function import ArgumentIndexError, Function
from .gamma_functions import digamma, gamma
###############################################################################
# ########################## COMPLETE BETA FUNCTION ######################## #
###############################################################################
class beta(Function):
r"""
The beta integral is called the Eulerian integral of the first kind by
Legendre:
.. math::
\mathrm{B}(x,y) := \int^{1}_{0} t^{x-1} (1-t)^{y-1} \mathrm{d}t.
Beta function or Euler's first integral is closely associated with gamma function.
The Beta function often used in probability theory and mathematical statistics.
It satisfies properties like:
.. math::
\mathrm{B}(a,1) = \frac{1}{a} \\
\mathrm{B}(a,b) = \mathrm{B}(b,a) \\
\mathrm{B}(a,b) = \frac{\Gamma(a) \Gamma(b)}{\Gamma(a+b)}
Therefore for integral values of a and b:
.. math::
\mathrm{B} = \frac{(a-1)! (b-1)!}{(a+b-1)!}
Examples
========
The Beta function obeys the mirror symmetry:
>>> conjugate(beta(x, y))
beta(conjugate(x), conjugate(y))
Differentiation with respect to both x and y is supported:
>>> diff(beta(x, y), x)
(polygamma(0, x) - polygamma(0, x + y))*beta(x, y)
>>> diff(beta(x, y), y)
(polygamma(0, y) - polygamma(0, x + y))*beta(x, y)
We can numerically evaluate the gamma function to arbitrary precision
on the whole complex plane:
>>> beta(pi, pi).evalf(40)
0.02671848900111377452242355235388489324562
>>> beta(1 + I, 1 + I).evalf(20)
-0.2112723729365330143 - 0.7655283165378005676*I
See Also
========
diofant.functions.special.gamma_functions.gamma: Gamma function.
diofant.functions.special.gamma_functions.uppergamma: Upper incomplete gamma function.
diofant.functions.special.gamma_functions.lowergamma: Lower incomplete gamma function.
diofant.functions.special.gamma_functions.polygamma: Polygamma function.
diofant.functions.special.gamma_functions.loggamma: Log Gamma function.
diofant.functions.special.gamma_functions.digamma: Digamma function.
diofant.functions.special.gamma_functions.trigamma: Trigamma function.
References
==========
* https://en.wikipedia.org/wiki/Beta_function
* https://mathworld.wolfram.com/BetaFunction.html
* https://dlmf.nist.gov/5.12
"""
unbranched = True
def fdiff(self, argindex):
x, y = self.args
if argindex == 1:
return beta(x, y)*(digamma(x) - digamma(x + y))
elif argindex == 2:
return beta(x, y)*(digamma(y) - digamma(x + y))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, x, y):
pass
def _eval_expand_func(self, **hints):
x, y = self.args
return gamma(x)*gamma(y) / gamma(x + y)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate(), self.args[1].conjugate())
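# Example consistent with the docstring identities above: for positive integers,
# B(a, b) = (a-1)!(b-1)!/(a+b-1)!, so beta(2, 3) = 1!*2!/4! = 1/12, and expanding via
# _eval_expand_func gives the same value through gamma(2)*gamma(3)/gamma(5) = 2/24.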
| from ...core.function import ArgumentIndexError, Function
from .gamma_functions import digamma, gamma
###############################################################################
# ########################## COMPLETE BETA FUNCTION ######################## #
###############################################################################
class beta(Function):
r"""
The beta integral is called the Eulerian integral of the first kind by
Legendre:
.. math::
\mathrm{B}(x,y) := \int^{1}_{0} t^{x-1} (1-t)^{y-1} \mathrm{d}t.
Beta function or Euler's first integral is closely associated with gamma function.
The Beta function often used in probability theory and mathematical statistics.
It satisfies properties like:
.. math::
\mathrm{B}(a,1) = \frac{1}{a} \\
\mathrm{B}(a,b) = \mathrm{B}(b,a) \\
\mathrm{B}(a,b) = \frac{\Gamma(a) \Gamma(b)}{\Gamma(a+b)}
Therefore for integral values of a and b:
.. math::
\mathrm{B} = \frac{(a-1)! (b-1)!}{(a+b-1)!}
Examples
========
The Beta function obeys the mirror symmetry:
>>> conjugate(beta(x, y))
beta(conjugate(x), conjugate(y))
Differentiation with respect to both x and y is supported:
>>> diff(beta(x, y), x)
(polygamma(0, x) - polygamma(0, x + y))*beta(x, y)
>>> diff(beta(x, y), y)
(polygamma(0, y) - polygamma(0, x + y))*beta(x, y)
We can numerically evaluate the gamma function to arbitrary precision
on the whole complex plane:
>>> beta(pi, pi).evalf(40)
0.02671848900111377452242355235388489324562
>>> beta(1 + I, 1 + I).evalf(20)
-0.2112723729365330143 - 0.7655283165378005676*I
See Also
========
diofant.functions.special.gamma_functions.gamma: Gamma function.
diofant.functions.special.gamma_functions.uppergamma: Upper incomplete gamma function.
diofant.functions.special.gamma_functions.lowergamma: Lower incomplete gamma function.
diofant.functions.special.gamma_functions.polygamma: Polygamma function.
diofant.functions.special.gamma_functions.loggamma: Log Gamma function.
diofant.functions.special.gamma_functions.digamma: Digamma function.
diofant.functions.special.gamma_functions.trigamma: Trigamma function.
References
==========
* https://en.wikipedia.org/wiki/Beta_function
* https://mathworld.wolfram.com/BetaFunction.html
* https://dlmf.nist.gov/5.12
"""
unbranched = True
def fdiff(self, argindex):
x, y = self.args
if argindex == 1:
return beta(x, y)*(digamma(x) - digamma(x + y))
elif argindex == 2:
return beta(x, y)*(digamma(y) - digamma(x + y))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, x, y):
pass
def _eval_expand_func(self, **hints):
x, y = self.args
return gamma(x)*gamma(y) / gamma(x + y)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate(), self.args[1].conjugate())
| en | 0.387844 | ############################################################################### # ########################## COMPLETE BETA FUNCTION ######################## # ############################################################################### The beta integral is called the Eulerian integral of the first kind by Legendre: .. math:: \mathrm{B}(x,y) := \int^{1}_{0} t^{x-1} (1-t)^{y-1} \mathrm{d}t. Beta function or Euler's first integral is closely associated with gamma function. The Beta function often used in probability theory and mathematical statistics. It satisfies properties like: .. math:: \mathrm{B}(a,1) = \frac{1}{a} \\ \mathrm{B}(a,b) = \mathrm{B}(b,a) \\ \mathrm{B}(a,b) = \frac{\Gamma(a) \Gamma(b)}{\Gamma(a+b)} Therefore for integral values of a and b: .. math:: \mathrm{B} = \frac{(a-1)! (b-1)!}{(a+b-1)!} Examples ======== The Beta function obeys the mirror symmetry: >>> conjugate(beta(x, y)) beta(conjugate(x), conjugate(y)) Differentiation with respect to both x and y is supported: >>> diff(beta(x, y), x) (polygamma(0, x) - polygamma(0, x + y))*beta(x, y) >>> diff(beta(x, y), y) (polygamma(0, y) - polygamma(0, x + y))*beta(x, y) We can numerically evaluate the gamma function to arbitrary precision on the whole complex plane: >>> beta(pi, pi).evalf(40) 0.02671848900111377452242355235388489324562 >>> beta(1 + I, 1 + I).evalf(20) -0.2112723729365330143 - 0.7655283165378005676*I See Also ======== diofant.functions.special.gamma_functions.gamma: Gamma function. diofant.functions.special.gamma_functions.uppergamma: Upper incomplete gamma function. diofant.functions.special.gamma_functions.lowergamma: Lower incomplete gamma function. diofant.functions.special.gamma_functions.polygamma: Polygamma function. diofant.functions.special.gamma_functions.loggamma: Log Gamma function. diofant.functions.special.gamma_functions.digamma: Digamma function. diofant.functions.special.gamma_functions.trigamma: Trigamma function. References ========== * https://en.wikipedia.org/wiki/Beta_function * https://mathworld.wolfram.com/BetaFunction.html * https://dlmf.nist.gov/5.12 | 2.90986 | 3 |
src/models/caec_uniform.py | panpiort8/CatAutoEncoderCompressor | 0 | 6632907 | from functools import reduce
import bitarray
import torch
import torch.nn
from .caec_base import CAECBase
class CAECUniform(CAECBase):
def __init__(self, cfg):
super().__init__(cfg)
self.d = cfg.d
self.latent_dim = reduce(lambda x, y: x * y, self.latent)
self.mult = torch.tensor(2 ** self.d, requires_grad=False, device=cfg.device)
self.arrange = torch.arange(0, 2 ** self.d, device=cfg.device)
def quantize_soft(self, encoded):
q = 0.5 * (encoded + 1) # q in [0, 1]
q = q * self.mult
q = q.view(q.shape[0], -1).unsqueeze(-1)
exp = torch.exp(-torch.abs(q - self.arrange))
sum_u = torch.sum(exp * self.arrange, dim=2)
sum_d = torch.sum(exp, dim=2)
q = sum_u / sum_d
q = q.view(*encoded.shape)
return q
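    # Note: quantize_soft maps encoded values from [-1, 1] onto [0, 2**d] and replaces
    # each value with a weighted average of the integer levels 0..2**d - 1, using weights
    # exp(-|q - level|); this acts as a differentiable surrogate for the hard rounding
    # done in quantize_hard below.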
@torch.no_grad()
def quantize_hard(self, encoded):
q = 0.5 * (encoded + 1) # q in [0, 1]
q = torch.floor(q * self.mult).long()
        q[q == 2 ** self.d] = 2 ** self.d - 1  # values that land exactly on the upper edge fall into the top bin
return q
def predecode(self, quantized):
quantized = quantized / self.mult # x in [0, 1]
quantized = quantized * 2.0 - 1 # x in [-1, 1]
return quantized
def to_binary(self, quantized):
bits = bitarray.bitarray()
for i in quantized:
bits.extend(format(i, f'0{self.d}b'))
return bits.tobytes()
def from_binary(self, binary):
bits = bitarray.bitarray()
bits.frombytes(binary)
size = 60 * self.latent_dim
output = torch.zeros((size), dtype=torch.float)
for i in range(size):
idx = i * self.d
output[i] = int(bits[idx: idx + self.d].to01(), 2)
return output
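    # Illustrative round-trip sketch (not a tested snippet; `z` is a hypothetical batch
    # of encoder outputs in [-1, 1]):
    #   q = model.quantize_hard(z)                       # integer codes in [0, 2**d - 1]
    #   payload = model.to_binary(q.flatten().tolist())  # d bits per latent value
    #   restored = model.from_binary(payload)            # flat float tensor of the codes
    # Note that from_binary assumes a fixed layout of 60 * latent_dim values.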
| from functools import reduce
import bitarray
import torch
import torch.nn
from .caec_base import CAECBase
class CAECUniform(CAECBase):
def __init__(self, cfg):
super().__init__(cfg)
self.d = cfg.d
self.latent_dim = reduce(lambda x, y: x * y, self.latent)
self.mult = torch.tensor(2 ** self.d, requires_grad=False, device=cfg.device)
self.arrange = torch.arange(0, 2 ** self.d, device=cfg.device)
def quantize_soft(self, encoded):
q = 0.5 * (encoded + 1) # q in [0, 1]
q = q * self.mult
q = q.view(q.shape[0], -1).unsqueeze(-1)
exp = torch.exp(-torch.abs(q - self.arrange))
sum_u = torch.sum(exp * self.arrange, dim=2)
sum_d = torch.sum(exp, dim=2)
q = sum_u / sum_d
q = q.view(*encoded.shape)
return q
@torch.no_grad()
def quantize_hard(self, encoded):
q = 0.5 * (encoded + 1) # q in [0, 1]
q = torch.floor(q * self.mult).long()
        q[q == 2 ** self.d] = 2 ** self.d - 1  # values that land exactly on the upper edge fall into the top bin
return q
def predecode(self, quantized):
quantized = quantized / self.mult # x in [0, 1]
quantized = quantized * 2.0 - 1 # x in [-1, 1]
return quantized
def to_binary(self, quantized):
bits = bitarray.bitarray()
for i in quantized:
bits.extend(format(i, f'0{self.d}b'))
return bits.tobytes()
def from_binary(self, binary):
bits = bitarray.bitarray()
bits.frombytes(binary)
size = 60 * self.latent_dim
output = torch.zeros((size), dtype=torch.float)
for i in range(size):
idx = i * self.d
output[i] = int(bits[idx: idx + self.d].to01(), 2)
return output
| en | 0.748876 | # q in [0, 1] # q in [0, 1] # x in [0, 1] # x in [-1, 1] | 2.133437 | 2 |
utils/dp.py | jakiki6/justastupidgame | 2 | 6632908 | import pygame
from __main__ import world
def get_pos_x():
return (pygame.mouse.get_pos()[0] + world.dx) // 32
def get_pos_y():
return (pygame.mouse.get_pos()[1] + world.dy) // 32
def get_pos_xy():
return (get_pos_x(), get_pos_y())
def get_dp_x():
return world.dx % 32
def get_dp_y():
return world.dy % 32
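# Note: the divisor/modulus of 32 suggests a 32-pixel tile grid: get_pos_x/get_pos_y
# convert the mouse position plus the scroll offset (world.dx, world.dy) into tile
# coordinates, while get_dp_x/get_dp_y return the sub-tile pixel remainder of that offset.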
| import pygame
from __main__ import world
def get_pos_x():
return (pygame.mouse.get_pos()[0] + world.dx) // 32
def get_pos_y():
return (pygame.mouse.get_pos()[1] + world.dy) // 32
def get_pos_xy():
return (get_pos_x(), get_pos_y())
def get_dp_x():
return world.dx % 32
def get_dp_y():
return world.dy % 32
| none | 1 | 2.889785 | 3 |
|
thoughts.py | apstsh/thoughtsweb | 1 | 6632909 | from flask import Flask
from flask import render_template, request
from dbconnect import fetch_thoughts, submit_thoughts_db
thoughtsapp = Flask(__name__)
@thoughtsapp.route('/thought_for_the_day1')
def display_thoughts1():
result = fetch_thoughts()
result1 = result[0][0].decode()
print(result1)
return render_template('index.html', thought_result=result1)
@thoughtsapp.route('/', methods=['POST'])
def submit_thoughts():
thought = request.form['user_thoughts']
submit_thoughts_db(thought)
return "Thought entered successfully!!"
| from flask import Flask
from flask import render_template, request
from dbconnect import fetch_thoughts, submit_thoughts_db
thoughtsapp = Flask(__name__)
@thoughtsapp.route('/thought_for_the_day1')
def display_thoughts1():
result = fetch_thoughts()
result1 = result[0][0].decode()
print(result1)
return render_template('index.html', thought_result=result1)
@thoughtsapp.route('/', methods=['POST'])
def submit_thoughts():
thought = request.form['user_thoughts']
submit_thoughts_db(thought)
return "Thought entered successfully!!" | none | 1 | 2.652255 | 3 |
|
assignments/assignments_turnitin_msonline_list.py | dgrobani/py3_canvaslmi_api | 18 | 6632910 | # https://canvas.instructure.com/doc/api/assignments.html
from datetime import datetime
from canvas.core.courses import get_courses, get_courses_whitelisted, get_course_people, get_courses_by_account_id
from canvas.core.io import write_xlsx_file, tada
from canvas.core.assignments import get_assignments
def assignments_turnitin_msonline_list():
terms = ['2017-1SP']
programs = ['NFNPO', 'NCMO']
synergis = True
course_whitelist = get_courses_whitelisted([])
header = ['term', 'program', 'SIS ID', 'course name', 'assignment name', 'assignment URL', 'due date', 'points',
'group assignment', 'faculty of record']
rows = []
for course in course_whitelist or get_courses(terms, programs, synergis):
course_id = course['id']
if not get_course_people(course_id, 'student'):
continue
course_sis_id = course['sis_course_id']
program = course['course_sis_info']['program']
for assignment in get_assignments(course_id):
if 'external_tool' in assignment['submission_types']:
row = [terms[0],
program,
course_sis_id,
course['name'],
assignment['name'],
assignment['html_url'],
assignment['due_at'][0:10] if assignment['due_at'] else '',
assignment['points_possible'] if assignment['points_possible'] else '',
'X' if 'group_category_id' in assignment and assignment['group_category_id'] else '',
', '.join([p['name'] for p in get_course_people(course_id, 'Faculty of record')])]
rows.append(row)
print(row)
write_xlsx_file('turnitin_assignments_spring_{}'
.format(datetime.now().strftime('%Y.%m.%d.%H.%M.%S')), header, rows)
def assignments_turnitin_msonline_list_dev():
accounts = {'DEV FNPO': '168920', 'DEV CMO': '168922'}
header = ['program', 'course name', 'assignment name', 'assignment URL', 'points']
rows = []
for account in accounts:
for course in get_courses_by_account_id(accounts[account], 'DEFAULT'):
course_id = course['id']
for assignment in get_assignments(course_id):
if 'external_tool' in assignment['submission_types']:
row = [
account,
course['name'],
assignment['name'],
assignment['html_url'],
assignment['points_possible'] if assignment['points_possible'] else '']
rows.append(row)
print(row)
write_xlsx_file('turnitin_assignments_spring_dev_{}'
.format(datetime.now().strftime('%Y.%m.%d.%H.%M.%S')), header, rows)
if __name__ == '__main__':
# assignments_turnitin_msonline_list()
assignments_turnitin_msonline_list_dev()
tada()
| # https://canvas.instructure.com/doc/api/assignments.html
from datetime import datetime
from canvas.core.courses import get_courses, get_courses_whitelisted, get_course_people, get_courses_by_account_id
from canvas.core.io import write_xlsx_file, tada
from canvas.core.assignments import get_assignments
def assignments_turnitin_msonline_list():
terms = ['2017-1SP']
programs = ['NFNPO', 'NCMO']
synergis = True
course_whitelist = get_courses_whitelisted([])
header = ['term', 'program', 'SIS ID', 'course name', 'assignment name', 'assignment URL', 'due date', 'points',
'group assignment', 'faculty of record']
rows = []
for course in course_whitelist or get_courses(terms, programs, synergis):
course_id = course['id']
if not get_course_people(course_id, 'student'):
continue
course_sis_id = course['sis_course_id']
program = course['course_sis_info']['program']
for assignment in get_assignments(course_id):
if 'external_tool' in assignment['submission_types']:
row = [terms[0],
program,
course_sis_id,
course['name'],
assignment['name'],
assignment['html_url'],
assignment['due_at'][0:10] if assignment['due_at'] else '',
assignment['points_possible'] if assignment['points_possible'] else '',
'X' if 'group_category_id' in assignment and assignment['group_category_id'] else '',
', '.join([p['name'] for p in get_course_people(course_id, 'Faculty of record')])]
rows.append(row)
print(row)
write_xlsx_file('turnitin_assignments_spring_{}'
.format(datetime.now().strftime('%Y.%m.%d.%H.%M.%S')), header, rows)
def assignments_turnitin_msonline_list_dev():
accounts = {'DEV FNPO': '168920', 'DEV CMO': '168922'}
header = ['program', 'course name', 'assignment name', 'assignment URL', 'points']
rows = []
for account in accounts:
for course in get_courses_by_account_id(accounts[account], 'DEFAULT'):
course_id = course['id']
for assignment in get_assignments(course_id):
if 'external_tool' in assignment['submission_types']:
row = [
account,
course['name'],
assignment['name'],
assignment['html_url'],
assignment['points_possible'] if assignment['points_possible'] else '']
rows.append(row)
print(row)
write_xlsx_file('turnitin_assignments_spring_dev_{}'
.format(datetime.now().strftime('%Y.%m.%d.%H.%M.%S')), header, rows)
if __name__ == '__main__':
# assignments_turnitin_msonline_list()
assignments_turnitin_msonline_list_dev()
tada() | en | 0.505826 | # https://canvas.instructure.com/doc/api/assignments.html # assignments_turnitin_msonline_list() | 2.923779 | 3 |
src/parsers/lane_2000_general_parser.py | zgordo/openelections-data-or | 18 | 6632911 | <filename>src/parsers/lane_2000_general_parser.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import os
import sys
import pdb
import fileinput
import unicodecsv
import pprint
# Configure variables
county = 'Lane'
outfile = '20001107__or__general__lane__precinct.csv'
headers = ['county', 'precinct', 'office', 'district', 'party', 'candidate', 'votes']
party_prefixes = ['DEM', 'REP']
office_lookup = {
'UNITED STATES PRESIDENT AND VICE PRESIDENT': 'President',
'UNITED STATES SENATOR': 'U.S. Senate',
'REP IN CONGRESS': 'U.S. House',
'SECRETARY OF STATE': 'Secretary of State',
'STATE TREASURER': 'State Treasurer',
'ATTORNEY GENERAL': 'Attorney General',
'GOVERNOR': 'Governor',
'STATE REPRESENTATIVE': 'State House',
'STATE SENATOR': 'State Senate'
}
candidate_lookup = {
'UNITED STATES PRESIDENT AND VICE PRESIDENT':
['<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>'],
'REP IN CONGRESS 4TH DISTRICT':
['<NAME>',
'<NAME>',
'<NAME>'],
'SECRETARY OF STATE':
['<NAME>',
'<NAME>',
'<NAME>',
'<NAME>. (ED) POLE II'],
'STATE TREASURER':
['<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>'],
'ATTORNEY GENERAL':
['<NAME>',
'<NAME>',
'<NAME>'],
'STATE SENATOR DIST 2':
['<NAME>',
'<NAME>'],
'STATE SENATOR DIST 24':
['<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 4':
['<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 39':
['<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 40':
['<NAME>',
'<NAME>. (<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 41':
['<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 42':
['<NAME>'],
'STATE REPRESENTATIVE DIST 43':
['<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 44':
['<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 47':
['<NAME>',
'<NAME>']
}
def main():
currentCanvass = ""
allCanvasses = []
divisionRE = re.compile("\f")
# read from stdin
for line in sys.stdin.readlines():
if divisionRE.match(line):
previousCanvass = OfficeCanvass(currentCanvass)
# pdb.set_trace()
if previousCanvass.title in candidate_lookup: # exclude lower offices
allCanvasses.append(previousCanvass)
# else:
# print "FAILED: %s" % previousCanvass
currentCanvass = ""
currentCanvass += line
# printCanvasses(allCanvasses)
writeCSV(allCanvasses)
def writeCSV(allCanvasses):
def listGet(inList, index, default):
try:
out = inList[index]
except IndexError:
out = default
return out
with open(outfile, 'wb') as csvfile:
w = unicodecsv.writer(csvfile, encoding='utf-8')
w.writerow(headers)
for canvass in allCanvasses:
for precinct, results in canvass.results.iteritems():
for index, result in enumerate(results):
normalisedOffice = office_lookup[canvass.office] # Normalise the office
candidate = canvass.candidates[index]
party = listGet(canvass.parties, index, "")
normalisedCandidate = normaliseName(candidate) # Normalise the candidate
normalisedPrecinct = precinct.replace("*", "")
row = [county, normalisedPrecinct, normalisedOffice, canvass.district,
party, normalisedCandidate, result]
print row
w.writerow(row)
def normaliseName(name):
name = name.title()
mistakes = {'Defazio': 'DeFazio',
'Mccorkle': 'McCorkle',
'<NAME>': '<NAME>'}
for mistake, correction in mistakes.iteritems():
if mistake in name:
name = name.replace(mistake, correction)
return name
def printCanvasses(allCanvasses):
pp = pprint.PrettyPrinter(indent=4)
for canvass in allCanvasses:
print canvass.parties
print canvass.office
print canvass.district
print canvass.candidates
pp.pprint(canvass.results)
print "====="
class OfficeCanvass(object):
def __init__(self, text):
self.lines = text.split('\r\n') # File uses Windows line endings
self.header = []
self.table = []
self.results = {}
self.district = ""
self.parties = []
self.candidates = []
self.endOfTableRE = re.compile("\*\*\*\*")
self.districtPrefixRE = re.compile(" (\d\d?)\w\w DISTRICT")
self.districtPostfixRE = re.compile(" DIST (\d\d?)")
self.precintRE = re.compile("\d\d? PRECINCTS")
self.partyRE = re.compile("\((\w\w\w)\)")
self.pageNumberRE = re.compile("Page Number\s+[\.\d]+")
self.parseTitle()
self.removeTurnoutColumns()
self.parseOfficeDistrict()
self.populateHeaderAndTable()
self.parseHeader()
self.populateCandidates()
self.parseResults()
def __repr__(self):
return "'"+self.title+"' -- '"+self.office+"'"
def removeTurnoutColumns(self):
for index, line in enumerate(self.lines):
if len(line) > 50:
self.lines[index] = line[:26]+line[43:]
def parseTitle(self):
self.title = self.lines[0].strip()
def parseOfficeDistrict(self):
self.office = self.title
m = self.districtPrefixRE.search(self.office) # try prefix
if not m:
m = self.districtPostfixRE.search(self.office) # if not, try postfix
if m:
self.district = m.group(1)
self.office = self.office.replace(m.group(0), "") # Remove district from office
def populateCandidates(self):
if self.title in candidate_lookup:
self.candidates = candidate_lookup[self.title]
def populateHeaderAndTable(self):
self.header = self.lines[2:26]
for line in self.lines[26:]:
if self.endOfTableRE.search(line):
break
if len(line.strip()):
self.table.append(line)
def parseHeader(self):
headerLines = self.header
# 1. Remove the page number
firstLine = headerLines[0]
m = self.pageNumberRE.search(firstLine)
if m:
firstLine = firstLine.replace(m.group(0), " "*len(m.group(0)))
headerLines[0] = firstLine
# 2. List candidate columns
cols = list(range(35, 80, 6)) # From col 35, every 6 columns contains a party
# 3. Make sure all strings are the same length
longestString = 100
for i, line in enumerate(headerLines):
headerLines[i] = headerLines[i].ljust(longestString)
# 4. Create 2D array of characters to easily zip()
bitmap = []
for line in headerLines:
bitmap.append(list(line))
# for aList in bitmap:
# print aList
# 5. Read vertical columns of characters as the names, removing unused spaces
# print('\n'.join('{}: {}'.format(*k) for k in enumerate(list(headerLines[0]))))
names = []
for col in cols:
name = "".join(zip(*bitmap)[col])
name = " ".join(name.split())
if name:
names.append(name)
self.parties = names
def parseResults(self):
for line in self.table:
columns = line.split()
candidateCount = len(self.candidates)
votes = columns[-candidateCount:]
del(columns[-candidateCount:])
precinct = " ".join(columns)
self.results[precinct] = votes
# Default function is main()
if __name__ == '__main__':
main() | <filename>src/parsers/lane_2000_general_parser.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import os
import sys
import pdb
import fileinput
import unicodecsv
import pprint
# Configure variables
county = 'Lane'
outfile = '20001107__or__general__lane__precinct.csv'
headers = ['county', 'precinct', 'office', 'district', 'party', 'candidate', 'votes']
party_prefixes = ['DEM', 'REP']
office_lookup = {
'UNITED STATES PRESIDENT AND VICE PRESIDENT': 'President',
'UNITED STATES SENATOR': 'U.S. Senate',
'REP IN CONGRESS': 'U.S. House',
'SECRETARY OF STATE': 'Secretary of State',
'STATE TREASURER': 'State Treasurer',
'ATTORNEY GENERAL': 'Attorney General',
'GOVERNOR': 'Governor',
'STATE REPRESENTATIVE': 'State House',
'STATE SENATOR': 'State Senate'
}
candidate_lookup = {
'UNITED STATES PRESIDENT AND VICE PRESIDENT':
['<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>'],
'REP IN CONGRESS 4TH DISTRICT':
['<NAME>',
'<NAME>',
'<NAME>'],
'SECRETARY OF STATE':
['<NAME>',
'<NAME>',
'<NAME>',
'<NAME>. (ED) POLE II'],
'STATE TREASURER':
['<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>'],
'ATTORNEY GENERAL':
['<NAME>',
'<NAME>',
'<NAME>'],
'STATE SENATOR DIST 2':
['<NAME>',
'<NAME>'],
'STATE SENATOR DIST 24':
['<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 4':
['<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 39':
['<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 40':
['<NAME>',
'<NAME>. (<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 41':
['<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 42':
['<NAME>'],
'STATE REPRESENTATIVE DIST 43':
['<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 44':
['<NAME>',
'<NAME>'],
'STATE REPRESENTATIVE DIST 47':
['<NAME>',
'<NAME>']
}
def main():
currentCanvass = ""
allCanvasses = []
divisionRE = re.compile("\f")
# read from stdin
for line in sys.stdin.readlines():
if divisionRE.match(line):
previousCanvass = OfficeCanvass(currentCanvass)
# pdb.set_trace()
if previousCanvass.title in candidate_lookup: # exclude lower offices
allCanvasses.append(previousCanvass)
# else:
# print "FAILED: %s" % previousCanvass
currentCanvass = ""
currentCanvass += line
# printCanvasses(allCanvasses)
writeCSV(allCanvasses)
def writeCSV(allCanvasses):
def listGet(inList, index, default):
try:
out = inList[index]
except IndexError:
out = default
return out
with open(outfile, 'wb') as csvfile:
w = unicodecsv.writer(csvfile, encoding='utf-8')
w.writerow(headers)
for canvass in allCanvasses:
for precinct, results in canvass.results.iteritems():
for index, result in enumerate(results):
normalisedOffice = office_lookup[canvass.office] # Normalise the office
candidate = canvass.candidates[index]
party = listGet(canvass.parties, index, "")
normalisedCandidate = normaliseName(candidate) # Normalise the candidate
normalisedPrecinct = precinct.replace("*", "")
row = [county, normalisedPrecinct, normalisedOffice, canvass.district,
party, normalisedCandidate, result]
print row
w.writerow(row)
def normaliseName(name):
name = name.title()
mistakes = {'Defazio': 'DeFazio',
'Mccorkle': 'McCorkle',
'<NAME>': '<NAME>'}
for mistake, correction in mistakes.iteritems():
if mistake in name:
name = name.replace(mistake, correction)
return name
def printCanvasses(allCanvasses):
pp = pprint.PrettyPrinter(indent=4)
for canvass in allCanvasses:
print canvass.parties
print canvass.office
print canvass.district
print canvass.candidates
pp.pprint(canvass.results)
print "====="
class OfficeCanvass(object):
def __init__(self, text):
self.lines = text.split('\r\n') # File uses Windows line endings
self.header = []
self.table = []
self.results = {}
self.district = ""
self.parties = []
self.candidates = []
self.endOfTableRE = re.compile("\*\*\*\*")
self.districtPrefixRE = re.compile(" (\d\d?)\w\w DISTRICT")
self.districtPostfixRE = re.compile(" DIST (\d\d?)")
self.precintRE = re.compile("\d\d? PRECINCTS")
self.partyRE = re.compile("\((\w\w\w)\)")
self.pageNumberRE = re.compile("Page Number\s+[\.\d]+")
self.parseTitle()
self.removeTurnoutColumns()
self.parseOfficeDistrict()
self.populateHeaderAndTable()
self.parseHeader()
self.populateCandidates()
self.parseResults()
def __repr__(self):
return "'"+self.title+"' -- '"+self.office+"'"
def removeTurnoutColumns(self):
for index, line in enumerate(self.lines):
if len(line) > 50:
self.lines[index] = line[:26]+line[43:]
def parseTitle(self):
self.title = self.lines[0].strip()
def parseOfficeDistrict(self):
self.office = self.title
m = self.districtPrefixRE.search(self.office) # try prefix
if not m:
m = self.districtPostfixRE.search(self.office) # if not, try postfix
if m:
self.district = m.group(1)
self.office = self.office.replace(m.group(0), "") # Remove district from office
def populateCandidates(self):
if self.title in candidate_lookup:
self.candidates = candidate_lookup[self.title]
def populateHeaderAndTable(self):
self.header = self.lines[2:26]
for line in self.lines[26:]:
if self.endOfTableRE.search(line):
break
if len(line.strip()):
self.table.append(line)
def parseHeader(self):
headerLines = self.header
# 1. Remove the page number
firstLine = headerLines[0]
m = self.pageNumberRE.search(firstLine)
if m:
firstLine = firstLine.replace(m.group(0), " "*len(m.group(0)))
headerLines[0] = firstLine
# 2. List candidate columns
cols = list(range(35, 80, 6)) # From col 35, every 6 columns contains a party
# 3. Make sure all strings are the same length
longestString = 100
for i, line in enumerate(headerLines):
headerLines[i] = headerLines[i].ljust(longestString)
# 4. Create 2D array of characters to easily zip()
bitmap = []
for line in headerLines:
bitmap.append(list(line))
# for aList in bitmap:
# print aList
# 5. Read vertical columns of characters as the names, removing unused spaces
# print('\n'.join('{}: {}'.format(*k) for k in enumerate(list(headerLines[0]))))
names = []
for col in cols:
name = "".join(zip(*bitmap)[col])
name = " ".join(name.split())
if name:
names.append(name)
self.parties = names
def parseResults(self):
for line in self.table:
columns = line.split()
candidateCount = len(self.candidates)
votes = columns[-candidateCount:]
del(columns[-candidateCount:])
precinct = " ".join(columns)
self.results[precinct] = votes
# Default function is main()
if __name__ == '__main__':
main() | en | 0.727332 | #!/usr/bin/python # -*- coding: utf-8 -*- # The MIT License (MIT) # Copyright (c) 2016 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # Configure variables # read from stdin # pdb.set_trace() # exclude lower offices # else: # print "FAILED: %s" % previousCanvass # printCanvasses(allCanvasses) # Normalise the office # Normalise the candidate # File uses Windows line endings # try prefix # if not, try postfix # Remove district from office # 1. Remove the page number # 2. List candidate columns # From col 35, every 6 columns contains a party # 3. Make sure all strings are the same length # 4. Create 2D array of characters to easily zip() # for aList in bitmap: # print aList # 5. Read vertical columns of characters as the names, removing unused spaces # print('\n'.join('{}: {}'.format(*k) for k in enumerate(list(headerLines[0])))) # Default function is main() | 1.965165 | 2 |
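
A small illustrative example (hypothetical header text, not real canvass data) of the character-transposition trick parseHeader relies on: pad the fixed-width header lines to equal length, transpose the character rows with zip(), and read a configured column top-to-bottom to recover a vertically printed name.

header_lines = [
    "          D  R",
    "          E  E",
    "          M  P",
]
width = max(len(line) for line in header_lines)
rows = [list(line.ljust(width)) for line in header_lines]
columns = list(zip(*rows))            # columns[i] holds the i-th character of every row

for col in (10, 13):                  # assumed column offsets for this toy header
    name = "".join(columns[col]).strip()
    print(name)                       # DEM, then REP
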
ghidra_bridge/ghidra_bridge.py | novafacing/ghidra_bridge | 0 | 6632912 | import sys
import weakref
import pydoc
from jfx_bridge import bridge
from .server.ghidra_bridge_port import DEFAULT_SERVER_PORT
from .server.ghidra_bridge_host import DEFAULT_SERVER_HOST
""" Use this list to exclude modules and names loaded by the remote ghidra_bridge side from being loaded into namespaces (they'll
still be present in the BridgedObject for the __main__ module). This prevents the ghidra_bridge imported by ghidra_bridge_server
being loaded over the local ghidra_bridge and causing issues. You probably only want this for stuff imported by the ghidra_bridge_server
script that might conflict on the local side (or which is totally unnecessary on the local side, like GhidraBridgeServer).
"""
EXCLUDED_REMOTE_IMPORTS = ["logging", "subprocess", "sys",
"ghidra_bridge", "bridge", "GhidraBridgeServer"]
GHIDRA_BRIDGE_NAMESPACE_TRACK = "__ghidra_bridge_namespace_track__"
def get_listing_panel(tool, ghidra):
""" Get the code listing UI element, so we can get up-to-date location/highlight/selection """
cvs = tool.getService(ghidra.app.services.CodeViewerService)
return cvs.getListingPanel()
class GhidraBridge():
def __init__(self, connect_to_host=bridge.DEFAULT_HOST, connect_to_port=DEFAULT_SERVER_PORT, loglevel=None, namespace=None, interactive_mode=None, response_timeout=bridge.DEFAULT_RESPONSE_TIMEOUT):
""" Set up a bridge. Default settings connect to the default ghidra bridge server,
If namespace is specified (e.g., locals() or globals()), automatically calls get_flat_api() with that namespace.
loglevel for what logging messages you want to capture
interactive_mode should auto-detect interactive environments (e.g., ipython or not in a script), but
you can force it to True or False if you need to. False is normal ghidra script behaviour
            (currentAddress/getState() etc locked to the values when the script started). True is closer to the
behaviour in the Ghidra Jython shell - current*/getState() reflect the current values in the GUI
response_timeout is how long to wait for a response before throwing an exception, in seconds
"""
self.bridge = bridge.BridgeClient(
connect_to_host=connect_to_host, connect_to_port=connect_to_port, loglevel=loglevel, response_timeout=response_timeout)
if interactive_mode is None:
# from https://stackoverflow.com/questions/2356399/tell-if-python-is-in-interactive-mode, sys.ps1 only present in interactive interpreters
interactive_mode = bool(getattr(sys, 'ps1', sys.flags.interactive))
self.interactive_mode = interactive_mode
self.interactive_listener = None
self.flat_api_modules_list = []
self.namespace_list = []
self.namespace = None
if namespace is not None:
if connect_to_host is None or connect_to_port is None:
raise Exception(
"Can't get_flat_api for the namespace if connect_to_host/port are none - need a server!")
# track the namespace we loaded with - if we're part of an __enter__/__exit__ setup, we'll use it to automatically unload the flat api
self.namespace = namespace
self.get_flat_api(namespace=self.namespace)
def get_flat_api(self, namespace=None):
""" Get the flat API (as well as the GhidraScript API). If a namespace is provided (e.g., locals() or globals()), load the methods and
fields from the APIs into that namespace (call unload_flat_api() to remove). Otherwise, just return the bridged module.
Note that the ghidra and java packages are always loaded into the remote script's side, so get_flat_api with namespace will get the
ghidra api and java namespace for you for free.
"""
remote_main = self.bridge.remote_import("__main__")
if namespace is not None:
            # we're going to need all of __main__, so get it all in one hit
remote_main._bridged_get_all()
if self.interactive_mode:
# if we're in headless mode (indicated by no state attribute for pythonRun or no tool for ghidra headless), we can't actually do interactive mode - we don't have access to a PluginTool
if not hasattr(remote_main, 'state') or remote_main.state.getTool() is None:
self.interactive_mode = False
self.bridge.logger.warning(
"Disabling interactive mode - not supported when running against a headless Ghidra")
else:
                # first, manually update all the current* values (this allows us to get the latest values, instead of what they were when the server started)
tool = remote_main.state.getTool() # note: tool shouldn't change
listing_panel = get_listing_panel(tool, remote_main.ghidra)
locn = listing_panel.getProgramLocation()
# set the values as overrides in the bridged object - this prevents them from being changed in the remote object
remote_main._bridge_set_override(
"currentAddress", locn.getAddress())
remote_main._bridge_set_override(
"currentProgram", listing_panel.getProgram())
remote_main._bridge_set_override("currentLocation", locn)
remote_main._bridge_set_override(
"currentSelection", listing_panel.getProgramSelection())
remote_main._bridge_set_override(
"currentHighlight", listing_panel.getProgramHighlight())
# next, keep a reference to this module for updating these addresses
self.flat_api_modules_list.append(weakref.ref(remote_main))
# next, overwrite getState with the getState_fix
def getState_fix():
""" Used when in interactive mode - instead of calling the remote getState,
relies on the fact that the current* variables are being updated and creates
a GhidraState based on them.
This avoids resetting the GUI to the original values in the remote getState
"""
return remote_main.ghidra.app.script.GhidraState(tool, tool.getProject(), remote_main.currentProgram, remote_main.currentLocation, remote_main.currentSelection, remote_main.currentHighlight)
remote_main._bridge_set_override("getState", getState_fix)
# finally, install a listener for updates from the GUI events
if self.interactive_listener is None:
def update_vars(currentProgram=None, currentLocation=None, currentSelection=None, currentHighlight=None):
""" For all the namespaces and modules we've returned, update the current* variables that have changed
"""
# clear out any dead references
self.flat_api_modules_list = [
module for module in self.flat_api_modules_list if module() is not None]
update_list = [
module() for module in self.flat_api_modules_list]
for update in update_list:
# possible that a module might have been removed between the clear out and preparing the update list
if update is not None:
if currentProgram is not None:
update.currentProgram = currentProgram
if currentLocation is not None:
# match the order of updates in GhidraScript - location before address
update.currentLocation = currentLocation
update.currentAddress = currentLocation.getAddress()
if currentSelection is not None:
update.currentSelection = currentSelection if not currentSelection.isEmpty() else None
if currentHighlight is not None:
update.currentHighlight = currentHighlight if not currentHighlight.isEmpty() else None
# repeat the same for the namespace dictionaries, but also make sure we update the tracker so we know what to remove later
for update_dict in self.namespace_list:
if currentProgram is not None:
update_dict["currentProgram"] = currentProgram
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentProgram"] = update_dict["currentProgram"]
if currentLocation is not None:
# match the order of updates in GhidraScript - location before address
update_dict["currentLocation"] = currentLocation
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentLocation"] = update_dict["currentLocation"]
update_dict["currentAddress"] = currentLocation.getAddress(
)
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentAddress"] = update_dict["currentAddress"]
if currentSelection is not None:
update_dict["currentSelection"] = currentSelection if not currentSelection.isEmpty(
) else None
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentSelection"] = update_dict["currentSelection"]
if currentHighlight is not None:
update_dict["currentHighlight"] = currentHighlight if not currentHighlight.isEmpty(
) else None
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentHighlight"] = update_dict["currentHighlight"]
# create the interactive listener to call our update_vars function (InteractiveListener defined in the GhidraBridgeServer class)
self.interactive_listener = remote_main.GhidraBridgeServer.InteractiveListener(
remote_main.state.getTool(), update_vars)
if namespace is not None:
# add a special var to the namespace to track what we add, so we can remove it easily later
namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK] = dict()
# load in all the attrs from remote main, skipping the double underscores and avoiding overloading our own ghidra_bridge (and similar modules)
try:
for attr in set(remote_main._bridge_attrs + list(remote_main._bridge_overrides.keys())):
if not attr.startswith("__") and attr not in EXCLUDED_REMOTE_IMPORTS:
remote_attr = getattr(remote_main, attr)
namespace[attr] = remote_attr
# record what we added to the namespace
namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK][attr] = remote_attr
# overload isinstance with bridged_isinstance, so checking bridged objects are of bridged types will just work
namespace["isinstance"] = bridge.bridged_isinstance
namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK]["isinstance"] = bridge.bridged_isinstance
# overwrite help with our own function for using ghidra's help
def ghidra_help(param=None):
""" Used when in interactive mode - calls through the bridge to call ghidra's help and capture the output, then print it locally """
if param is not None and not bridge._is_bridged_object(param):
# asking for help on something that isn't bridged - just use the original help
# make sure we have the real help, just in case we've overridden it already
builtin_help = None
try:
from builtins import help as builtin_help # python3
except:
# try falling back to python2 syntax
from __builtin__ import help as builtin_help
builtin_help(param)
else:
# make a remote help call - either param is bridged, or no param (in which case, we'll get the default help for the GhidraScript API)
help_output = remote_main.GhidraBridgeServer.ghidra_help(
param)
pydoc.pager(help_output)
namespace["help"] = ghidra_help
namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK]["help"] = ghidra_help
except Exception:
self.unload_flat_api(namespace)
raise
# if we're interactive, keep track of the namespace so we can update the current* values
if self.interactive_mode:
self.namespace_list.append(namespace)
return remote_main
def unload_flat_api(self, namespace=None):
""" If get_flat_api was called with a namespace and loaded methods/fields into it, unload_flat_api will remove them.
Note: if the values don't match what was loaded, we assume the caller has modified for their own reasons, and leave alone.
"""
if namespace is None:
if self.namespace is None:
raise Exception(
"Bridge wasn't initialized with a namespace - need to specify the namespace you want to unload from")
namespace = self.namespace
if self.interactive_mode and namespace in self.namespace_list:
self.namespace_list.remove(namespace)
if GHIDRA_BRIDGE_NAMESPACE_TRACK in namespace:
for key, value in namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK].items():
if key in namespace:
# we use "is", not ==, because we're checking it's the same object, not just that it matches
if namespace[key] is value:
del namespace[key]
else:
raise Exception(GHIDRA_BRIDGE_NAMESPACE_TRACK +
" not present in namespace - get_flat_api() didn't load into this namespace")
def get_ghidra_api(self):
""" get the ghidra api - `ghidra = bridge.get_ghidra_api()` equivalent to doing `import ghidra` in your script.
Note that the module returned from get_flat_api() will also contain the ghidra module, so you may not need to call this.
"""
return self.bridge.remote_import("ghidra")
def get_java_api(self):
""" get the java namespace - `java = bridge.get_java_api()` equivalent to doing `import java` in your script.
Note that the module returned from get_flat_api() will also contain the java module, so you may not need to call this.
"""
return self.bridge.remote_import("java")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.namespace is not None:
self.unload_flat_api(self.namespace)
if self.interactive_listener is not None:
self.interactive_listener.stop_listening()
| import sys
import weakref
import pydoc
from jfx_bridge import bridge
from .server.ghidra_bridge_port import DEFAULT_SERVER_PORT
from .server.ghidra_bridge_host import DEFAULT_SERVER_HOST
""" Use this list to exclude modules and names loaded by the remote ghidra_bridge side from being loaded into namespaces (they'll
still be present in the BridgedObject for the __main__ module). This prevents the ghidra_bridge imported by ghidra_bridge_server
being loaded over the local ghidra_bridge and causing issues. You probably only want this for stuff imported by the ghidra_bridge_server
script that might conflict on the local side (or which is totally unnecessary on the local side, like GhidraBridgeServer).
"""
EXCLUDED_REMOTE_IMPORTS = ["logging", "subprocess", "sys",
"ghidra_bridge", "bridge", "GhidraBridgeServer"]
GHIDRA_BRIDGE_NAMESPACE_TRACK = "__ghidra_bridge_namespace_track__"
def get_listing_panel(tool, ghidra):
""" Get the code listing UI element, so we can get up-to-date location/highlight/selection """
cvs = tool.getService(ghidra.app.services.CodeViewerService)
return cvs.getListingPanel()
class GhidraBridge():
def __init__(self, connect_to_host=bridge.DEFAULT_HOST, connect_to_port=DEFAULT_SERVER_PORT, loglevel=None, namespace=None, interactive_mode=None, response_timeout=bridge.DEFAULT_RESPONSE_TIMEOUT):
""" Set up a bridge. Default settings connect to the default ghidra bridge server,
If namespace is specified (e.g., locals() or globals()), automatically calls get_flat_api() with that namespace.
loglevel for what logging messages you want to capture
interactive_mode should auto-detect interactive environments (e.g., ipython or not in a script), but
you can force it to True or False if you need to. False is normal ghidra script behaviour
            (currentAddress/getState() etc locked to the values when the script started). True is closer to the
behaviour in the Ghidra Jython shell - current*/getState() reflect the current values in the GUI
response_timeout is how long to wait for a response before throwing an exception, in seconds
"""
self.bridge = bridge.BridgeClient(
connect_to_host=connect_to_host, connect_to_port=connect_to_port, loglevel=loglevel, response_timeout=response_timeout)
if interactive_mode is None:
# from https://stackoverflow.com/questions/2356399/tell-if-python-is-in-interactive-mode, sys.ps1 only present in interactive interpreters
interactive_mode = bool(getattr(sys, 'ps1', sys.flags.interactive))
self.interactive_mode = interactive_mode
self.interactive_listener = None
self.flat_api_modules_list = []
self.namespace_list = []
self.namespace = None
if namespace is not None:
if connect_to_host is None or connect_to_port is None:
raise Exception(
"Can't get_flat_api for the namespace if connect_to_host/port are none - need a server!")
# track the namespace we loaded with - if we're part of an __enter__/__exit__ setup, we'll use it to automatically unload the flat api
self.namespace = namespace
self.get_flat_api(namespace=self.namespace)
def get_flat_api(self, namespace=None):
""" Get the flat API (as well as the GhidraScript API). If a namespace is provided (e.g., locals() or globals()), load the methods and
fields from the APIs into that namespace (call unload_flat_api() to remove). Otherwise, just return the bridged module.
Note that the ghidra and java packages are always loaded into the remote script's side, so get_flat_api with namespace will get the
ghidra api and java namespace for you for free.
"""
remote_main = self.bridge.remote_import("__main__")
if namespace is not None:
            # we're going to need all of __main__, so get it all in one hit
remote_main._bridged_get_all()
if self.interactive_mode:
# if we're in headless mode (indicated by no state attribute for pythonRun or no tool for ghidra headless), we can't actually do interactive mode - we don't have access to a PluginTool
if not hasattr(remote_main, 'state') or remote_main.state.getTool() is None:
self.interactive_mode = False
self.bridge.logger.warning(
"Disabling interactive mode - not supported when running against a headless Ghidra")
else:
                # first, manually update all the current* values (this allows us to get the latest values, instead of what they were when the server started)
tool = remote_main.state.getTool() # note: tool shouldn't change
listing_panel = get_listing_panel(tool, remote_main.ghidra)
locn = listing_panel.getProgramLocation()
# set the values as overrides in the bridged object - this prevents them from being changed in the remote object
remote_main._bridge_set_override(
"currentAddress", locn.getAddress())
remote_main._bridge_set_override(
"currentProgram", listing_panel.getProgram())
remote_main._bridge_set_override("currentLocation", locn)
remote_main._bridge_set_override(
"currentSelection", listing_panel.getProgramSelection())
remote_main._bridge_set_override(
"currentHighlight", listing_panel.getProgramHighlight())
# next, keep a reference to this module for updating these addresses
self.flat_api_modules_list.append(weakref.ref(remote_main))
# next, overwrite getState with the getState_fix
def getState_fix():
""" Used when in interactive mode - instead of calling the remote getState,
relies on the fact that the current* variables are being updated and creates
a GhidraState based on them.
This avoids resetting the GUI to the original values in the remote getState
"""
return remote_main.ghidra.app.script.GhidraState(tool, tool.getProject(), remote_main.currentProgram, remote_main.currentLocation, remote_main.currentSelection, remote_main.currentHighlight)
remote_main._bridge_set_override("getState", getState_fix)
# finally, install a listener for updates from the GUI events
if self.interactive_listener is None:
def update_vars(currentProgram=None, currentLocation=None, currentSelection=None, currentHighlight=None):
""" For all the namespaces and modules we've returned, update the current* variables that have changed
"""
# clear out any dead references
self.flat_api_modules_list = [
module for module in self.flat_api_modules_list if module() is not None]
update_list = [
module() for module in self.flat_api_modules_list]
for update in update_list:
# possible that a module might have been removed between the clear out and preparing the update list
if update is not None:
if currentProgram is not None:
update.currentProgram = currentProgram
if currentLocation is not None:
# match the order of updates in GhidraScript - location before address
update.currentLocation = currentLocation
update.currentAddress = currentLocation.getAddress()
if currentSelection is not None:
update.currentSelection = currentSelection if not currentSelection.isEmpty() else None
if currentHighlight is not None:
update.currentHighlight = currentHighlight if not currentHighlight.isEmpty() else None
# repeat the same for the namespace dictionaries, but also make sure we update the tracker so we know what to remove later
for update_dict in self.namespace_list:
if currentProgram is not None:
update_dict["currentProgram"] = currentProgram
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentProgram"] = update_dict["currentProgram"]
if currentLocation is not None:
# match the order of updates in GhidraScript - location before address
update_dict["currentLocation"] = currentLocation
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentLocation"] = update_dict["currentLocation"]
update_dict["currentAddress"] = currentLocation.getAddress(
)
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentAddress"] = update_dict["currentAddress"]
if currentSelection is not None:
update_dict["currentSelection"] = currentSelection if not currentSelection.isEmpty(
) else None
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentSelection"] = update_dict["currentSelection"]
if currentHighlight is not None:
update_dict["currentHighlight"] = currentHighlight if not currentHighlight.isEmpty(
) else None
update_dict[GHIDRA_BRIDGE_NAMESPACE_TRACK]["currentHighlight"] = update_dict["currentHighlight"]
# create the interactive listener to call our update_vars function (InteractiveListener defined in the GhidraBridgeServer class)
self.interactive_listener = remote_main.GhidraBridgeServer.InteractiveListener(
remote_main.state.getTool(), update_vars)
if namespace is not None:
# add a special var to the namespace to track what we add, so we can remove it easily later
namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK] = dict()
# load in all the attrs from remote main, skipping the double underscores and avoiding overloading our own ghidra_bridge (and similar modules)
try:
for attr in set(remote_main._bridge_attrs + list(remote_main._bridge_overrides.keys())):
if not attr.startswith("__") and attr not in EXCLUDED_REMOTE_IMPORTS:
remote_attr = getattr(remote_main, attr)
namespace[attr] = remote_attr
# record what we added to the namespace
namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK][attr] = remote_attr
# overload isinstance with bridged_isinstance, so checking bridged objects are of bridged types will just work
namespace["isinstance"] = bridge.bridged_isinstance
namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK]["isinstance"] = bridge.bridged_isinstance
# overwrite help with our own function for using ghidra's help
def ghidra_help(param=None):
""" Used when in interactive mode - calls through the bridge to call ghidra's help and capture the output, then print it locally """
if param is not None and not bridge._is_bridged_object(param):
# asking for help on something that isn't bridged - just use the original help
# make sure we have the real help, just in case we've overridden it already
builtin_help = None
try:
from builtins import help as builtin_help # python3
except:
# try falling back to python2 syntax
from __builtin__ import help as builtin_help
builtin_help(param)
else:
# make a remote help call - either param is bridged, or no param (in which case, we'll get the default help for the GhidraScript API)
help_output = remote_main.GhidraBridgeServer.ghidra_help(
param)
pydoc.pager(help_output)
namespace["help"] = ghidra_help
namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK]["help"] = ghidra_help
except Exception:
self.unload_flat_api(namespace)
raise
# if we're interactive, keep track of the namespace so we can update the current* values
if self.interactive_mode:
self.namespace_list.append(namespace)
return remote_main
def unload_flat_api(self, namespace=None):
""" If get_flat_api was called with a namespace and loaded methods/fields into it, unload_flat_api will remove them.
Note: if the values don't match what was loaded, we assume the caller has modified for their own reasons, and leave alone.
"""
if namespace is None:
if self.namespace is None:
raise Exception(
"Bridge wasn't initialized with a namespace - need to specify the namespace you want to unload from")
namespace = self.namespace
if self.interactive_mode and namespace in self.namespace_list:
self.namespace_list.remove(namespace)
if GHIDRA_BRIDGE_NAMESPACE_TRACK in namespace:
for key, value in namespace[GHIDRA_BRIDGE_NAMESPACE_TRACK].items():
if key in namespace:
# we use "is", not ==, because we're checking it's the same object, not just that it matches
if namespace[key] is value:
del namespace[key]
else:
raise Exception(GHIDRA_BRIDGE_NAMESPACE_TRACK +
" not present in namespace - get_flat_api() didn't load into this namespace")
def get_ghidra_api(self):
""" get the ghidra api - `ghidra = bridge.get_ghidra_api()` equivalent to doing `import ghidra` in your script.
Note that the module returned from get_flat_api() will also contain the ghidra module, so you may not need to call this.
"""
return self.bridge.remote_import("ghidra")
def get_java_api(self):
""" get the java namespace - `java = bridge.get_java_api()` equivalent to doing `import java` in your script.
Note that the module returned from get_flat_api() will also contain the java module, so you may not need to call this.
"""
return self.bridge.remote_import("java")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.namespace is not None:
self.unload_flat_api(self.namespace)
if self.interactive_listener is not None:
self.interactive_listener.stop_listening()
| en | 0.818189 | Use this list to exclude modules and names loaded by the remote ghidra_bridge side from being loaded into namespaces (they'll still be present in the BridgedObject for the __main__ module. This prevents the ghidra_bridge imported by ghidra_bridge_server being loaded over the local ghidra_bridge and causing issues. You probably only want this for stuff imported by the ghidra_bridge_server script that might conflict on the local side (or which is totally unnecessary on the local side, like GhidraBridgeServer). Get the code listing UI element, so we can get up-to-date location/highlight/selection Set up a bridge. Default settings connect to the default ghidra bridge server, If namespace is specified (e.g., locals() or globals()), automatically calls get_flat_api() with that namespace. loglevel for what logging messages you want to capture interactive_mode should auto-detect interactive environments (e.g., ipython or not in a script), but you can force it to True or False if you need to. False is normal ghidra script behaviour (currentAddress/getState() etc locked to the values when the script started. True is closer to the behaviour in the Ghidra Jython shell - current*/getState() reflect the current values in the GUI response_timeout is how long to wait for a response before throwing an exception, in seconds # from https://stackoverflow.com/questions/2356399/tell-if-python-is-in-interactive-mode, sys.ps1 only present in interactive interpreters # track the namespace we loaded with - if we're part of an __enter__/__exit__ setup, we'll use it to automatically unload the flat api Get the flat API (as well as the GhidraScript API). If a namespace is provided (e.g., locals() or globals()), load the methods and fields from the APIs into that namespace (call unload_flat_api() to remove). Otherwise, just return the bridged module. Note that the ghidra and java packages are always loaded into the remote script's side, so get_flat_api with namespace will get the ghidra api and java namespace for you for free. # we're going to need the all of __main__, so get it all in one hit # if we're in headless mode (indicated by no state attribute for pythonRun or no tool for ghidra headless), we can't actually do interactive mode - we don't have access to a PluginTool # first, manually update all the current* values (this allows us to get the latest values, instead of what they were when the server started # note: tool shouldn't change # set the values as overrides in the bridged object - this prevents them from being changed in the remote object # next, keep a reference to this module for updating these addresses # next, overwrite getState with the getState_fix Used when in interactive mode - instead of calling the remote getState, relies on the fact that the current* variables are being updated and creates a GhidraState based on them. 
This avoids resetting the GUI to the original values in the remote getState # finally, install a listener for updates from the GUI events For all the namespaces and modules we've returned, update the current* variables that have changed # clear out any dead references # possible that a module might have been removed between the clear out and preparing the update list # match the order of updates in GhidraScript - location before address # repeat the same for the namespace dictionaries, but also make sure we update the tracker so we know what to remove later # match the order of updates in GhidraScript - location before address # create the interactive listener to call our update_vars function (InteractiveListener defined in the GhidraBridgeServer class) # add a special var to the namespace to track what we add, so we can remove it easily later # load in all the attrs from remote main, skipping the double underscores and avoiding overloading our own ghidra_bridge (and similar modules) # record what we added to the namespace # overload isinstance with bridged_isinstance, so checking bridged objects are of bridged types will just work # overwrite help with our own function for using ghidra's help Used when in interactive mode - calls through the bridge to call ghidra's help and capture the output, then print it locally # asking for help on something that isn't bridged - just use the original help # make sure we have the real help, just in case we've overridden it already # python3 # try falling back to python2 syntax # make a remote help call - either param is bridged, or no param (in which case, we'll get the default help for the GhidraScript API) # if we're interactive, keep track of the namespace so we can update the current* values If get_flat_api was called with a namespace and loaded methods/fields into it, unload_flat_api will remove them. Note: if the values don't match what was loaded, we assume the caller has modified for their own reasons, and leave alone. # we use "is", not ==, because we're checking it's the same object, not just that it matches get the ghidra api - `ghidra = bridge.get_ghidra_api()` equivalent to doing `import ghidra` in your script. Note that the module returned from get_flat_api() will also contain the ghidra module, so you may not need to call this. get the java namespace - `java = bridge.get_java_api()` equivalent to doing `import java` in your script. Note that the module returned from get_flat_api() will also contain the java module, so you may not need to call this. | 1.915601 | 2 |
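
A hedged usage sketch based only on the class above: it assumes the package exposes GhidraBridge at the top level and that a ghidra_bridge server is already running inside Ghidra on the default host and port; names such as currentProgram are injected by get_flat_api.

import ghidra_bridge

# Option 1: context manager - loads the flat API into the given namespace on entry
# and unloads it again on exit (see __enter__/__exit__ above)
with ghidra_bridge.GhidraBridge(namespace=globals()):
    print(currentProgram.getName())    # available only while a program is open in Ghidra

# Option 2: keep the bridged __main__ module and use it explicitly
b = ghidra_bridge.GhidraBridge()
flat = b.get_flat_api()
print(flat.currentProgram.getName())
ghidra = b.get_ghidra_api()            # equivalent to `import ghidra` on the remote side
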
map_config/migrate_graph.py | piyushk/utexas_guidance | 0 | 6632913 | <reponame>piyushk/utexas_guidance
import yaml
with open("graph_copy.yaml", 'r') as stream:
contents = yaml.load(stream)
for vtx in contents:
vtx["id"] += 49
for i in range(len(vtx["edges"])):
vtx["edges"][i] += 49
vtx["z"] = 15.0
with open("graph_copy2.yaml", 'w') as stream:
yaml.dump(contents, stream, default_flow_style=False)
| import yaml
with open("graph_copy.yaml", 'r') as stream:
contents = yaml.load(stream)
for vtx in contents:
vtx["id"] += 49
for i in range(len(vtx["edges"])):
vtx["edges"][i] += 49
vtx["z"] = 15.0
with open("graph_copy2.yaml", 'w') as stream:
yaml.dump(contents, stream, default_flow_style=False) | none | 1 | 2.344363 | 2 |
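
A before/after illustration with a made-up vertex (the x and y fields are assumptions) of the transformation performed above: every vertex id and edge reference is shifted by 49 and a fixed z of 15.0 is added.

vtx = {"id": 3, "edges": [1, 7], "x": 2.5, "y": 4.0}
vtx["id"] += 49
vtx["edges"] = [e + 49 for e in vtx["edges"]]
vtx["z"] = 15.0
print(vtx)   # {'id': 52, 'edges': [50, 56], 'x': 2.5, 'y': 4.0, 'z': 15.0}
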
|
tests/app/db.py | tlwr/notifications-api | 0 | 6632914 | <reponame>tlwr/notifications-api
import random
import uuid
from datetime import datetime, date, timedelta
from app import db
from app.dao.email_branding_dao import dao_create_email_branding
from app.dao.inbound_sms_dao import dao_create_inbound_sms
from app.dao.invited_org_user_dao import save_invited_org_user
from app.dao.invited_user_dao import save_invited_user
from app.dao.jobs_dao import dao_create_job
from app.dao.notifications_dao import (
dao_create_notification,
)
from app.dao.organisation_dao import dao_create_organisation, dao_add_service_to_organisation
from app.dao.permissions_dao import permission_dao
from app.dao.service_callback_api_dao import save_service_callback_api
from app.dao.service_data_retention_dao import insert_service_data_retention
from app.dao.service_inbound_api_dao import save_service_inbound_api
from app.dao.service_permissions_dao import dao_add_service_permission
from app.dao.service_sms_sender_dao import update_existing_sms_sender_with_inbound_number, dao_update_service_sms_sender
from app.dao.services_dao import dao_create_service, dao_add_user_to_service
from app.dao.templates_dao import dao_create_template, dao_update_template
from app.dao.users_dao import save_model_user
from app.models import (
ApiKey,
DailySortedLetter,
InboundSms,
InboundNumber,
Job,
Notification,
EmailBranding,
LetterRate,
Organisation,
Permission,
Rate,
Service,
ServiceEmailReplyTo,
ServiceInboundApi,
ServiceCallbackApi,
ServiceLetterContact,
ServicePermission,
ServiceSmsSender,
ServiceGuestList,
Template,
User,
EMAIL_TYPE,
MOBILE_TYPE,
SMS_TYPE,
LETTER_TYPE,
KEY_TYPE_NORMAL,
AnnualBilling,
InvitedOrganisationUser,
FactBilling,
FactNotificationStatus,
Complaint,
InvitedUser,
TemplateFolder,
LetterBranding,
Domain,
NotificationHistory,
ReturnedLetter,
ServiceContactList,
BroadcastMessage,
BroadcastStatusType,
BroadcastEvent,
BroadcastProvider,
BroadcastProviderMessage,
BroadcastProviderMessageNumber
)
def create_user(
*,
mobile_number="+447700900986",
email="<EMAIL>",
state='active',
id_=None,
name="Test User"
):
data = {
'id': id_ or uuid.uuid4(),
'name': name,
'email_address': email,
'password': 'password',
'mobile_number': mobile_number,
'state': state
}
user = User.query.filter_by(email_address=email).first()
if not user:
user = User(**data)
save_model_user(user, validated_email_access=True)
return user
def create_permissions(user, service, *permissions):
permissions = [
Permission(service_id=service.id, user_id=user.id, permission=p)
for p in permissions
]
permission_dao.set_user_service_permission(user, service, permissions, _commit=True)
def create_service(
user=None,
service_name="Sample service",
service_id=None,
restricted=False,
count_as_live=True,
service_permissions=None,
research_mode=False,
active=True,
email_from=None,
prefix_sms=True,
message_limit=1000,
organisation_type='central',
check_if_service_exists=False,
go_live_user=None,
go_live_at=None,
crown=True,
organisation=None
):
if check_if_service_exists:
service = Service.query.filter_by(name=service_name).first()
if (not check_if_service_exists) or (check_if_service_exists and not service):
service = Service(
name=service_name,
message_limit=message_limit,
restricted=restricted,
email_from=email_from if email_from else service_name.lower().replace(' ', '.'),
created_by=user if user else create_user(email='{}<EMAIL>'.format(uuid.uuid4())),
prefix_sms=prefix_sms,
organisation_type=organisation_type,
organisation=organisation,
go_live_user=go_live_user,
go_live_at=go_live_at,
crown=crown
)
dao_create_service(
service,
service.created_by,
service_id,
service_permissions=service_permissions,
)
service.active = active
service.research_mode = research_mode
service.count_as_live = count_as_live
else:
if user and user not in service.users:
dao_add_user_to_service(service, user)
return service
def create_service_with_inbound_number(
inbound_number='1234567',
*args, **kwargs
):
service = create_service(*args, **kwargs)
sms_sender = ServiceSmsSender.query.filter_by(service_id=service.id).first()
inbound = create_inbound_number(number=inbound_number, service_id=service.id)
update_existing_sms_sender_with_inbound_number(service_sms_sender=sms_sender,
sms_sender=inbound_number,
inbound_number_id=inbound.id)
return service
def create_service_with_defined_sms_sender(
sms_sender_value='1234567',
*args, **kwargs
):
service = create_service(*args, **kwargs)
sms_sender = ServiceSmsSender.query.filter_by(service_id=service.id).first()
dao_update_service_sms_sender(service_id=service.id,
service_sms_sender_id=sms_sender.id,
is_default=True,
sms_sender=sms_sender_value)
return service
def create_template(
service,
template_type=SMS_TYPE,
template_name=None,
subject='Template subject',
content='Dear Sir/Madam, Hello. Yours Truly, The Government.',
reply_to=None,
hidden=False,
archived=False,
folder=None,
postage=None,
process_type='normal',
contact_block_id=None
):
data = {
'name': template_name or '{} Template Name'.format(template_type),
'template_type': template_type,
'content': content,
'service': service,
'created_by': service.created_by,
'reply_to': reply_to,
'hidden': hidden,
'folder': folder,
'process_type': process_type,
}
if template_type == LETTER_TYPE:
data["postage"] = postage or "second"
if contact_block_id:
data['service_letter_contact_id'] = contact_block_id
if template_type != SMS_TYPE:
data['subject'] = subject
template = Template(**data)
dao_create_template(template)
if archived:
template.archived = archived
dao_update_template(template)
return template
def create_notification(
template=None,
job=None,
job_row_number=None,
to_field=None,
status='created',
reference=None,
created_at=None,
sent_at=None,
updated_at=None,
billable_units=1,
personalisation=None,
api_key=None,
key_type=KEY_TYPE_NORMAL,
sent_by=None,
client_reference=None,
rate_multiplier=None,
international=False,
phone_prefix=None,
scheduled_for=None,
normalised_to=None,
one_off=False,
reply_to_text=None,
created_by_id=None,
postage=None,
document_download_count=None,
):
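    # A notification is built from either a job (which carries its own
    # template) or an explicit template; remaining fields default below.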
assert job or template
if job:
template = job.template
if created_at is None:
created_at = datetime.utcnow()
if to_field is None:
to_field = '+447700900855' if template.template_type == SMS_TYPE else 'test@<EMAIL>'
if status not in ('created', 'validation-failed', 'virus-scan-failed', 'pending-virus-check'):
sent_at = sent_at or datetime.utcnow()
updated_at = updated_at or datetime.utcnow()
if not one_off and (job is None and api_key is None):
        # no api key was specified in the test - look up an existing one or create it
api_key = ApiKey.query.filter(ApiKey.service == template.service, ApiKey.key_type == key_type).first()
if not api_key:
api_key = create_api_key(template.service, key_type=key_type)
if template.template_type == 'letter' and postage is None:
postage = 'second'
data = {
'id': uuid.uuid4(),
'to': to_field,
'job_id': job and job.id,
'job': job,
'service_id': template.service.id,
'service': template.service,
'template_id': template.id,
'template_version': template.version,
'status': status,
'reference': reference,
'created_at': created_at,
'sent_at': sent_at,
'billable_units': billable_units,
'personalisation': personalisation,
'notification_type': template.template_type,
'api_key': api_key,
'api_key_id': api_key and api_key.id,
'key_type': api_key.key_type if api_key else key_type,
'sent_by': sent_by,
'updated_at': updated_at,
'client_reference': client_reference,
'job_row_number': job_row_number,
'rate_multiplier': rate_multiplier,
'international': international,
'phone_prefix': phone_prefix,
'normalised_to': normalised_to,
'reply_to_text': reply_to_text,
'created_by_id': created_by_id,
'postage': postage,
'document_download_count': document_download_count,
}
notification = Notification(**data)
dao_create_notification(notification)
return notification
def create_notification_history(
template=None,
job=None,
job_row_number=None,
status='created',
reference=None,
created_at=None,
sent_at=None,
updated_at=None,
billable_units=1,
api_key=None,
key_type=KEY_TYPE_NORMAL,
sent_by=None,
client_reference=None,
rate_multiplier=None,
international=False,
phone_prefix=None,
created_by_id=None,
postage=None,
id=None
):
assert job or template
if job:
template = job.template
if created_at is None:
created_at = datetime.utcnow()
if status != 'created':
sent_at = sent_at or datetime.utcnow()
updated_at = updated_at or datetime.utcnow()
if template.template_type == 'letter' and postage is None:
postage = 'second'
data = {
'id': id or uuid.uuid4(),
'job_id': job and job.id,
'job': job,
'service_id': template.service.id,
'service': template.service,
'template_id': template.id,
'template_version': template.version,
'status': status,
'reference': reference,
'created_at': created_at,
'sent_at': sent_at,
'billable_units': billable_units,
'notification_type': template.template_type,
'api_key': api_key,
'api_key_id': api_key and api_key.id,
'key_type': api_key.key_type if api_key else key_type,
'sent_by': sent_by,
'updated_at': updated_at,
'client_reference': client_reference,
'job_row_number': job_row_number,
'rate_multiplier': rate_multiplier,
'international': international,
'phone_prefix': phone_prefix,
'created_by_id': created_by_id,
'postage': postage
}
notification_history = NotificationHistory(**data)
db.session.add(notification_history)
db.session.commit()
return notification_history
def create_job(
template,
notification_count=1,
created_at=None,
job_status='pending',
scheduled_for=None,
processing_started=None,
processing_finished=None,
original_file_name='some.csv',
archived=False,
contact_list_id=None,
):
data = {
'id': uuid.uuid4(),
'service_id': template.service_id,
'service': template.service,
'template_id': template.id,
'template_version': template.version,
'original_file_name': original_file_name,
'notification_count': notification_count,
'created_at': created_at or datetime.utcnow(),
'created_by': template.created_by,
'job_status': job_status,
'scheduled_for': scheduled_for,
'processing_started': processing_started,
'processing_finished': processing_finished,
'archived': archived,
'contact_list_id': contact_list_id,
}
job = Job(**data)
dao_create_job(job)
return job
def create_service_permission(service_id, permission=EMAIL_TYPE):
dao_add_service_permission(
service_id if service_id else create_service().id, permission)
service_permissions = ServicePermission.query.all()
return service_permissions
def create_inbound_sms(
service,
notify_number=None,
user_number='447700900111',
provider_date=None,
provider_reference=None,
content='Hello',
provider="mmg",
created_at=None
):
if not service.inbound_number:
create_inbound_number(
# create random inbound number
            notify_number or '07{:09}'.format(random.randint(0, int(1e9) - 1)),
provider=provider,
service_id=service.id
)
inbound = InboundSms(
service=service,
created_at=created_at or datetime.utcnow(),
notify_number=service.get_inbound_number(),
user_number=user_number,
provider_date=provider_date or datetime.utcnow(),
provider_reference=provider_reference or 'foo',
content=content,
provider=provider
)
dao_create_inbound_sms(inbound)
return inbound
def create_service_inbound_api(
service,
url="https://something.com",
bearer_token="<PASSWORD>",
):
service_inbound_api = ServiceInboundApi(service_id=service.id,
url=url,
bearer_token=bearer_token,
updated_by_id=service.users[0].id
)
save_service_inbound_api(service_inbound_api)
return service_inbound_api
def create_service_callback_api(
service,
url="https://something.com",
bearer_token="<PASSWORD>",
callback_type="delivery_status"
):
service_callback_api = ServiceCallbackApi(service_id=service.id,
url=url,
bearer_token=bearer_token,
updated_by_id=service.users[0].id,
callback_type=callback_type
)
save_service_callback_api(service_callback_api)
return service_callback_api
def create_email_branding(colour='blue', logo='test_x2.png', name='test_org_1', text='DisplayName'):
data = {
'colour': colour,
'logo': logo,
'name': name,
'text': text,
}
email_branding = EmailBranding(**data)
dao_create_email_branding(email_branding)
return email_branding
def create_rate(start_date, value, notification_type):
rate = Rate(
id=uuid.uuid4(),
valid_from=start_date,
rate=value,
notification_type=notification_type
)
db.session.add(rate)
db.session.commit()
return rate
def create_letter_rate(start_date=None, end_date=None, crown=True, sheet_count=1, rate=0.33, post_class='second'):
if start_date is None:
start_date = datetime(2016, 1, 1)
rate = LetterRate(
id=uuid.uuid4(),
start_date=start_date,
end_date=end_date,
crown=crown,
sheet_count=sheet_count,
rate=rate,
post_class=post_class
)
db.session.add(rate)
db.session.commit()
return rate
def create_api_key(service, key_type=KEY_TYPE_NORMAL, key_name=None):
id_ = uuid.uuid4()
name = key_name if key_name else '{} api key {}'.format(key_type, id_)
api_key = ApiKey(
service=service,
name=name,
created_by=service.created_by,
key_type=key_type,
id=id_,
secret=uuid.uuid4()
)
db.session.add(api_key)
db.session.commit()
return api_key
def create_inbound_number(number, provider='mmg', active=True, service_id=None):
inbound_number = InboundNumber(
id=uuid.uuid4(),
number=number,
provider=provider,
active=active,
service_id=service_id
)
db.session.add(inbound_number)
db.session.commit()
return inbound_number
def create_reply_to_email(
service,
email_address,
is_default=True,
archived=False
):
data = {
'service': service,
'email_address': email_address,
'is_default': is_default,
'archived': archived,
}
reply_to = ServiceEmailReplyTo(**data)
db.session.add(reply_to)
db.session.commit()
return reply_to
def create_service_sms_sender(
service,
sms_sender,
is_default=True,
inbound_number_id=None,
archived=False
):
data = {
'service_id': service.id,
'sms_sender': sms_sender,
'is_default': is_default,
'inbound_number_id': inbound_number_id,
'archived': archived,
}
service_sms_sender = ServiceSmsSender(**data)
db.session.add(service_sms_sender)
db.session.commit()
return service_sms_sender
def create_letter_contact(
service,
contact_block,
is_default=True,
archived=False
):
data = {
'service': service,
'contact_block': contact_block,
'is_default': is_default,
'archived': archived,
}
letter_content = ServiceLetterContact(**data)
db.session.add(letter_content)
db.session.commit()
return letter_content
def create_annual_billing(
service_id, free_sms_fragment_limit, financial_year_start
):
annual_billing = AnnualBilling(
service_id=service_id,
free_sms_fragment_limit=free_sms_fragment_limit,
financial_year_start=financial_year_start
)
db.session.add(annual_billing)
db.session.commit()
return annual_billing
def create_domain(domain, organisation_id):
domain = Domain(domain=domain, organisation_id=organisation_id)
db.session.add(domain)
db.session.commit()
return domain
def create_organisation(name='test_org_1', active=True, organisation_type=None, domains=None, organisation_id=None):
data = {
'id': organisation_id,
'name': name,
'active': active,
'organisation_type': organisation_type,
}
organisation = Organisation(**data)
dao_create_organisation(organisation)
for domain in domains or []:
create_domain(domain, organisation.id)
return organisation
def create_invited_org_user(organisation, invited_by, email_address='<EMAIL>'):
invited_org_user = InvitedOrganisationUser(
email_address=email_address,
invited_by=invited_by,
organisation=organisation,
)
save_invited_org_user(invited_org_user)
return invited_org_user
def create_daily_sorted_letter(billing_day=None,
file_name="Notify-20180118123.rs.txt",
unsorted_count=0,
sorted_count=0):
daily_sorted_letter = DailySortedLetter(
billing_day=billing_day or date(2018, 1, 18),
file_name=file_name,
unsorted_count=unsorted_count,
sorted_count=sorted_count
)
db.session.add(daily_sorted_letter)
db.session.commit()
return daily_sorted_letter
def create_ft_billing(bst_date,
template,
*,
provider='test',
rate_multiplier=1,
international=False,
rate=0,
billable_unit=1,
notifications_sent=1,
postage='none'
):
data = FactBilling(bst_date=bst_date,
service_id=template.service_id,
template_id=template.id,
notification_type=template.template_type,
provider=provider,
rate_multiplier=rate_multiplier,
international=international,
rate=rate,
billable_units=billable_unit,
notifications_sent=notifications_sent,
postage=postage)
db.session.add(data)
db.session.commit()
return data
def create_ft_notification_status(
bst_date,
notification_type='sms',
service=None,
template=None,
job=None,
key_type='normal',
notification_status='delivered',
count=1
):
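    # Resolve service, template and notification_type from whichever of job,
    # template or service was supplied, creating defaults where needed.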
if job:
template = job.template
if template:
service = template.service
notification_type = template.template_type
else:
if not service:
service = create_service()
template = create_template(service=service, template_type=notification_type)
data = FactNotificationStatus(
bst_date=bst_date,
template_id=template.id,
service_id=service.id,
job_id=job.id if job else uuid.UUID(int=0),
notification_type=notification_type,
key_type=key_type,
notification_status=notification_status,
notification_count=count
)
db.session.add(data)
db.session.commit()
return data
def create_service_guest_list(service, email_address=None, mobile_number=None):
if email_address:
guest_list_user = ServiceGuestList.from_string(service.id, EMAIL_TYPE, email_address)
elif mobile_number:
guest_list_user = ServiceGuestList.from_string(service.id, MOBILE_TYPE, mobile_number)
else:
guest_list_user = ServiceGuestList.from_string(service.id, EMAIL_TYPE, '<EMAIL>')
db.session.add(guest_list_user)
db.session.commit()
return guest_list_user
def create_complaint(service=None,
notification=None,
created_at=None):
if not service:
service = create_service()
if not notification:
template = create_template(service=service, template_type='email')
notification = create_notification(template=template)
complaint = Complaint(notification_id=notification.id,
service_id=service.id,
ses_feedback_id=str(uuid.uuid4()),
complaint_type='abuse',
complaint_date=datetime.utcnow(),
created_at=created_at if created_at else datetime.now()
)
db.session.add(complaint)
db.session.commit()
return complaint
def ses_complaint_callback_malformed_message_id():
return {
'Signature': 'bb',
'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde',
'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com',
'TopicArn': 'arn:ses_notifications', 'Type': 'Notification',
'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None,
'Message': '{"notificationType":"Complaint","complaint":{"complainedRecipients":[{"emailAddress":"<EMAIL>"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"192.168.3.11","sendingAccountId":"888450439860","badMessageId":"ref1","destination":["<EMAIL>"]}}', # noqa
'SigningCertUrl': 'https://sns.pem'
}
def ses_complaint_callback_with_missing_complaint_type():
"""
https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html#complaint-object
"""
return {
'Signature': 'bb',
'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde',
'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com',
'TopicArn': 'arn:ses_notifications', 'Type': 'Notification',
'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None,
'Message': '{"notificationType":"Complaint","complaint":{"complainedRecipients":[{"emailAddress":"<EMAIL>"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"192.168.3.11","sendingAccountId":"888450439860","messageId":"ref1","destination":["<EMAIL>"]}}', # noqa
'SigningCertUrl': 'https://sns.pem'
}
def ses_complaint_callback():
"""
https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html#complaint-object
"""
return {
'Signature': 'bb',
'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde',
'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com',
'TopicArn': 'arn:ses_notifications', 'Type': 'Notification',
'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None,
'Message': '{"notificationType":"Complaint","complaint":{"complaintFeedbackType": "abuse", "complainedRecipients":[{"emailAddress":"<EMAIL>"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"192.168.3.11","sendingAccountId":"888450439860","messageId":"ref1","destination":["<EMAIL>"]}}', # noqa
'SigningCertUrl': 'https://sns.pem'
}
def ses_notification_callback():
return '{\n "Type" : "Notification",\n "MessageId" : "ref1",' \
'\n "TopicArn" : "arn:aws:sns:eu-west-1:123456789012:testing",' \
'\n "Message" : "{\\"notificationType\\":\\"Delivery\\",' \
'\\"mail\\":{\\"timestamp\\":\\"2016-03-14T12:35:25.909Z\\",' \
'\\"source\\":\\"test@test-domain.com\\",' \
'\\"sourceArn\\":\\"arn:aws:ses:eu-west-1:123456789012:identity/testing-notify\\",' \
'\\"sendingAccountId\\":\\"123456789012\\",' \
'\\"messageId\\":\\"ref1\\",' \
'\\"destination\\":[\\"<EMAIL>\\"]},' \
'\\"delivery\\":{\\"timestamp\\":\\"2016-03-14T12:35:26.567Z\\",' \
'\\"processingTimeMillis\\":658,' \
'\\"recipients\\":[\\"<EMAIL>\\"],' \
'\\"smtpResponse\\":\\"250 2.0.0 OK 1457958926 uo5si26480932wjc.221 - gsmtp\\",' \
'\\"reportingMTA\\":\\"a6-238.smtp-out.eu-west-1.amazonses.com\\"}}",' \
'\n "Timestamp" : "2016-03-14T12:35:26.665Z",\n "SignatureVersion" : "1",' \
'\n "Signature" : "X8d7eTAOZ6wlnrdVVPYanrAlsX0SMPfOzhoTEBnQqYkrNWTqQY91C0f3bxtPdUhUt' \
'OowyPAOkTQ4KnZuzphfhVb2p1MyVYMxNKcBFB05/qaCX99+92fjw4x9LeUOwyGwMv5F0Vkfi5qZCcEw69uVrhYL' \
'VSTFTrzi/yCtru+yFULMQ6UhbY09GwiP6hjxZMVr8aROQy5lLHglqQzOuSZ4KeD85JjifHdKzlx8jjQ+uj+FLzHXPMA' \
'PmPU1JK9kpoHZ1oPshAFgPDpphJe+HwcJ8ezmk+3AEUr3wWli3xF+49y8Z2anASSVp6YI2YP95UT8Rlh3qT3T+V9V8rbSVislxA==",' \
'\n "SigningCertURL" : "https://sns.eu-west-1.amazonaws.com/SimpleNotificationService-bb750' \
'dd426d95ee9390147a5624348ee.pem",' \
'\n "UnsubscribeURL" : "https://sns.eu-west-1.amazonaws.com/?Action=Unsubscribe&S' \
'subscriptionArn=arn:aws:sns:eu-west-1:302763885840:preview-emails:d6aad3ef-83d6-4cf3-a470-54e2e75916da"\n}'
def create_service_data_retention(
service,
notification_type='sms',
days_of_retention=3
):
data_retention = insert_service_data_retention(
service_id=service.id,
notification_type=notification_type,
days_of_retention=days_of_retention
)
return data_retention
def create_invited_user(service=None,
to_email_address=None):
if service is None:
service = create_service()
if to_email_address is None:
to_email_address = '<EMAIL>'
from_user = service.users[0]
data = {
'service': service,
'email_address': to_email_address,
'from_user': from_user,
'permissions': 'send_messages,manage_service,manage_api_keys',
'folder_permissions': [str(uuid.uuid4()), str(uuid.uuid4())]
}
invited_user = InvitedUser(**data)
save_invited_user(invited_user)
return invited_user
def create_template_folder(service, name='foo', parent=None):
tf = TemplateFolder(name=name, service=service, parent=parent)
db.session.add(tf)
db.session.commit()
return tf
def create_letter_branding(name='HM Government', filename='hm-government'):
test_domain_branding = LetterBranding(name=name,
filename=filename,
)
db.session.add(test_domain_branding)
db.session.commit()
return test_domain_branding
def set_up_usage_data(start_date):
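    # Create services, organisations and billing facts spread around
    # start_date so usage/billing reports have data to aggregate.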
year = int(start_date.strftime('%Y'))
one_week_earlier = start_date - timedelta(days=7)
two_days_later = start_date + timedelta(days=2)
one_week_later = start_date + timedelta(days=7)
one_month_later = start_date + timedelta(days=31)
service = create_service(service_name='a - with sms and letter')
letter_template_1 = create_template(service=service, template_type='letter')
sms_template_1 = create_template(service=service, template_type='sms')
create_annual_billing(service_id=service.id, free_sms_fragment_limit=10, financial_year_start=year)
org = create_organisation(name="Org for {}".format(service.name))
dao_add_service_to_organisation(service=service, organisation_id=org.id)
service_2 = create_service(service_name='b - emails')
email_template = create_template(service=service_2, template_type='email')
org_2 = create_organisation(name='Org for {}'.format(service_2.name))
dao_add_service_to_organisation(service=service_2, organisation_id=org_2.id)
service_3 = create_service(service_name='c - letters only')
letter_template_3 = create_template(service=service_3, template_type='letter')
org_3 = create_organisation(name="Org for {}".format(service_3.name))
dao_add_service_to_organisation(service=service_3, organisation_id=org_3.id)
service_4 = create_service(service_name='d - service without org')
letter_template_4 = create_template(service=service_4, template_type='letter')
service_sms_only = create_service(service_name='b - chargeable sms')
sms_template = create_template(service=service_sms_only, template_type='sms')
create_annual_billing(service_id=service_sms_only.id, free_sms_fragment_limit=10, financial_year_start=year)
create_ft_billing(bst_date=one_week_earlier, template=sms_template_1, billable_unit=2, rate=0.11)
create_ft_billing(bst_date=start_date, template=sms_template_1, billable_unit=2, rate=0.11)
create_ft_billing(bst_date=two_days_later, template=sms_template_1, billable_unit=1, rate=0.11)
create_ft_billing(bst_date=one_week_later, template=letter_template_1,
notifications_sent=2, billable_unit=1, rate=.35, postage='first')
create_ft_billing(bst_date=one_month_later, template=letter_template_1,
notifications_sent=4, billable_unit=2, rate=.45, postage='second')
create_ft_billing(bst_date=one_week_later, template=letter_template_1,
notifications_sent=2, billable_unit=2, rate=.45, postage='second')
create_ft_billing(bst_date=one_week_earlier, template=sms_template, rate=0.11, billable_unit=12)
create_ft_billing(bst_date=two_days_later, template=sms_template, rate=0.11)
create_ft_billing(bst_date=one_week_later, template=sms_template, billable_unit=2, rate=0.11)
create_ft_billing(bst_date=start_date, template=letter_template_3,
notifications_sent=2, billable_unit=3, rate=.50, postage='first')
create_ft_billing(bst_date=one_week_later, template=letter_template_3,
notifications_sent=8, billable_unit=5, rate=.65, postage='second')
create_ft_billing(bst_date=one_month_later, template=letter_template_3,
notifications_sent=12, billable_unit=5, rate=.65, postage='second')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=7, billable_unit=4, rate=1.55, postage='rest-of-world')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=8, billable_unit=4, rate=1.55, postage='europe')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=2, billable_unit=1, rate=.35, postage='second')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=1, billable_unit=1, rate=.50, postage='first')
create_ft_billing(bst_date=start_date, template=email_template, notifications_sent=10)
return org, org_3, service, service_3, service_4, service_sms_only, org_2, service_2
def create_returned_letter(service=None, reported_at=None, notification_id=None):
if not service:
service = create_service(service_name='a - with sms and letter')
returned_letter = ReturnedLetter(
service_id=service.id,
reported_at=reported_at or datetime.utcnow(),
notification_id=notification_id or uuid.uuid4(),
created_at=datetime.utcnow(),
)
db.session.add(returned_letter)
db.session.commit()
return returned_letter
def create_service_contact_list(
service=None,
original_file_name='EmergencyContactList.xls',
row_count=100,
template_type='email',
created_by_id=None,
archived=False,
):
if not service:
service = create_service(service_name='service for contact list', user=create_user())
contact_list = ServiceContactList(
service_id=service.id,
original_file_name=original_file_name,
row_count=row_count,
template_type=template_type,
created_by_id=created_by_id or service.users[0].id,
created_at=datetime.utcnow(),
archived=archived,
)
db.session.add(contact_list)
db.session.commit()
return contact_list
def create_broadcast_message(
template,
created_by=None,
personalisation=None,
status=BroadcastStatusType.DRAFT,
starts_at=None,
finishes_at=None,
areas=None,
):
broadcast_message = BroadcastMessage(
service_id=template.service_id,
template_id=template.id,
template_version=template.version,
personalisation=personalisation or {},
status=status,
starts_at=starts_at,
finishes_at=finishes_at,
created_by_id=created_by.id if created_by else template.created_by_id,
areas=areas or {},
)
db.session.add(broadcast_message)
db.session.commit()
return broadcast_message
def create_broadcast_event(
broadcast_message,
sent_at=None,
message_type='alert',
transmitted_content=None,
transmitted_areas=None,
transmitted_sender=None,
transmitted_starts_at=None,
transmitted_finishes_at=None,
):
b_e = BroadcastEvent(
service=broadcast_message.service,
broadcast_message=broadcast_message,
sent_at=sent_at or datetime.utcnow(),
message_type=message_type,
transmitted_content=transmitted_content or {'body': 'this is an emergency broadcast message'},
transmitted_areas=transmitted_areas or broadcast_message.areas,
transmitted_sender=transmitted_sender or 'www.notifications.service.gov.uk',
transmitted_starts_at=transmitted_starts_at,
transmitted_finishes_at=transmitted_finishes_at or datetime.utcnow(),
)
db.session.add(b_e)
db.session.commit()
return b_e
def create_broadcast_provider_message(
broadcast_event,
provider,
status='sending'
):
broadcast_provider_message_id = uuid.uuid4()
provider_message = BroadcastProviderMessage(
id=broadcast_provider_message_id,
broadcast_event=broadcast_event,
provider=provider,
status=status,
)
db.session.add(provider_message)
db.session.commit()
provider_message_number = None
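    # Only the Vodafone provider gets an associated BroadcastProviderMessageNumber.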
if provider == BroadcastProvider.VODAFONE:
provider_message_number = BroadcastProviderMessageNumber(
broadcast_provider_message_id=broadcast_provider_message_id)
db.session.add(provider_message_number)
db.session.commit()
return provider_message, provider_message_number
| import random
import uuid
from datetime import datetime, date, timedelta
from app import db
from app.dao.email_branding_dao import dao_create_email_branding
from app.dao.inbound_sms_dao import dao_create_inbound_sms
from app.dao.invited_org_user_dao import save_invited_org_user
from app.dao.invited_user_dao import save_invited_user
from app.dao.jobs_dao import dao_create_job
from app.dao.notifications_dao import (
dao_create_notification,
)
from app.dao.organisation_dao import dao_create_organisation, dao_add_service_to_organisation
from app.dao.permissions_dao import permission_dao
from app.dao.service_callback_api_dao import save_service_callback_api
from app.dao.service_data_retention_dao import insert_service_data_retention
from app.dao.service_inbound_api_dao import save_service_inbound_api
from app.dao.service_permissions_dao import dao_add_service_permission
from app.dao.service_sms_sender_dao import update_existing_sms_sender_with_inbound_number, dao_update_service_sms_sender
from app.dao.services_dao import dao_create_service, dao_add_user_to_service
from app.dao.templates_dao import dao_create_template, dao_update_template
from app.dao.users_dao import save_model_user
from app.models import (
ApiKey,
DailySortedLetter,
InboundSms,
InboundNumber,
Job,
Notification,
EmailBranding,
LetterRate,
Organisation,
Permission,
Rate,
Service,
ServiceEmailReplyTo,
ServiceInboundApi,
ServiceCallbackApi,
ServiceLetterContact,
ServicePermission,
ServiceSmsSender,
ServiceGuestList,
Template,
User,
EMAIL_TYPE,
MOBILE_TYPE,
SMS_TYPE,
LETTER_TYPE,
KEY_TYPE_NORMAL,
AnnualBilling,
InvitedOrganisationUser,
FactBilling,
FactNotificationStatus,
Complaint,
InvitedUser,
TemplateFolder,
LetterBranding,
Domain,
NotificationHistory,
ReturnedLetter,
ServiceContactList,
BroadcastMessage,
BroadcastStatusType,
BroadcastEvent,
BroadcastProvider,
BroadcastProviderMessage,
BroadcastProviderMessageNumber
)
def create_user(
*,
mobile_number="+447700900986",
email="<EMAIL>",
state='active',
id_=None,
name="Test User"
):
data = {
'id': id_ or uuid.uuid4(),
'name': name,
'email_address': email,
'password': 'password',
'mobile_number': mobile_number,
'state': state
}
user = User.query.filter_by(email_address=email).first()
if not user:
user = User(**data)
save_model_user(user, validated_email_access=True)
return user
def create_permissions(user, service, *permissions):
permissions = [
Permission(service_id=service.id, user_id=user.id, permission=p)
for p in permissions
]
permission_dao.set_user_service_permission(user, service, permissions, _commit=True)
def create_service(
user=None,
service_name="Sample service",
service_id=None,
restricted=False,
count_as_live=True,
service_permissions=None,
research_mode=False,
active=True,
email_from=None,
prefix_sms=True,
message_limit=1000,
organisation_type='central',
check_if_service_exists=False,
go_live_user=None,
go_live_at=None,
crown=True,
organisation=None
):
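    # When check_if_service_exists is set, reuse an existing service with the
    # same name instead of creating a duplicate.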
if check_if_service_exists:
service = Service.query.filter_by(name=service_name).first()
if (not check_if_service_exists) or (check_if_service_exists and not service):
service = Service(
name=service_name,
message_limit=message_limit,
restricted=restricted,
email_from=email_from if email_from else service_name.lower().replace(' ', '.'),
created_by=user if user else create_user(email='{}<EMAIL>'.format(uuid.uuid4())),
prefix_sms=prefix_sms,
organisation_type=organisation_type,
organisation=organisation,
go_live_user=go_live_user,
go_live_at=go_live_at,
crown=crown
)
dao_create_service(
service,
service.created_by,
service_id,
service_permissions=service_permissions,
)
service.active = active
service.research_mode = research_mode
service.count_as_live = count_as_live
else:
if user and user not in service.users:
dao_add_user_to_service(service, user)
return service
def create_service_with_inbound_number(
inbound_number='1234567',
*args, **kwargs
):
service = create_service(*args, **kwargs)
sms_sender = ServiceSmsSender.query.filter_by(service_id=service.id).first()
inbound = create_inbound_number(number=inbound_number, service_id=service.id)
update_existing_sms_sender_with_inbound_number(service_sms_sender=sms_sender,
sms_sender=inbound_number,
inbound_number_id=inbound.id)
return service
def create_service_with_defined_sms_sender(
sms_sender_value='1234567',
*args, **kwargs
):
service = create_service(*args, **kwargs)
sms_sender = ServiceSmsSender.query.filter_by(service_id=service.id).first()
dao_update_service_sms_sender(service_id=service.id,
service_sms_sender_id=sms_sender.id,
is_default=True,
sms_sender=sms_sender_value)
return service
def create_template(
service,
template_type=SMS_TYPE,
template_name=None,
subject='Template subject',
content='Dear Sir/Madam, Hello. Yours Truly, The Government.',
reply_to=None,
hidden=False,
archived=False,
folder=None,
postage=None,
process_type='normal',
contact_block_id=None
):
data = {
'name': template_name or '{} Template Name'.format(template_type),
'template_type': template_type,
'content': content,
'service': service,
'created_by': service.created_by,
'reply_to': reply_to,
'hidden': hidden,
'folder': folder,
'process_type': process_type,
}
if template_type == LETTER_TYPE:
data["postage"] = postage or "second"
if contact_block_id:
data['service_letter_contact_id'] = contact_block_id
if template_type != SMS_TYPE:
data['subject'] = subject
template = Template(**data)
dao_create_template(template)
if archived:
template.archived = archived
dao_update_template(template)
return template
def create_notification(
template=None,
job=None,
job_row_number=None,
to_field=None,
status='created',
reference=None,
created_at=None,
sent_at=None,
updated_at=None,
billable_units=1,
personalisation=None,
api_key=None,
key_type=KEY_TYPE_NORMAL,
sent_by=None,
client_reference=None,
rate_multiplier=None,
international=False,
phone_prefix=None,
scheduled_for=None,
normalised_to=None,
one_off=False,
reply_to_text=None,
created_by_id=None,
postage=None,
document_download_count=None,
):
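    # A notification is built from either a job (which carries its own
    # template) or an explicit template; remaining fields default below.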
assert job or template
if job:
template = job.template
if created_at is None:
created_at = datetime.utcnow()
if to_field is None:
to_field = '+447700900855' if template.template_type == SMS_TYPE else 'test@<EMAIL>'
if status not in ('created', 'validation-failed', 'virus-scan-failed', 'pending-virus-check'):
sent_at = sent_at or datetime.utcnow()
updated_at = updated_at or datetime.utcnow()
if not one_off and (job is None and api_key is None):
        # no api key was specified in the test - look up an existing one or create it
api_key = ApiKey.query.filter(ApiKey.service == template.service, ApiKey.key_type == key_type).first()
if not api_key:
api_key = create_api_key(template.service, key_type=key_type)
if template.template_type == 'letter' and postage is None:
postage = 'second'
data = {
'id': uuid.uuid4(),
'to': to_field,
'job_id': job and job.id,
'job': job,
'service_id': template.service.id,
'service': template.service,
'template_id': template.id,
'template_version': template.version,
'status': status,
'reference': reference,
'created_at': created_at,
'sent_at': sent_at,
'billable_units': billable_units,
'personalisation': personalisation,
'notification_type': template.template_type,
'api_key': api_key,
'api_key_id': api_key and api_key.id,
'key_type': api_key.key_type if api_key else key_type,
'sent_by': sent_by,
'updated_at': updated_at,
'client_reference': client_reference,
'job_row_number': job_row_number,
'rate_multiplier': rate_multiplier,
'international': international,
'phone_prefix': phone_prefix,
'normalised_to': normalised_to,
'reply_to_text': reply_to_text,
'created_by_id': created_by_id,
'postage': postage,
'document_download_count': document_download_count,
}
notification = Notification(**data)
dao_create_notification(notification)
return notification
def create_notification_history(
template=None,
job=None,
job_row_number=None,
status='created',
reference=None,
created_at=None,
sent_at=None,
updated_at=None,
billable_units=1,
api_key=None,
key_type=KEY_TYPE_NORMAL,
sent_by=None,
client_reference=None,
rate_multiplier=None,
international=False,
phone_prefix=None,
created_by_id=None,
postage=None,
id=None
):
assert job or template
if job:
template = job.template
if created_at is None:
created_at = datetime.utcnow()
if status != 'created':
sent_at = sent_at or datetime.utcnow()
updated_at = updated_at or datetime.utcnow()
if template.template_type == 'letter' and postage is None:
postage = 'second'
data = {
'id': id or uuid.uuid4(),
'job_id': job and job.id,
'job': job,
'service_id': template.service.id,
'service': template.service,
'template_id': template.id,
'template_version': template.version,
'status': status,
'reference': reference,
'created_at': created_at,
'sent_at': sent_at,
'billable_units': billable_units,
'notification_type': template.template_type,
'api_key': api_key,
'api_key_id': api_key and api_key.id,
'key_type': api_key.key_type if api_key else key_type,
'sent_by': sent_by,
'updated_at': updated_at,
'client_reference': client_reference,
'job_row_number': job_row_number,
'rate_multiplier': rate_multiplier,
'international': international,
'phone_prefix': phone_prefix,
'created_by_id': created_by_id,
'postage': postage
}
notification_history = NotificationHistory(**data)
db.session.add(notification_history)
db.session.commit()
return notification_history
def create_job(
template,
notification_count=1,
created_at=None,
job_status='pending',
scheduled_for=None,
processing_started=None,
processing_finished=None,
original_file_name='some.csv',
archived=False,
contact_list_id=None,
):
data = {
'id': uuid.uuid4(),
'service_id': template.service_id,
'service': template.service,
'template_id': template.id,
'template_version': template.version,
'original_file_name': original_file_name,
'notification_count': notification_count,
'created_at': created_at or datetime.utcnow(),
'created_by': template.created_by,
'job_status': job_status,
'scheduled_for': scheduled_for,
'processing_started': processing_started,
'processing_finished': processing_finished,
'archived': archived,
'contact_list_id': contact_list_id,
}
job = Job(**data)
dao_create_job(job)
return job
def create_service_permission(service_id, permission=EMAIL_TYPE):
dao_add_service_permission(
service_id if service_id else create_service().id, permission)
service_permissions = ServicePermission.query.all()
return service_permissions
def create_inbound_sms(
service,
notify_number=None,
user_number='447700900111',
provider_date=None,
provider_reference=None,
content='Hello',
provider="mmg",
created_at=None
):
if not service.inbound_number:
create_inbound_number(
# create random inbound number
            notify_number or '07{:09}'.format(random.randint(0, int(1e9) - 1)),
provider=provider,
service_id=service.id
)
inbound = InboundSms(
service=service,
created_at=created_at or datetime.utcnow(),
notify_number=service.get_inbound_number(),
user_number=user_number,
provider_date=provider_date or datetime.utcnow(),
provider_reference=provider_reference or 'foo',
content=content,
provider=provider
)
dao_create_inbound_sms(inbound)
return inbound
def create_service_inbound_api(
service,
url="https://something.com",
bearer_token="<PASSWORD>",
):
service_inbound_api = ServiceInboundApi(service_id=service.id,
url=url,
bearer_token=bearer_token,
updated_by_id=service.users[0].id
)
save_service_inbound_api(service_inbound_api)
return service_inbound_api
def create_service_callback_api(
service,
url="https://something.com",
bearer_token="<PASSWORD>",
callback_type="delivery_status"
):
service_callback_api = ServiceCallbackApi(service_id=service.id,
url=url,
bearer_token=bearer_token,
updated_by_id=service.users[0].id,
callback_type=callback_type
)
save_service_callback_api(service_callback_api)
return service_callback_api
def create_email_branding(colour='blue', logo='test_x2.png', name='test_org_1', text='DisplayName'):
data = {
'colour': colour,
'logo': logo,
'name': name,
'text': text,
}
email_branding = EmailBranding(**data)
dao_create_email_branding(email_branding)
return email_branding
def create_rate(start_date, value, notification_type):
rate = Rate(
id=uuid.uuid4(),
valid_from=start_date,
rate=value,
notification_type=notification_type
)
db.session.add(rate)
db.session.commit()
return rate
def create_letter_rate(start_date=None, end_date=None, crown=True, sheet_count=1, rate=0.33, post_class='second'):
if start_date is None:
start_date = datetime(2016, 1, 1)
rate = LetterRate(
id=uuid.uuid4(),
start_date=start_date,
end_date=end_date,
crown=crown,
sheet_count=sheet_count,
rate=rate,
post_class=post_class
)
db.session.add(rate)
db.session.commit()
return rate
def create_api_key(service, key_type=KEY_TYPE_NORMAL, key_name=None):
id_ = uuid.uuid4()
name = key_name if key_name else '{} api key {}'.format(key_type, id_)
api_key = ApiKey(
service=service,
name=name,
created_by=service.created_by,
key_type=key_type,
id=id_,
secret=uuid.uuid4()
)
db.session.add(api_key)
db.session.commit()
return api_key
def create_inbound_number(number, provider='mmg', active=True, service_id=None):
inbound_number = InboundNumber(
id=uuid.uuid4(),
number=number,
provider=provider,
active=active,
service_id=service_id
)
db.session.add(inbound_number)
db.session.commit()
return inbound_number
def create_reply_to_email(
service,
email_address,
is_default=True,
archived=False
):
data = {
'service': service,
'email_address': email_address,
'is_default': is_default,
'archived': archived,
}
reply_to = ServiceEmailReplyTo(**data)
db.session.add(reply_to)
db.session.commit()
return reply_to
def create_service_sms_sender(
service,
sms_sender,
is_default=True,
inbound_number_id=None,
archived=False
):
data = {
'service_id': service.id,
'sms_sender': sms_sender,
'is_default': is_default,
'inbound_number_id': inbound_number_id,
'archived': archived,
}
service_sms_sender = ServiceSmsSender(**data)
db.session.add(service_sms_sender)
db.session.commit()
return service_sms_sender
def create_letter_contact(
service,
contact_block,
is_default=True,
archived=False
):
data = {
'service': service,
'contact_block': contact_block,
'is_default': is_default,
'archived': archived,
}
letter_content = ServiceLetterContact(**data)
db.session.add(letter_content)
db.session.commit()
return letter_content
def create_annual_billing(
service_id, free_sms_fragment_limit, financial_year_start
):
annual_billing = AnnualBilling(
service_id=service_id,
free_sms_fragment_limit=free_sms_fragment_limit,
financial_year_start=financial_year_start
)
db.session.add(annual_billing)
db.session.commit()
return annual_billing
def create_domain(domain, organisation_id):
domain = Domain(domain=domain, organisation_id=organisation_id)
db.session.add(domain)
db.session.commit()
return domain
def create_organisation(name='test_org_1', active=True, organisation_type=None, domains=None, organisation_id=None):
data = {
'id': organisation_id,
'name': name,
'active': active,
'organisation_type': organisation_type,
}
organisation = Organisation(**data)
dao_create_organisation(organisation)
for domain in domains or []:
create_domain(domain, organisation.id)
return organisation
def create_invited_org_user(organisation, invited_by, email_address='<EMAIL>'):
invited_org_user = InvitedOrganisationUser(
email_address=email_address,
invited_by=invited_by,
organisation=organisation,
)
save_invited_org_user(invited_org_user)
return invited_org_user
def create_daily_sorted_letter(billing_day=None,
file_name="Notify-20180118123.rs.txt",
unsorted_count=0,
sorted_count=0):
daily_sorted_letter = DailySortedLetter(
billing_day=billing_day or date(2018, 1, 18),
file_name=file_name,
unsorted_count=unsorted_count,
sorted_count=sorted_count
)
db.session.add(daily_sorted_letter)
db.session.commit()
return daily_sorted_letter
def create_ft_billing(bst_date,
template,
*,
provider='test',
rate_multiplier=1,
international=False,
rate=0,
billable_unit=1,
notifications_sent=1,
postage='none'
):
data = FactBilling(bst_date=bst_date,
service_id=template.service_id,
template_id=template.id,
notification_type=template.template_type,
provider=provider,
rate_multiplier=rate_multiplier,
international=international,
rate=rate,
billable_units=billable_unit,
notifications_sent=notifications_sent,
postage=postage)
db.session.add(data)
db.session.commit()
return data
def create_ft_notification_status(
bst_date,
notification_type='sms',
service=None,
template=None,
job=None,
key_type='normal',
notification_status='delivered',
count=1
):
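    # Resolve service, template and notification_type from whichever of job,
    # template or service was supplied, creating defaults where needed.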
if job:
template = job.template
if template:
service = template.service
notification_type = template.template_type
else:
if not service:
service = create_service()
template = create_template(service=service, template_type=notification_type)
data = FactNotificationStatus(
bst_date=bst_date,
template_id=template.id,
service_id=service.id,
job_id=job.id if job else uuid.UUID(int=0),
notification_type=notification_type,
key_type=key_type,
notification_status=notification_status,
notification_count=count
)
db.session.add(data)
db.session.commit()
return data
def create_service_guest_list(service, email_address=None, mobile_number=None):
if email_address:
guest_list_user = ServiceGuestList.from_string(service.id, EMAIL_TYPE, email_address)
elif mobile_number:
guest_list_user = ServiceGuestList.from_string(service.id, MOBILE_TYPE, mobile_number)
else:
guest_list_user = ServiceGuestList.from_string(service.id, EMAIL_TYPE, '<EMAIL>')
db.session.add(guest_list_user)
db.session.commit()
return guest_list_user
def create_complaint(service=None,
notification=None,
created_at=None):
if not service:
service = create_service()
if not notification:
template = create_template(service=service, template_type='email')
notification = create_notification(template=template)
complaint = Complaint(notification_id=notification.id,
service_id=service.id,
ses_feedback_id=str(uuid.uuid4()),
complaint_type='abuse',
complaint_date=datetime.utcnow(),
created_at=created_at if created_at else datetime.now()
)
db.session.add(complaint)
db.session.commit()
return complaint
def ses_complaint_callback_malformed_message_id():
return {
'Signature': 'bb',
'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde',
'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com',
'TopicArn': 'arn:ses_notifications', 'Type': 'Notification',
'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None,
'Message': '{"notificationType":"Complaint","complaint":{"complainedRecipients":[{"emailAddress":"<EMAIL>"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"192.168.3.11","sendingAccountId":"888450439860","badMessageId":"ref1","destination":["<EMAIL>"]}}', # noqa
'SigningCertUrl': 'https://sns.pem'
}
def ses_complaint_callback_with_missing_complaint_type():
"""
https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html#complaint-object
"""
return {
'Signature': 'bb',
'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde',
'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com',
'TopicArn': 'arn:ses_notifications', 'Type': 'Notification',
'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None,
'Message': '{"notificationType":"Complaint","complaint":{"complainedRecipients":[{"emailAddress":"<EMAIL>"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"192.168.3.11","sendingAccountId":"888450439860","messageId":"ref1","destination":["<EMAIL>"]}}', # noqa
'SigningCertUrl': 'https://sns.pem'
}
def ses_complaint_callback():
"""
https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html#complaint-object
"""
return {
'Signature': 'bb',
'SignatureVersion': '1', 'MessageAttributes': {}, 'MessageId': '98c6e927-af5d-5f3b-9522-bab736f2cbde',
'UnsubscribeUrl': 'https://sns.eu-west-1.amazonaws.com',
'TopicArn': 'arn:ses_notifications', 'Type': 'Notification',
'Timestamp': '2018-06-05T14:00:15.952Z', 'Subject': None,
'Message': '{"notificationType":"Complaint","complaint":{"complaintFeedbackType": "abuse", "complainedRecipients":[{"emailAddress":"<EMAIL>"}],"timestamp":"2018-06-05T13:59:58.000Z","feedbackId":"ses_feedback_id"},"mail":{"timestamp":"2018-06-05T14:00:15.950Z","source":"\\"Some Service\\" <someservicenotifications.service.gov.uk>","sourceArn":"arn:identity/notifications.service.gov.uk","sourceIp":"192.168.3.11","sendingAccountId":"888450439860","messageId":"ref1","destination":["<EMAIL>"]}}', # noqa
'SigningCertUrl': 'https://sns.pem'
}
def ses_notification_callback():
return '{\n "Type" : "Notification",\n "MessageId" : "ref1",' \
'\n "TopicArn" : "arn:aws:sns:eu-west-1:123456789012:testing",' \
'\n "Message" : "{\\"notificationType\\":\\"Delivery\\",' \
'\\"mail\\":{\\"timestamp\\":\\"2016-03-14T12:35:25.909Z\\",' \
'\\"source\\":\\"test@test-domain.com\\",' \
'\\"sourceArn\\":\\"arn:aws:ses:eu-west-1:123456789012:identity/testing-notify\\",' \
'\\"sendingAccountId\\":\\"123456789012\\",' \
'\\"messageId\\":\\"ref1\\",' \
'\\"destination\\":[\\"<EMAIL>\\"]},' \
'\\"delivery\\":{\\"timestamp\\":\\"2016-03-14T12:35:26.567Z\\",' \
'\\"processingTimeMillis\\":658,' \
'\\"recipients\\":[\\"<EMAIL>\\"],' \
'\\"smtpResponse\\":\\"250 2.0.0 OK 1457958926 uo5si26480932wjc.221 - gsmtp\\",' \
'\\"reportingMTA\\":\\"a6-238.smtp-out.eu-west-1.amazonses.com\\"}}",' \
'\n "Timestamp" : "2016-03-14T12:35:26.665Z",\n "SignatureVersion" : "1",' \
'\n "Signature" : "X8d7eTAOZ6wlnrdVVPYanrAlsX0SMPfOzhoTEBnQqYkrNWTqQY91C0f3bxtPdUhUt' \
'OowyPAOkTQ4KnZuzphfhVb2p1MyVYMxNKcBFB05/qaCX99+92fjw4x9LeUOwyGwMv5F0Vkfi5qZCcEw69uVrhYL' \
'VSTFTrzi/yCtru+yFULMQ6UhbY09GwiP6hjxZMVr8aROQy5lLHglqQzOuSZ4KeD85JjifHdKzlx8jjQ+uj+FLzHXPMA' \
'PmPU1JK9kpoHZ1oPshAFgPDpphJe+HwcJ8ezmk+3AEUr3wWli3xF+49y8Z2anASSVp6YI2YP95UT8Rlh3qT3T+V9V8rbSVislxA==",' \
'\n "SigningCertURL" : "https://sns.eu-west-1.amazonaws.com/SimpleNotificationService-bb750' \
'dd426d95ee9390147a5624348ee.pem",' \
'\n "UnsubscribeURL" : "https://sns.eu-west-1.amazonaws.com/?Action=Unsubscribe&S' \
'subscriptionArn=arn:aws:sns:eu-west-1:302763885840:preview-emails:d6aad3ef-83d6-4cf3-a470-54e2e75916da"\n}'
def create_service_data_retention(
service,
notification_type='sms',
days_of_retention=3
):
data_retention = insert_service_data_retention(
service_id=service.id,
notification_type=notification_type,
days_of_retention=days_of_retention
)
return data_retention
def create_invited_user(service=None,
to_email_address=None):
if service is None:
service = create_service()
if to_email_address is None:
to_email_address = '<EMAIL>'
from_user = service.users[0]
data = {
'service': service,
'email_address': to_email_address,
'from_user': from_user,
'permissions': 'send_messages,manage_service,manage_api_keys',
'folder_permissions': [str(uuid.uuid4()), str(uuid.uuid4())]
}
invited_user = InvitedUser(**data)
save_invited_user(invited_user)
return invited_user
def create_template_folder(service, name='foo', parent=None):
tf = TemplateFolder(name=name, service=service, parent=parent)
db.session.add(tf)
db.session.commit()
return tf
def create_letter_branding(name='HM Government', filename='hm-government'):
test_domain_branding = LetterBranding(name=name,
filename=filename,
)
db.session.add(test_domain_branding)
db.session.commit()
return test_domain_branding
def set_up_usage_data(start_date):
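    # Create services, organisations and billing facts spread around
    # start_date so usage/billing reports have data to aggregate.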
year = int(start_date.strftime('%Y'))
one_week_earlier = start_date - timedelta(days=7)
two_days_later = start_date + timedelta(days=2)
one_week_later = start_date + timedelta(days=7)
one_month_later = start_date + timedelta(days=31)
service = create_service(service_name='a - with sms and letter')
letter_template_1 = create_template(service=service, template_type='letter')
sms_template_1 = create_template(service=service, template_type='sms')
create_annual_billing(service_id=service.id, free_sms_fragment_limit=10, financial_year_start=year)
org = create_organisation(name="Org for {}".format(service.name))
dao_add_service_to_organisation(service=service, organisation_id=org.id)
service_2 = create_service(service_name='b - emails')
email_template = create_template(service=service_2, template_type='email')
org_2 = create_organisation(name='Org for {}'.format(service_2.name))
dao_add_service_to_organisation(service=service_2, organisation_id=org_2.id)
service_3 = create_service(service_name='c - letters only')
letter_template_3 = create_template(service=service_3, template_type='letter')
org_3 = create_organisation(name="Org for {}".format(service_3.name))
dao_add_service_to_organisation(service=service_3, organisation_id=org_3.id)
service_4 = create_service(service_name='d - service without org')
letter_template_4 = create_template(service=service_4, template_type='letter')
service_sms_only = create_service(service_name='b - chargeable sms')
sms_template = create_template(service=service_sms_only, template_type='sms')
create_annual_billing(service_id=service_sms_only.id, free_sms_fragment_limit=10, financial_year_start=year)
create_ft_billing(bst_date=one_week_earlier, template=sms_template_1, billable_unit=2, rate=0.11)
create_ft_billing(bst_date=start_date, template=sms_template_1, billable_unit=2, rate=0.11)
create_ft_billing(bst_date=two_days_later, template=sms_template_1, billable_unit=1, rate=0.11)
create_ft_billing(bst_date=one_week_later, template=letter_template_1,
notifications_sent=2, billable_unit=1, rate=.35, postage='first')
create_ft_billing(bst_date=one_month_later, template=letter_template_1,
notifications_sent=4, billable_unit=2, rate=.45, postage='second')
create_ft_billing(bst_date=one_week_later, template=letter_template_1,
notifications_sent=2, billable_unit=2, rate=.45, postage='second')
create_ft_billing(bst_date=one_week_earlier, template=sms_template, rate=0.11, billable_unit=12)
create_ft_billing(bst_date=two_days_later, template=sms_template, rate=0.11)
create_ft_billing(bst_date=one_week_later, template=sms_template, billable_unit=2, rate=0.11)
create_ft_billing(bst_date=start_date, template=letter_template_3,
notifications_sent=2, billable_unit=3, rate=.50, postage='first')
create_ft_billing(bst_date=one_week_later, template=letter_template_3,
notifications_sent=8, billable_unit=5, rate=.65, postage='second')
create_ft_billing(bst_date=one_month_later, template=letter_template_3,
notifications_sent=12, billable_unit=5, rate=.65, postage='second')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=7, billable_unit=4, rate=1.55, postage='rest-of-world')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=8, billable_unit=4, rate=1.55, postage='europe')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=2, billable_unit=1, rate=.35, postage='second')
create_ft_billing(bst_date=two_days_later, template=letter_template_4,
notifications_sent=1, billable_unit=1, rate=.50, postage='first')
create_ft_billing(bst_date=start_date, template=email_template, notifications_sent=10)
return org, org_3, service, service_3, service_4, service_sms_only, org_2, service_2
def create_returned_letter(service=None, reported_at=None, notification_id=None):
if not service:
service = create_service(service_name='a - with sms and letter')
returned_letter = ReturnedLetter(
service_id=service.id,
reported_at=reported_at or datetime.utcnow(),
notification_id=notification_id or uuid.uuid4(),
created_at=datetime.utcnow(),
)
db.session.add(returned_letter)
db.session.commit()
return returned_letter
def create_service_contact_list(
service=None,
original_file_name='EmergencyContactList.xls',
row_count=100,
template_type='email',
created_by_id=None,
archived=False,
):
if not service:
service = create_service(service_name='service for contact list', user=create_user())
contact_list = ServiceContactList(
service_id=service.id,
original_file_name=original_file_name,
row_count=row_count,
template_type=template_type,
created_by_id=created_by_id or service.users[0].id,
created_at=datetime.utcnow(),
archived=archived,
)
db.session.add(contact_list)
db.session.commit()
return contact_list
def create_broadcast_message(
template,
created_by=None,
personalisation=None,
status=BroadcastStatusType.DRAFT,
starts_at=None,
finishes_at=None,
areas=None,
):
broadcast_message = BroadcastMessage(
service_id=template.service_id,
template_id=template.id,
template_version=template.version,
personalisation=personalisation or {},
status=status,
starts_at=starts_at,
finishes_at=finishes_at,
created_by_id=created_by.id if created_by else template.created_by_id,
areas=areas or {},
)
db.session.add(broadcast_message)
db.session.commit()
return broadcast_message
def create_broadcast_event(
broadcast_message,
sent_at=None,
message_type='alert',
transmitted_content=None,
transmitted_areas=None,
transmitted_sender=None,
transmitted_starts_at=None,
transmitted_finishes_at=None,
):
b_e = BroadcastEvent(
service=broadcast_message.service,
broadcast_message=broadcast_message,
sent_at=sent_at or datetime.utcnow(),
message_type=message_type,
transmitted_content=transmitted_content or {'body': 'this is an emergency broadcast message'},
transmitted_areas=transmitted_areas or broadcast_message.areas,
transmitted_sender=transmitted_sender or 'www.notifications.service.gov.uk',
transmitted_starts_at=transmitted_starts_at,
transmitted_finishes_at=transmitted_finishes_at or datetime.utcnow(),
)
db.session.add(b_e)
db.session.commit()
return b_e
def create_broadcast_provider_message(
broadcast_event,
provider,
status='sending'
):
broadcast_provider_message_id = uuid.uuid4()
provider_message = BroadcastProviderMessage(
id=broadcast_provider_message_id,
broadcast_event=broadcast_event,
provider=provider,
status=status,
)
db.session.add(provider_message)
db.session.commit()
provider_message_number = None
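    # Only the Vodafone provider gets an associated BroadcastProviderMessageNumber.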
if provider == BroadcastProvider.VODAFONE:
provider_message_number = BroadcastProviderMessageNumber(
broadcast_provider_message_id=broadcast_provider_message_id)
db.session.add(provider_message_number)
db.session.commit()
return provider_message, provider_message_number | en | 0.579506 | # we did not specify in test - lets create it # create random inbound number # noqa https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html#complaint-object # noqa https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html#complaint-object # noqa | 1.255212 | 1 |
tests/test_change_speed.py | sedv8808/AudioFilters_py | 2 | 6632915 | import numpy as np
import pytest
from audiofilters.change_speed import change_speed
input_signal = np.genfromtxt('tests/data/bark.csv', dtype = 'float32')
'''
Test that exception is raised for unsupported input_signal argument type.
'''
def test_unsupported_input_signal_type_raises_error():
with pytest.raises(Exception):
change_speed(np.array(['1', '2', '3']), 1.5)
'''
Test that exception is raised for negative rate argument
'''
def test_negative_rate_raises_error():
with pytest.raises(Exception):
change_speed(input_signal, -1)
'''
Test that exception is raised for rate argument equal to zero
'''
def test_zero_rate_raises_error():
with pytest.raises(Exception):
change_speed(input_signal, 0)
'''
Make sure the output matches example data when speed is increased
'''
def test_change_speed_doubles_playback_speed():
expected_output = np.genfromtxt('tests/data/change_speed/bark_double_speed.csv', dtype = 'float32')
output_signal = change_speed(input_signal, 2.0)
# Mean squared error between input and output signal
mse = ((expected_output - output_signal)**2).mean(axis=0)
print(mse)
assert mse < 0.00001, "Output does not match test data!"
'''
Make sure the output matches example data when speed is decreased
'''
def test_change_speed_halves_playback_speed():
expected_output = np.genfromtxt('tests/data/change_speed/bark_half_speed.csv', dtype = 'float32')
output_signal = change_speed(input_signal, 0.5)
# Mean squared error between input and output signal
mse = ((expected_output - output_signal)**2).mean(axis=0)
print(mse)
assert mse < 0.00001, "Output does not match test data!"
| import numpy as np
import pytest
from audiofilters.change_speed import change_speed
input_signal = np.genfromtxt('tests/data/bark.csv', dtype = 'float32')
'''
Test that exception is raised for unsupported input_signal argument type.
'''
def test_unsupported_input_signal_type_raises_error():
with pytest.raises(Exception):
change_speed(np.array(['1', '2', '3']), 1.5)
'''
Test that exception is raised for negative rate argument
'''
def test_negative_rate_raises_error():
with pytest.raises(Exception):
change_speed(input_signal, -1)
'''
Test that exception is raised for rate argument equal to zero
'''
def test_zero_rate_raises_error():
with pytest.raises(Exception):
change_speed(input_signal, 0)
'''
Make sure the output matches example data when speed is increased
'''
def test_change_speed_doubles_playback_speed():
expected_output = np.genfromtxt('tests/data/change_speed/bark_double_speed.csv', dtype = 'float32')
output_signal = change_speed(input_signal, 2.0)
# Mean squared error between input and output signal
mse = ((expected_output - output_signal)**2).mean(axis=0)
print(mse)
assert mse < 0.00001, "Output does not match test data!"
'''
Make sure the output matches example data when speed is decreased
'''
def test_change_speed_halves_playback_speed():
expected_output = np.genfromtxt('tests/data/change_speed/bark_half_speed.csv', dtype = 'float32')
output_signal = change_speed(input_signal, 0.5)
# Mean squared error between input and output signal
mse = ((expected_output - output_signal)**2).mean(axis=0)
print(mse)
assert mse < 0.00001, "Output does not match test data!"
| en | 0.858531 | Test that exception is raised for unsupported input_signal argument type. Test that exception is raised for negative rate argument Test that exception is raised for rate argument equal to zero Make sure the output matches example data when speed is increased # Mean squared error between input and output signal Make sure the output matches example data when speed is decreased # Mean squared error between input and output signal | 2.908185 | 3 |
vegaapiclient/generated/vega/checkpoint/v1/__init__.py | vegaprotocol/sdk-python | 1 | 6632916 | from . import checkpoint_pb2_grpc as checkpoint_grpc
from . import checkpoint_pb2 as checkpoint
__all__ = [
"checkpoint_grpc",
"checkpoint",
]
| from . import checkpoint_pb2_grpc as checkpoint_grpc
from . import checkpoint_pb2 as checkpoint
__all__ = [
"checkpoint_grpc",
"checkpoint",
]
| none | 1 | 1.040267 | 1 |
|
app/models/modifier.py | marcusosso/uwhvz | 9 | 6632917 | <filename>app/models/modifier.py
import uuid
from datetime import datetime
from django.db import models
from enumfields import Enum, EnumField
from .faction import Faction
class ModifierType(Enum):
ONE_TIME_USE = 'O'
SUPPLY_CODE = 'S'
TAG = 'T'
class Modifier(models.Model):
id: uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
faction: Faction = models.ForeignKey(Faction, on_delete=models.PROTECT)
modifier_type: Enum = EnumField(enum=ModifierType, max_length=1)
modifier_amount: int = models.IntegerField()
created_at: datetime = models.DateTimeField(auto_now_add=True)
modified_at: datetime = models.DateTimeField(auto_now=True)
def __str__(self):
return f"{self.faction}: +{self.modifier_amount}pts for {self.modifier_type}"
| <filename>app/models/modifier.py
import uuid
from datetime import datetime
from django.db import models
from enumfields import Enum, EnumField
from .faction import Faction
class ModifierType(Enum):
ONE_TIME_USE = 'O'
SUPPLY_CODE = 'S'
TAG = 'T'
class Modifier(models.Model):
id: uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
faction: Faction = models.ForeignKey(Faction, on_delete=models.PROTECT)
modifier_type: Enum = EnumField(enum=ModifierType, max_length=1)
modifier_amount: int = models.IntegerField()
created_at: datetime = models.DateTimeField(auto_now_add=True)
modified_at: datetime = models.DateTimeField(auto_now=True)
def __str__(self):
return f"{self.faction}: +{self.modifier_amount}pts for {self.modifier_type}"
| none | 1 | 2.17703 | 2 |
|
themis/__init__.py | celskeggs/themis | 1 | 6632918 | <gh_stars>1-10
import themis.channel
import themis.frc
import themis.joystick
import themis.drive
from themis.channel import *
| import themis.channel
import themis.frc
import themis.joystick
import themis.drive
from themis.channel import * | none | 1 | 1.036824 | 1 |
|
extract/moor/job_lists.py | parkermac/LO_user | 0 | 6632919 | <reponame>parkermac/LO_user
"""
Module to create dicts for multiple (or single) mooring extractions.
"""
def get_sta_dict(job_name):
# specific job definitions
if job_name == 'willapa_bc': # Willapa Bay Center PCSGA Mooring
sta_dict = {
'wbc': (-123.9516, 46.6290)
}
elif job_name == 'mickett_1':
sta_dict = {
'ORCA_Hansville': (-122.6270, 47.9073),
'ORCA_Hoodsport': (-123.1126, 47.4218),
'ORCA_Point_Wells': (-122.3972, 47.7612),
'Central_Main_Stem_Hood_Canal': (-122.989507, 47.574352),
'North_Central_Main_Basin': (-122.440755, 47.825099)
}
elif job_name == 'mickett_2':
sta_dict = {
'Carr_Inlet_ORCA': (-122 - 43.8/60, 47 + 16.8/60),
'East_of_Fox_Island': (-122 - 35.158/60, 47 + 13.185/60)
}
elif job_name == 'stoll_corals':
sta_dict = {
'Carson_D01_Lopez': (-122.8728, 48.36816),
'Carson_D02_Admiralty': (-122.7883, 48.19252),
'Carson_D04_Admiralty': (-122.8166, 48.19764),
'Carson_D05_Keystone': (-122.6576, 48.12828),
'Carson_D07_NorthAdmiralty': (-122.8898, 48.22245),
'Carson_D08_Canada': (-123.149, 48.36136),
'USNM_19270_Canada': (-123.233, 48.35),
'USNM_92626_Admiralty': (-122.80, 48.1917),
'USNM_19228_Dungeness': (-123.189, 48.225),
'USNM_19272_Admiralty': (-122.817, 48.20),
'USNM_92620_Lopez': (-122.85, 48.3667),
}
elif job_name == 'stoll_obs':
sta_dict = {
'DOE_SJF002': (-123.025, 48.25),
'DOE_ADM002': ( -122.8417151, 48.1875056),
'DOE_ADM001': ( -122.616715, 48.0300056),
'WOAC_STN21': (-122.8504, 48.1883),
'WOAC_STN20': (-122.6848, 48.142),
'WOAC_STN19': (-122.6318, 48.0915),
}
elif job_name == 'Kelly':
# note I pushed two of the locations a bit West to get off the landmask
sta_dict = {
'Seal_Rock': (-122.87004, 47.70557),
'Little_Dewatto': (-123.08612-.005, 47.44489),
'Red_Bluff': (-123.10438-.007, 47.41625)
}
elif job_name == 'jazzy':
sta_dict = {
'Middle_Bank': (-123.09651, 48.40935),
'East_Bank': (-122.97376, 48.30042),
'Upright_Channel': (-122.923005, 48.55410),
'Blakely_Orcas': (-122.82880, 48.58790),
'Rosario_Strait': (-122.74001, 48.64631),
'North_Station': (-123.04166, 48.58330),
'South_Station': (-122.94330, 48.42000),
'Hein_Bank': (-123.03940, 48.35825)
}
else:
print('Unsupported job name!')
a = dict()
return a
return sta_dict | """
Module to create dicts for multiple (or single) mooring extractions.
"""
def get_sta_dict(job_name):
# specific job definitions
if job_name == 'willapa_bc': # Willapa Bay Center PCSGA Mooring
sta_dict = {
'wbc': (-123.9516, 46.6290)
}
elif job_name == 'mickett_1':
sta_dict = {
'ORCA_Hansville': (-122.6270, 47.9073),
'ORCA_Hoodsport': (-123.1126, 47.4218),
'ORCA_Point_Wells': (-122.3972, 47.7612),
'Central_Main_Stem_Hood_Canal': (-122.989507, 47.574352),
'North_Central_Main_Basin': (-122.440755, 47.825099)
}
elif job_name == 'mickett_2':
sta_dict = {
'Carr_Inlet_ORCA': (-122 - 43.8/60, 47 + 16.8/60),
'East_of_Fox_Island': (-122 - 35.158/60, 47 + 13.185/60)
}
elif job_name == 'stoll_corals':
sta_dict = {
'Carson_D01_Lopez': (-122.8728, 48.36816),
'Carson_D02_Admiralty': (-122.7883, 48.19252),
'Carson_D04_Admiralty': (-122.8166, 48.19764),
'Carson_D05_Keystone': (-122.6576, 48.12828),
'Carson_D07_NorthAdmiralty': (-122.8898, 48.22245),
'Carson_D08_Canada': (-123.149, 48.36136),
'USNM_19270_Canada': (-123.233, 48.35),
'USNM_92626_Admiralty': (-122.80, 48.1917),
'USNM_19228_Dungeness': (-123.189, 48.225),
'USNM_19272_Admiralty': (-122.817, 48.20),
'USNM_92620_Lopez': (-122.85, 48.3667),
}
elif job_name == 'stoll_obs':
sta_dict = {
'DOE_SJF002': (-123.025, 48.25),
'DOE_ADM002': ( -122.8417151, 48.1875056),
'DOE_ADM001': ( -122.616715, 48.0300056),
'WOAC_STN21': (-122.8504, 48.1883),
'WOAC_STN20': (-122.6848, 48.142),
'WOAC_STN19': (-122.6318, 48.0915),
}
elif job_name == 'Kelly':
# note I pushed two of the locations a bit West to get off the landmask
sta_dict = {
'Seal_Rock': (-122.87004, 47.70557),
'Little_Dewatto': (-123.08612-.005, 47.44489),
'Red_Bluff': (-123.10438-.007, 47.41625)
}
elif job_name == 'jazzy':
sta_dict = {
'Middle_Bank': (-123.09651, 48.40935),
'East_Bank': (-122.97376, 48.30042),
'Upright_Channel': (-122.923005, 48.55410),
'Blakely_Orcas': (-122.82880, 48.58790),
'Rosario_Strait': (-122.74001, 48.64631),
'North_Station': (-123.04166, 48.58330),
'South_Station': (-122.94330, 48.42000),
'Hein_Bank': (-123.03940, 48.35825)
}
else:
print('Unsupported job name!')
a = dict()
return a
return sta_dict | en | 0.833967 | Module to create dicts for multiple (or single) mooring extractions. # specific job definitions # Willapa Bay Center PCSGA Mooring # note I pushed two of the locations a bit West to get off the landmask | 2.772697 | 3 |
pages/themes/PyQT-Lecture1/Tasks_and_HW/Solution_PyQT_HW_1_My_First_GUI/Ui_my_first_GUI.py | WWWCourses/PythonCourseNetIT-Slides | 0 | 6632920 | <reponame>WWWCourses/PythonCourseNetIT-Slides<gh_stars>0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/nemsys/projects/courses/netIT/PythonCourseNetIT/PythonCourse307-Labs/lab44/HW/my_first_GUI.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(836, 276)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(60, 70, 711, 80))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.lineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout.addWidget(self.lineEdit)
self.pushButton = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 836, 27))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuPreferences = QtWidgets.QMenu(self.menubar)
self.menuPreferences.setObjectName("menuPreferences")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionOPen = QtWidgets.QAction(MainWindow)
self.actionOPen.setObjectName("actionOPen")
self.actionSave = QtWidgets.QAction(MainWindow)
self.actionSave.setObjectName("actionSave")
self.actionSave_as = QtWidgets.QAction(MainWindow)
self.actionSave_as.setObjectName("actionSave_as")
self.actionSettings = QtWidgets.QAction(MainWindow)
self.actionSettings.setObjectName("actionSettings")
self.menuFile.addAction(self.actionOPen)
self.menuFile.addAction(self.actionSave)
self.menuFile.addAction(self.actionSave_as)
self.menuPreferences.addAction(self.actionSettings)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuPreferences.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "User Name:"))
self.pushButton.setText(_translate("MainWindow", "OK"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuPreferences.setTitle(_translate("MainWindow", "Preferences"))
self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.actionOPen.setText(_translate("MainWindow", "Open"))
self.actionSave.setText(_translate("MainWindow", "Save"))
self.actionSave_as.setText(_translate("MainWindow", "Save as ..."))
self.actionSettings.setText(_translate("MainWindow", "Settings"))
| # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/nemsys/projects/courses/netIT/PythonCourseNetIT/PythonCourse307-Labs/lab44/HW/my_first_GUI.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(836, 276)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(60, 70, 711, 80))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.lineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout.addWidget(self.lineEdit)
self.pushButton = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 836, 27))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuPreferences = QtWidgets.QMenu(self.menubar)
self.menuPreferences.setObjectName("menuPreferences")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionOPen = QtWidgets.QAction(MainWindow)
self.actionOPen.setObjectName("actionOPen")
self.actionSave = QtWidgets.QAction(MainWindow)
self.actionSave.setObjectName("actionSave")
self.actionSave_as = QtWidgets.QAction(MainWindow)
self.actionSave_as.setObjectName("actionSave_as")
self.actionSettings = QtWidgets.QAction(MainWindow)
self.actionSettings.setObjectName("actionSettings")
self.menuFile.addAction(self.actionOPen)
self.menuFile.addAction(self.actionSave)
self.menuFile.addAction(self.actionSave_as)
self.menuPreferences.addAction(self.actionSettings)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuPreferences.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "User Name:"))
self.pushButton.setText(_translate("MainWindow", "OK"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuPreferences.setTitle(_translate("MainWindow", "Preferences"))
self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.actionOPen.setText(_translate("MainWindow", "Open"))
self.actionSave.setText(_translate("MainWindow", "Save"))
self.actionSave_as.setText(_translate("MainWindow", "Save as ..."))
self.actionSettings.setText(_translate("MainWindow", "Settings")) | en | 0.772911 | # -*- coding: utf-8 -*- # Form implementation generated from reading ui file '/home/nemsys/projects/courses/netIT/PythonCourseNetIT/PythonCourse307-Labs/lab44/HW/my_first_GUI.ui' # # Created by: PyQt5 UI code generator 5.15.4 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. | 2.478045 | 2 |
collectfast/storage_extensions/s3boto.py | jpnauta/collectfast | 0 | 6632921 | from storages.utils import safe_join
from collectfast.storage_extensions.base import BaseStorageExtensions, check_preload_metadata
class S3BotoStorageExtensions(BaseStorageExtensions):
"""
Storage extensions for django-storage's `S3BotoStorage`
"""
def __init__(self, storage):
super(S3BotoStorageExtensions, self).__init__(storage)
check_preload_metadata(storage)
def get_etag(self, path):
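        # Join the storage location and the relative path into the full bucket key,
        # then return the ETag S3 reports for that key (None if the key is missing).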
normalized_path = safe_join(self.storage.location, path).replace('\\', '/')
try:
return self.storage.bucket.get_key(normalized_path).etag
except AttributeError:
return None
| from storages.utils import safe_join
from collectfast.storage_extensions.base import BaseStorageExtensions, check_preload_metadata
class S3BotoStorageExtensions(BaseStorageExtensions):
"""
Storage extensions for django-storage's `S3BotoStorage`
"""
def __init__(self, storage):
super(S3BotoStorageExtensions, self).__init__(storage)
check_preload_metadata(storage)
def get_etag(self, path):
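        # Join the storage location and the relative path into the full bucket key,
        # then return the ETag S3 reports for that key (None if the key is missing).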
normalized_path = safe_join(self.storage.location, path).replace('\\', '/')
try:
return self.storage.bucket.get_key(normalized_path).etag
except AttributeError:
return None
| en | 0.537756 | Storage extensions for django-storage's `S3BotoStorage` | 1.89642 | 2 |
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py | wbear2/ambari | 5 | 6632922 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management import *
import status_params
# server configurations
config = Script.get_config()
conf_dir = "/etc/hadoop/conf"
mapred_user = status_params.mapred_user
pid_dir_prefix = status_params.pid_dir_prefix
mapred_pid_dir = status_params.mapred_pid_dir
historyserver_pid_file = status_params.historyserver_pid_file
jobtracker_pid_file = status_params.jobtracker_pid_file
tasktracker_pid_file = status_params.tasktracker_pid_file
hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
hadoop_bin = "/usr/lib/hadoop/bin"
user_group = config['configurations']['global']['user_group']
hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
mapred_log_dir_prefix = default("mapred_log_dir_prefix",hdfs_log_dir_prefix)
mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
update_exclude_file_only = config['commandParams']['update_exclude_file_only']
hadoop_jar_location = "/usr/lib/hadoop/"
smokeuser = config['configurations']['global']['smokeuser']
_authentication = config['configurations']['core-site']['hadoop.security.authentication']
security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
#exclude file
mr_exclude_hosts = default("/clusterHostInfo/decom_tt_hosts", [])
exclude_file_path = config['configurations']['mapred-site']['mapred.hosts.exclude']
mapred_hosts_file_path = config['configurations']['mapred-site']['mapred.hosts']
#hdfs directories
mapreduce_jobhistory_intermediate_done_dir = default('/configurations/mapred-site/mapreduce.jobhistory.intermediate-done-dir', '/mr-history/tmp')
mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapred.job.tracker.history.completed.location']
#for create_hdfs_directory
hostname = config["hostname"]
hadoop_conf_dir = "/etc/hadoop/conf"
hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
hdfs_user = config['configurations']['global']['hdfs_user']
import functools
#create partial functions with common arguments for every HdfsDirectory call
#to create hdfs directory we need to call params.HdfsDirectory in code
HdfsDirectory = functools.partial(
HdfsDirectory,
conf_dir=hadoop_conf_dir,
hdfs_user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local
)
mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
| """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management import *
import status_params
# server configurations
config = Script.get_config()
conf_dir = "/etc/hadoop/conf"
mapred_user = status_params.mapred_user
pid_dir_prefix = status_params.pid_dir_prefix
mapred_pid_dir = status_params.mapred_pid_dir
historyserver_pid_file = status_params.historyserver_pid_file
jobtracker_pid_file = status_params.jobtracker_pid_file
tasktracker_pid_file = status_params.tasktracker_pid_file
hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
hadoop_bin = "/usr/lib/hadoop/bin"
user_group = config['configurations']['global']['user_group']
hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
mapred_log_dir_prefix = default("mapred_log_dir_prefix",hdfs_log_dir_prefix)
mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
update_exclude_file_only = config['commandParams']['update_exclude_file_only']
hadoop_jar_location = "/usr/lib/hadoop/"
smokeuser = config['configurations']['global']['smokeuser']
_authentication = config['configurations']['core-site']['hadoop.security.authentication']
security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
#exclude file
mr_exclude_hosts = default("/clusterHostInfo/decom_tt_hosts", [])
exclude_file_path = config['configurations']['mapred-site']['mapred.hosts.exclude']
mapred_hosts_file_path = config['configurations']['mapred-site']['mapred.hosts']
#hdfs directories
mapreduce_jobhistory_intermediate_done_dir = default('/configurations/mapred-site/mapreduce.jobhistory.intermediate-done-dir', '/mr-history/tmp')
mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapred.job.tracker.history.completed.location']
#for create_hdfs_directory
hostname = config["hostname"]
hadoop_conf_dir = "/etc/hadoop/conf"
hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
hdfs_user = config['configurations']['global']['hdfs_user']
import functools
#create partial functions with common arguments for every HdfsDirectory call
#to create hdfs directory we need to call params.HdfsDirectory in code
HdfsDirectory = functools.partial(
HdfsDirectory,
conf_dir=hadoop_conf_dir,
hdfs_user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local
)
mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
| en | 0.834741 | Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ambari Agent # server configurations #exclude file #hdfs directories #for create_hdfs_directory #create partial functions with common arguments for every HdfsDirectory call #to create hdfs directory we need to call params.HdfsDirectory in code | 1.149215 | 1 |
setup.py | youngsm/adsgrb | 0 | 6632923 | #!/usr/bin/env python
"""The setup script."""
import os
import re
import sys
from functools import reduce
from setuptools import find_packages
from setuptools import setup
with open("README.rst") as readme_file:
readme = readme_file.read()
# with open("./requirements.txt") as requirements_file:
# requirements = requirements_file.read().splitlines()
requirements = ["requests", "matplotlib", "numpy", "astroquery",
"scipy", "pandas", "glob2", "astropy", "dustmaps",
"plotly", "ipython", "PyPDF2", "numdifftools", "lmfit",
"ads", "emcee", "corner"]
test_requirements = []
# Thank you <NAME> for this nice versioning method
major, minor1, minor2, release, serial = sys.version_info
readfile_kwargs = {"encoding": "utf-8"} if major >= 3 else {}
version_regex = re.compile('__version__ = "(.*?)"')
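# Read the version string out of grblc/__init__.py with the regex above instead of importing the package.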
with open(
reduce(os.path.join, [os.path.dirname(os.path.abspath(__file__)), "grblc", "__init__.py"]),
**readfile_kwargs
) as fp:
contents = fp.read()
version = version_regex.findall(contents)[0]
setup(
author="<NAME>",
author_email="<EMAIL>",
python_requires=">=3.6",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
description="A Python package for GRB optical light curve studies.",
install_requires=requirements,
license="MIT license",
long_description=readme,
include_package_data=True,
keywords="grblc",
name="grblc",
packages=find_packages(include=["grblc", "grblc.*"]),
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/youngsm/grblc",
version=version,
zip_safe=False,
)
| #!/usr/bin/env python
"""The setup script."""
import os
import re
import sys
from functools import reduce
from setuptools import find_packages
from setuptools import setup
with open("README.rst") as readme_file:
readme = readme_file.read()
# with open("./requirements.txt") as requirements_file:
# requirements = requirements_file.read().splitlines()
requirements = ["requests", "matplotlib", "numpy", "astroquery",
"scipy", "pandas", "glob2", "astropy", "dustmaps",
"plotly", "ipython", "PyPDF2", "numdifftools", "lmfit",
"ads", "emcee", "corner"]
test_requirements = []
# Thank you <NAME> for this nice versioning method
major, minor1, minor2, release, serial = sys.version_info
readfile_kwargs = {"encoding": "utf-8"} if major >= 3 else {}
version_regex = re.compile('__version__ = "(.*?)"')
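# Read the version string out of grblc/__init__.py with the regex above instead of importing the package.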
with open(
reduce(os.path.join, [os.path.dirname(os.path.abspath(__file__)), "grblc", "__init__.py"]),
**readfile_kwargs
) as fp:
contents = fp.read()
version = version_regex.findall(contents)[0]
setup(
author="<NAME>",
author_email="<EMAIL>",
python_requires=">=3.6",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
description="A Python package for GRB optical light curve studies.",
install_requires=requirements,
license="MIT license",
long_description=readme,
include_package_data=True,
keywords="grblc",
name="grblc",
packages=find_packages(include=["grblc", "grblc.*"]),
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/youngsm/grblc",
version=version,
zip_safe=False,
)
| en | 0.726226 | #!/usr/bin/env python The setup script. # with open("./requirements.txt") as requirements_file: # requirements = requirements_file.read().splitlines() # Thank you <NAME> for this nice versioning method | 1.86507 | 2 |
src/syft/ast/callable.py | dnabanita7/PySyft | 1 | 6632924 | <gh_stars>1-10
# stdlib
from types import ModuleType
from typing import Any
from typing import Callable as CallableT
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# syft relative
from .. import ast
from .. import lib
from ..core.node.abstract.node import AbstractNodeClient
from ..core.node.common.action.function_or_constructor_action import (
RunFunctionOrConstructorAction,
)
from ..logger import traceback_and_raise
from ..util import inherit_tags
from .util import module_type
class Callable(ast.attribute.Attribute):
"""
Represent a method (can be static), global function, or constructor which can be directly executed.
"""
def __init__(
self,
path_and_name: str,
parent: ast.attribute.Attribute,
object_ref: Optional[Any] = None,
return_type_name: Optional[str] = None,
client: Optional[AbstractNodeClient] = None,
is_static: Optional[bool] = False,
):
"""
Args:
path_and_name: The path for the current node, e.g. `syft.lib.python.List`.
object_ref: The actual python object for which the computation is being made.
return_type_name: The return type name of the given action as a string with its full path.
client: The client for which all computation is being executed.
is_static: If True, the object has to be resolved on the AST, not on an existing pointer.
"""
super().__init__(
path_and_name=path_and_name,
object_ref=object_ref,
return_type_name=return_type_name,
client=client,
parent=parent,
)
self.is_static = is_static
def __call__(
self,
*args: Tuple[Any, ...],
**kwargs: Any,
) -> Optional[Union["Callable", CallableT]]:
"""
The `__call__` method on a `Callable` has two possible roles:
1. If the client is set, execute the function for the client and return the appropriate pointer
given the `return_type_name`.
2. If the client is not set, then the `__call__` is used as a query on the ast.
"""
self.apply_node_changes()
if self.client is not None:
return_tensor_type_pointer_type = self.client.lib_ast.query(
path=self.return_type_name
).pointer_type
ptr = return_tensor_type_pointer_type(client=self.client)
# first downcast anything primitive which is not already PyPrimitive
(
downcast_args,
downcast_kwargs,
) = lib.python.util.downcast_args_and_kwargs(args=args, kwargs=kwargs)
# then we convert anything which isn't a pointer into a pointer
pointer_args, pointer_kwargs = ast.klass.pointerize_args_and_kwargs(
args=downcast_args, kwargs=downcast_kwargs, client=self.client
)
if self.path_and_name is not None:
msg = RunFunctionOrConstructorAction(
path=self.path_and_name,
args=pointer_args,
kwargs=pointer_kwargs,
id_at_location=ptr.id_at_location,
address=self.client.address,
is_static=self.is_static,
)
self.client.send_immediate_msg_without_reply(msg=msg)
inherit_tags(
attr_path_and_name=self.path_and_name,
result=ptr,
self_obj=None,
args=args,
kwargs=kwargs,
)
return ptr
if "path" not in kwargs or "index" not in kwargs:
traceback_and_raise(
ValueError(
"AST with no client attached tries to execute remote function."
)
)
path = kwargs["path"]
index = kwargs["index"]
if len(path) == index:
return self.object_ref
else:
return self.attrs[path[index]](path=path, index=index + 1)
def add_path(
self,
path: Union[str, List[str]],
index: int,
return_type_name: Optional[str] = None,
framework_reference: Optional[ModuleType] = None,
is_static: bool = False,
) -> None:
"""
The add_path method adds new nodes in the AST based on the type of the current node and
the type of the object to be added.
Args:
path: The path for the node in the AST to be added,
                e.g. `syft.lib.python.List` or ["syft", "lib", "python", "List"]
index: The associated position in the path for the current node.
return_type_name: The return type name of the given action as a
string with its full path.
framework_reference: The Python framework in which we can solve
the same path to obtain the Python object.
is_static: If the node represents a static method.
"""
if index >= len(path) or path[index] in self.attrs:
return
attr_ref = getattr(self.object_ref, path[index])
if isinstance(attr_ref, module_type):
traceback_and_raise(
ValueError("Module cannot be an attribute of Callable.")
)
self.attrs[path[index]] = ast.callable.Callable(
path_and_name=".".join(path[: index + 1]),
object_ref=attr_ref,
return_type_name=return_type_name,
client=self.client,
parent=self,
)
| # stdlib
from types import ModuleType
from typing import Any
from typing import Callable as CallableT
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# syft relative
from .. import ast
from .. import lib
from ..core.node.abstract.node import AbstractNodeClient
from ..core.node.common.action.function_or_constructor_action import (
RunFunctionOrConstructorAction,
)
from ..logger import traceback_and_raise
from ..util import inherit_tags
from .util import module_type
class Callable(ast.attribute.Attribute):
"""
Represent a method (can be static), global function, or constructor which can be directly executed.
"""
def __init__(
self,
path_and_name: str,
parent: ast.attribute.Attribute,
object_ref: Optional[Any] = None,
return_type_name: Optional[str] = None,
client: Optional[AbstractNodeClient] = None,
is_static: Optional[bool] = False,
):
"""
Args:
path_and_name: The path for the current node, e.g. `syft.lib.python.List`.
object_ref: The actual python object for which the computation is being made.
return_type_name: The return type name of the given action as a string with its full path.
client: The client for which all computation is being executed.
is_static: If True, the object has to be resolved on the AST, not on an existing pointer.
"""
super().__init__(
path_and_name=path_and_name,
object_ref=object_ref,
return_type_name=return_type_name,
client=client,
parent=parent,
)
self.is_static = is_static
def __call__(
self,
*args: Tuple[Any, ...],
**kwargs: Any,
) -> Optional[Union["Callable", CallableT]]:
"""
The `__call__` method on a `Callable` has two possible roles:
1. If the client is set, execute the function for the client and return the appropriate pointer
given the `return_type_name`.
2. If the client is not set, then the `__call__` is used as a query on the ast.
"""
self.apply_node_changes()
if self.client is not None:
return_tensor_type_pointer_type = self.client.lib_ast.query(
path=self.return_type_name
).pointer_type
ptr = return_tensor_type_pointer_type(client=self.client)
# first downcast anything primitive which is not already PyPrimitive
(
downcast_args,
downcast_kwargs,
) = lib.python.util.downcast_args_and_kwargs(args=args, kwargs=kwargs)
# then we convert anything which isn't a pointer into a pointer
pointer_args, pointer_kwargs = ast.klass.pointerize_args_and_kwargs(
args=downcast_args, kwargs=downcast_kwargs, client=self.client
)
if self.path_and_name is not None:
msg = RunFunctionOrConstructorAction(
path=self.path_and_name,
args=pointer_args,
kwargs=pointer_kwargs,
id_at_location=ptr.id_at_location,
address=self.client.address,
is_static=self.is_static,
)
self.client.send_immediate_msg_without_reply(msg=msg)
inherit_tags(
attr_path_and_name=self.path_and_name,
result=ptr,
self_obj=None,
args=args,
kwargs=kwargs,
)
return ptr
if "path" not in kwargs or "index" not in kwargs:
traceback_and_raise(
ValueError(
"AST with no client attached tries to execute remote function."
)
)
path = kwargs["path"]
index = kwargs["index"]
if len(path) == index:
return self.object_ref
else:
return self.attrs[path[index]](path=path, index=index + 1)
def add_path(
self,
path: Union[str, List[str]],
index: int,
return_type_name: Optional[str] = None,
framework_reference: Optional[ModuleType] = None,
is_static: bool = False,
) -> None:
"""
The add_path method adds new nodes in the AST based on the type of the current node and
the type of the object to be added.
Args:
path: The path for the node in the AST to be added,
                e.g. `syft.lib.python.List` or ["syft", "lib", "python", "List"]
index: The associated position in the path for the current node.
return_type_name: The return type name of the given action as a
string with its full path.
framework_reference: The Python framework in which we can solve
the same path to obtain the Python object.
is_static: If the node represents a static method.
"""
if index >= len(path) or path[index] in self.attrs:
return
attr_ref = getattr(self.object_ref, path[index])
if isinstance(attr_ref, module_type):
traceback_and_raise(
ValueError("Module cannot be an attribute of Callable.")
)
self.attrs[path[index]] = ast.callable.Callable(
path_and_name=".".join(path[: index + 1]),
object_ref=attr_ref,
return_type_name=return_type_name,
client=self.client,
parent=self,
) | en | 0.843668 | # stdlib # syft relative Represent a method (can be static), global function, or constructor which can be directly executed. Args: path_and_name: The path for the current node, e.g. `syft.lib.python.List`. object_ref: The actual python object for which the computation is being made. return_type_name: The return type name of the given action as a string with its full path. client: The client for which all computation is being executed. is_static: If True, the object has to be resolved on the AST, not on an existing pointer. The `__call__` method on a `Callable` has two possible roles: 1. If the client is set, execute the function for the client and return the appropriate pointer given the `return_type_name`. 2. If the client is not set, then the `__call__` is used as a query on the ast. # first downcast anything primitive which is not already PyPrimitive # then we convert anything which isn't a pointer into a pointer The add_path method adds new nodes in the AST based on the type of the current node and the type of the object to be added. Args: path: The path for the node in the AST to be added, e.g. `syft.lib.python.List` or ["syft", "lib", "python", "List] index: The associated position in the path for the current node. return_type_name: The return type name of the given action as a string with its full path. framework_reference: The Python framework in which we can solve the same path to obtain the Python object. is_static: If the node represents a static method. | 2.249453 | 2 |
yourcar/car/migrations/0001_initial.py | italopaiva/your.car | 0 | 6632925 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-06 02:47
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Car',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('car_model', models.CharField(max_length=20)),
('color', models.CharField(max_length=20)),
('year', models.SmallIntegerField(help_text='Use year as YYYY.', validators=[django.core.validators.RegexValidator('^[0-9]{4}$', 'Year in invalid format!', 'invalid')])),
                ('mileage', models.IntegerField(default=0, help_text='Either your car is brand new or it has some mileage traveled', validators=[django.core.validators.MinValueValidator(0)])),
],
),
migrations.CreateModel(
name='OilChange',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(verbose_name='date changed')),
('mileage', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='car.Car')),
],
),
migrations.CreateModel(
name='Refuel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(verbose_name='date refueled')),
('liters', models.DecimalField(decimal_places=3, max_digits=7)),
('fuel_price', models.DecimalField(decimal_places=2, max_digits=4)),
('mileage', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('fuel_type', models.CharField(choices=[('Regular gas', 'Regular gas'), ('Premium gas', 'Premium gas'), ('Alcohol', 'Alcohol'), ('Diesel', 'Diesel')], default='Regular gas', max_length=20)),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='car.Car')),
],
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-06 02:47
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Car',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('car_model', models.CharField(max_length=20)),
('color', models.CharField(max_length=20)),
('year', models.SmallIntegerField(help_text='Use year as YYYY.', validators=[django.core.validators.RegexValidator('^[0-9]{4}$', 'Year in invalid format!', 'invalid')])),
                ('mileage', models.IntegerField(default=0, help_text='Either your car is brand new or it has some mileage traveled', validators=[django.core.validators.MinValueValidator(0)])),
],
),
migrations.CreateModel(
name='OilChange',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(verbose_name='date changed')),
('mileage', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='car.Car')),
],
),
migrations.CreateModel(
name='Refuel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(verbose_name='date refueled')),
('liters', models.DecimalField(decimal_places=3, max_digits=7)),
('fuel_price', models.DecimalField(decimal_places=2, max_digits=4)),
('mileage', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
('fuel_type', models.CharField(choices=[('Regular gas', 'Regular gas'), ('Premium gas', 'Premium gas'), ('Alcohol', 'Alcohol'), ('Diesel', 'Diesel')], default='Regular gas', max_length=20)),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='car.Car')),
],
),
]
| en | 0.833257 | # -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-06-06 02:47 | 1.972026 | 2 |
sources/gamestage03.py | kantel/turtlepy | 0 | 6632926 | import turtle as t
import random as r
import math
import os
WIDTH = 600
HEIGHT = 600
NUMGOALS = 6
NUMALIENS = 4
class Sprite(t.Turtle):
def __init__(self, tshape, tcolor):
t.Turtle.__init__(self)
self.penup()
self.speed(0)
self.shape(tshape)
self.color(tcolor)
self.speed = 1
self.max_speed = 15
def move(self):
self.forward(self.speed)
        # Check the borders and turn away
if self.xcor() >= WIDTH/2 - 50 or self.xcor() <= -WIDTH/2 + 50:
self.forward(-self.speed)
self.left(75)
if self.ycor() >= HEIGHT/2 - 50 or self.ycor() <= -HEIGHT/2 + 50:
self.forward(-self.speed)
self.left(75)
class GameWorld(t.Turtle):
def __init__(self):
t.Turtle.__init__(self)
self.penup()
self.hideturtle()
self.speed(0)
self.color("white")
self.pensize(2)
def draw_border(self):
self.penup()
self.goto(-WIDTH/2 + 40, -HEIGHT/2 + 40)
self.pendown()
self.goto(-WIDTH/2 + 40, HEIGHT/2 - 40)
self.goto(WIDTH/2 - 40, HEIGHT/2 - 40)
self.goto(WIDTH/2 - 40, -HEIGHT/2 + 40)
self.goto(-WIDTH/2 + 40, -HEIGHT/2 + 40)
class HeadUpDisplay(t.Turtle):
def __init__(self):
t.Turtle.__init__(self)
self.penup()
self.hideturtle()
self.speed(0)
self.color("white")
self.goto(-WIDTH/2 + 40, HEIGHT/2 - 30)
self.score = 0
def update_score(self):
self.clear()
        self.write("Score: {}".format(self.score), False, align = "left",
font = ("Arial", 14, "normal"))
def change_score(self, points):
self.score += points
self.update_score()
class Actor(Sprite):
def __init__(self, tshape, tcolor):
Sprite.__init__(self, tshape, tcolor)
self.shapesize(stretch_wid = 0.6, stretch_len = 1.1, outline = None)
self.speed = 5
def turnleft(self):
self.left(30)
def turnright(self):
self.right(30)
def move_faster(self):
self.speed += 1
        # Speed limit
if abs(self.speed) > self.max_speed:
self.speed = self.max_speed
def move_slower(self):
        # Speed limit
self.speed -= 1
if abs(self.speed) > self.max_speed:
self.speed = - self.max_speed
def collides(self, obj):
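        # Simple circular collision test: Euclidean distance between the two
        # sprite centres against a fixed hit radius of 20 pixels.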
a = self.xcor() - obj.xcor()
b = self.ycor() - obj.ycor()
distance = math.sqrt((a**2) + (b**2))
if distance < 20:
return True
else:
return False
class Goal(Sprite):
def __init__(self, tshape, tcolor):
Sprite.__init__(self, tshape, tcolor)
        self.speed = 3  # default value, overwritten during initialisation
self.goto(r.randint(-WIDTH/2 + 60, WIDTH/2 - 60),
r.randint(-HEIGHT/2 + 60, HEIGHT/2 - 60))
self.setheading(r.randint(0, 360))
def jump(self):
self.goto(r.randint(-WIDTH/2 + 60, WIDTH/2 - 60),
r.randint(-HEIGHT/2 + 60, HEIGHT/2 - 60))
self.setheading(r.randint(0, 360))
class Alien(Sprite):
def __init__(self, tshape, tcolor):
Sprite.__init__(self, tshape, tcolor)
        self.speed = 3  # default value, overwritten during initialisation
self.goto(r.randint(-WIDTH/2 + 60, WIDTH/2 - 60),
r.randint(-HEIGHT/2 + 60, HEIGHT/2 - 60))
self.setheading(r.randint(0, 360))
def jump(self):
self.goto(r.randint(-WIDTH/2 + 60, WIDTH/2 - 60),
r.randint(-HEIGHT/2 + 60, HEIGHT/2 - 60))
self.setheading(r.randint(0, 360))
wn = t.Screen()
wn.bgcolor("#2b3e50")
path_to_bg = os.path.join(os.getcwd(), "sources/images/space.gif")
wn.bgpic(path_to_bg)
pumpkin = os.path.join(os.getcwd(), "sources/images/pumpkin.gif")
alien = os.path.join(os.getcwd(), "sources/images/alien.gif")
wn.register_shape(pumpkin)
wn.register_shape(alien)
wn.setup(width = WIDTH, height = HEIGHT)
wn.title("Turtle Graphics Game Tutorial – Stage 3")
player = Actor("triangle", "red")
world = GameWorld()
hud = HeadUpDisplay()
# Draw the borders of the playing field
world.draw_border()
# Create the goal objects
goals = []
for i in range(NUMGOALS):
goals.append(Goal(pumpkin, "gold"))
goals[i].speed = r.randint(2, 7)
# Create the aliens
aliens = []
for i in range(NUMALIENS):
aliens.append(Alien(alien, "blue"))
aliens[i].speed = r.randint(2, 7)
def exitGame():
global keepGoing
keepGoing = False
# Listen for keyboard events
t.listen()
t.onkey(player.turnleft, "Left")
t.onkey(player.turnright, "Right")
t.onkey(player.move_faster, "Up")
t.onkey(player.move_slower, "Down")
t.onkey(exitGame, "Escape")  # Escape ends the game
# Turn off screen refresh
wn.tracer(0)
# Game loop
keepGoing = True
while keepGoing:
    wn.update()  # Redraw the entire screen
player.move()
hud.change_score(0)
for goal in goals:
goal.move()
if player.collides(goal):
goal.jump()
hud.change_score(10)
for alien in aliens:
alien.move()
if player.collides(alien):
alien.jump()
hud.change_score(-20)
if hud.score < 0:
print("You lost the game!")
keepGoing = False
| import turtle as t
import random as r
import math
import os
WIDTH = 600
HEIGHT = 600
NUMGOALS = 6
NUMALIENS = 4
class Sprite(t.Turtle):
def __init__(self, tshape, tcolor):
t.Turtle.__init__(self)
self.penup()
self.speed(0)
self.shape(tshape)
self.color(tcolor)
self.speed = 1
self.max_speed = 15
def move(self):
self.forward(self.speed)
        # Check the borders and turn away
if self.xcor() >= WIDTH/2 - 50 or self.xcor() <= -WIDTH/2 + 50:
self.forward(-self.speed)
self.left(75)
if self.ycor() >= HEIGHT/2 - 50 or self.ycor() <= -HEIGHT/2 + 50:
self.forward(-self.speed)
self.left(75)
class GameWorld(t.Turtle):
def __init__(self):
t.Turtle.__init__(self)
self.penup()
self.hideturtle()
self.speed(0)
self.color("white")
self.pensize(2)
def draw_border(self):
self.penup()
self.goto(-WIDTH/2 + 40, -HEIGHT/2 + 40)
self.pendown()
self.goto(-WIDTH/2 + 40, HEIGHT/2 - 40)
self.goto(WIDTH/2 - 40, HEIGHT/2 - 40)
self.goto(WIDTH/2 - 40, -HEIGHT/2 + 40)
self.goto(-WIDTH/2 + 40, -HEIGHT/2 + 40)
class HeadUpDisplay(t.Turtle):
def __init__(self):
t.Turtle.__init__(self)
self.penup()
self.hideturtle()
self.speed(0)
self.color("white")
self.goto(-WIDTH/2 + 40, HEIGHT/2 - 30)
self.score = 0
def update_score(self):
self.clear()
        self.write("Score: {}".format(self.score), False, align = "left",
font = ("Arial", 14, "normal"))
def change_score(self, points):
self.score += points
self.update_score()
class Actor(Sprite):
def __init__(self, tshape, tcolor):
Sprite.__init__(self, tshape, tcolor)
self.shapesize(stretch_wid = 0.6, stretch_len = 1.1, outline = None)
self.speed = 5
def turnleft(self):
self.left(30)
def turnright(self):
self.right(30)
def move_faster(self):
self.speed += 1
        # Speed limit
if abs(self.speed) > self.max_speed:
self.speed = self.max_speed
def move_slower(self):
        # Speed limit
self.speed -= 1
if abs(self.speed) > self.max_speed:
self.speed = - self.max_speed
def collides(self, obj):
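        # Simple circular collision test: Euclidean distance between the two
        # sprite centres against a fixed hit radius of 20 pixels.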
a = self.xcor() - obj.xcor()
b = self.ycor() - obj.ycor()
distance = math.sqrt((a**2) + (b**2))
if distance < 20:
return True
else:
return False
class Goal(Sprite):
def __init__(self, tshape, tcolor):
Sprite.__init__(self, tshape, tcolor)
        self.speed = 3  # default value, overwritten during initialisation
self.goto(r.randint(-WIDTH/2 + 60, WIDTH/2 - 60),
r.randint(-HEIGHT/2 + 60, HEIGHT/2 - 60))
self.setheading(r.randint(0, 360))
def jump(self):
self.goto(r.randint(-WIDTH/2 + 60, WIDTH/2 - 60),
r.randint(-HEIGHT/2 + 60, HEIGHT/2 - 60))
self.setheading(r.randint(0, 360))
class Alien(Sprite):
def __init__(self, tshape, tcolor):
Sprite.__init__(self, tshape, tcolor)
        self.speed = 3  # default value, overwritten during initialisation
self.goto(r.randint(-WIDTH/2 + 60, WIDTH/2 - 60),
r.randint(-HEIGHT/2 + 60, HEIGHT/2 - 60))
self.setheading(r.randint(0, 360))
def jump(self):
self.goto(r.randint(-WIDTH/2 + 60, WIDTH/2 - 60),
r.randint(-HEIGHT/2 + 60, HEIGHT/2 - 60))
self.setheading(r.randint(0, 360))
wn = t.Screen()
wn.bgcolor("#2b3e50")
path_to_bg = os.path.join(os.getcwd(), "sources/images/space.gif")
wn.bgpic(path_to_bg)
pumpkin = os.path.join(os.getcwd(), "sources/images/pumpkin.gif")
alien = os.path.join(os.getcwd(), "sources/images/alien.gif")
wn.register_shape(pumpkin)
wn.register_shape(alien)
wn.setup(width = WIDTH, height = HEIGHT)
wn.title("Turtle Graphics Game Tutorial – Stage 3")
player = Actor("triangle", "red")
world = GameWorld()
hud = HeadUpDisplay()
# Draw the borders of the playing field
world.draw_border()
# Create the goal objects
goals = []
for i in range(NUMGOALS):
goals.append(Goal(pumpkin, "gold"))
goals[i].speed = r.randint(2, 7)
# Create the aliens
aliens = []
for i in range(NUMALIENS):
aliens.append(Alien(alien, "blue"))
aliens[i].speed = r.randint(2, 7)
def exitGame():
global keepGoing
keepGoing = False
# Listen for keyboard events
t.listen()
t.onkey(player.turnleft, "Left")
t.onkey(player.turnright, "Right")
t.onkey(player.move_faster, "Up")
t.onkey(player.move_slower, "Down")
t.onkey(exitGame, "Escape")  # Escape ends the game
# Turn off screen refresh
wn.tracer(0)
# Game loop
keepGoing = True
while keepGoing:
    wn.update()  # Redraw the entire screen
player.move()
hud.change_score(0)
for goal in goals:
goal.move()
if player.collides(goal):
goal.jump()
hud.change_score(10)
for alien in aliens:
alien.move()
if player.collides(alien):
alien.jump()
hud.change_score(-20)
if hud.score < 0:
print("You lost the game!")
keepGoing = False
| de | 0.987292 | # Ränder checken und ausweichen # Geschwindigkeitsbegrenzug # Geschwindigkeitsbegrenzung # Default-Vorbelegung, wird bei der Initialisierung überschrieben # Default-Vorbelegung, wird bei der Initialisierung überschrieben # Die Grenzen des Spielfeldes zeichnen # Die Zielobjekte erzeugen # Die Aliens erzeugen # Auf Tastaturereignisse lauschen # Escape beendet das Spiel # Bildschirm-Refresh ausschalten # Spiel-Schleife # Den gesamten Bildschirm neuzeichnen | 3.689547 | 4 |
HotBlog/HotBlog.py | gavinfish/awesome-python | 7 | 6632927 | <filename>HotBlog/HotBlog.py
import urllib.request as ul
import re
from bs4 import BeautifulSoup
import time
import random
class HotBlog(object):
url_pattern = "http://www.baidu.com/s?ie=utf-8&pn={pn}&wd={wd}"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
def get_page(self, url):
contents = b""
try:
page = ul.urlopen(url, timeout=5)
contents = page.read()
except Exception:
print("Connection timeout!")
return contents.decode("utf-8")
def scan_page(self, n, wd, username):
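        # Fetch Baidu results page n for keyword wd and open every hit whose title
        # matches the given user's CSDN blog, pausing a random delay between visits.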
html = self.get_page(self.url_pattern.format(pn=10 * n, wd=wd))
soup = BeautifulSoup(html, "lxml")
blog_title_pattern = re.compile(".*- {username} - 博客频道 - CSDN.NET$".format(username=username))
        print("----- Start visiting search results page " + str(n + 1) + " -----")
for target in soup.find_all(id=re.compile("tools_[0-9]*_[1-9]")):
data_tools = target.attrs["data-tools"]
parts = data_tools.split('","url":"')
if len(parts) != 2:
continue
title = parts[0][10:]
url = parts[1][:-2]
if re.match(blog_title_pattern, title):
random.seed(time.time())
time.sleep(random.uniform(random.random() * 2, random.random() * 50))
request = ul.Request(url, headers=self.headers)
ul.urlopen(request)
print("visit:" + title)
        print("----- Finished visiting search results page " + str(n + 1) + " -----")
def scan_n_pages(self, n, wd, username):
for i in range(n):
self.scan_page(i, wd, username)
def interpret(self):
        print("This script visits CSDN blog posts via the Baidu search engine to improve their search ranking.")
        username = input("Please enter your CSDN user name:\n")
        key = input("Please enter the search keyword:\n")
        page_count = int(input("Please enter the number of pages to search:\n"))
self.scan_n_pages(page_count, key, username)
if __name__ == "__main__":
HotBlog().interpret()
| <filename>HotBlog/HotBlog.py
import urllib.request as ul
import re
from bs4 import BeautifulSoup
import time
import random
class HotBlog(object):
url_pattern = "http://www.baidu.com/s?ie=utf-8&pn={pn}&wd={wd}"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
def get_page(self, url):
contents = b""
try:
page = ul.urlopen(url, timeout=5)
contents = page.read()
except Exception:
print("Connection timeout!")
return contents.decode("utf-8")
def scan_page(self, n, wd, username):
html = self.get_page(self.url_pattern.format(pn=10 * n, wd=wd))
soup = BeautifulSoup(html, "lxml")
blog_title_pattern = re.compile(".*- {username} - 博客频道 - CSDN.NET$".format(username=username))
print("----- 开始访问第" + str(n + 1) + "页搜索结果-----")
for target in soup.find_all(id=re.compile("tools_[0-9]*_[1-9]")):
data_tools = target.attrs["data-tools"]
parts = data_tools.split('","url":"')
if len(parts) != 2:
continue
title = parts[0][10:]
url = parts[1][:-2]
if re.match(blog_title_pattern, title):
random.seed(time.time())
time.sleep(random.uniform(random.random() * 2, random.random() * 50))
request = ul.Request(url, headers=self.headers)
ul.urlopen(request)
print("visit:" + title)
print("----- 结束访问第" + str(n + 1) + "页搜索结果-----")
def scan_n_pages(self, n, wd, username):
for i in range(n):
self.scan_page(i, wd, username)
def interpret(self):
print("这个脚本可以通过百度搜索引擎来访问CSDN博客,提高博客检索排名。")
username = input("请输入你的CSDN用户名:\n")
key = input("请输入搜索关键词:\n")
page_count = int(input("请输入你想要搜索的页面数:\n"))
self.scan_n_pages(page_count, key, username)
if __name__ == "__main__":
HotBlog().interpret()
| none | 1 | 2.948788 | 3 |
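The `get_page` helper in the record above catches every exception and reports it as a connection timeout. A small sketch of a fetch helper that separates real timeouts from other request failures is shown below; the example URL and the five-second limit are placeholders, not values taken from the record.

```python
import socket
import urllib.error
import urllib.request

def fetch(url, timeout=5):
    """Return the decoded page body, or None if the request fails."""
    try:
        with urllib.request.urlopen(url, timeout=timeout) as page:
            return page.read().decode("utf-8")
    except socket.timeout:
        print("Connection timed out:", url)
    except urllib.error.URLError as err:
        print("Request failed:", err.reason)
    return None

if __name__ == "__main__":
    html = fetch("https://example.com")
    print("fetched", 0 if html is None else len(html), "characters")
```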
|
MAPS/sample_stats_constrained.py | gmooers96/CBRAIN-CAM | 0 | 6632928 | <filename>MAPS/sample_stats_constrained.py
import argparse
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import keras
from keras import layers
from keras import backend as K
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from train_stats_constrained import encoder_gen, decoder_gen
import numpy as np
import gc
import tensorflow_probability as tfp
from scipy import spatial
def f_norm(true, pred):
covariance_truth = tfp.stats.covariance(true)
covariance_prediction = tfp.stats.covariance(pred)
covariance_truth = tf.cast(covariance_truth, dtype=tf.float32)
f_dist = tf.norm(covariance_prediction-covariance_truth, ord="euclidean")
return f_dist
def reconstruct_targets_paper(vae, test_data, targets, id, dataset_max, dataset_min):
"""
Plot selected test samples next to their VAE reconstruction means and save the figure.
"""
original_samples = []
recon_means = []
recon_vars = []
vmin = 1000
vmax = -1
vmin_var = 1000
vmax_var = -1
for target in targets:
sample = test_data[target]
sample_mean_var = vae.predict(np.expand_dims(sample, 0))
sample_mean = sample_mean_var[0, :128*30]
sample_log_var = sample_mean_var[0, 128*30:]
# Sample reconstruction based on predicted mean and variance
recon_mean = sample_mean
recon_var = np.exp(sample_log_var)
recon_sample = recon_mean + recon_var
# recon_sample = np.random.multivariate_normal(sample_mean, np.exp(sample_log_var) * np.identity(128*30))
# Rescale original sample and reconstruction to original scale
sample = np.interp(sample, (0, 1), (dataset_min, dataset_max))
recon_mean = np.interp(recon_mean, (0, 1), (dataset_min, dataset_max))
recon_sample = np.interp(recon_sample, (0, 1), (dataset_min, dataset_max))
recon_var = recon_sample - recon_mean
# Get min and max of original and reconstructed
max_reconstructed = np.max(recon_mean)
max_recon_var = np.max(recon_var)
print("max of reconstructed", max_reconstructed)
max_sample = np.max(sample.reshape((128*30,)))
print("max of original", max_sample)
min_reconstructed = np.min(recon_mean)
min_recon_var = np.min(recon_var)
print("min of reconstructed", min_reconstructed)
min_sample = np.min(sample.reshape((128*30,)))
print("min of original", min_sample)
# Reshape reconstructed sample
recon_mean = recon_mean.reshape((30, 128))
recon_var = recon_var.reshape((30, 128))
original_samples.append(sample[:, :, 0])
recon_means.append(recon_mean)
recon_vars.append(recon_var)
vmin = min(vmin, min_reconstructed, min_sample)
vmax = max(vmax, max_reconstructed, max_sample)
vmin_var = min(vmin_var, min_recon_var)
vmax_var = max(vmax_var, max_recon_var)
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
fig_size[1] = 8
plt.rcParams["figure.figsize"] = fig_size
fig, axs = plt.subplots(len(targets), 2, sharex=True, sharey=True, constrained_layout=True)
def fmt(x, pos):
return "{:.2f}".format(x)
np.save("CI_Figure_Data/True_Means.npy", original_samples)
np.save("CI_Figure_Data/Reconstruct_Means.npy", recon_means)
for i in range(len(targets)):
y_ticks = np.arange(1400, 0, -400)
#print("y ticks", y_ticks)
sub_img = axs[i, 0].imshow(original_samples[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)
axs[i, 0].invert_yaxis()
axs[i, 0].set_yticklabels(y_ticks)
if i == 2:
axs[i, 0].set_ylabel("Pressure (hpa)", fontsize=12, labelpad=10)
sub_img = axs[i, 1].imshow(recon_means[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)
axs[i, 1].invert_yaxis()
if i == 0:
axs[i, 0].set_title("Original", fontsize = 12)
axs[i, 1].set_title("VAE Reconstruction Mean",fontsize=12)
if i == len(targets) - 1:
axs[i, 0].set_xlabel('CRMs', fontsize=12, labelpad=5)
axs[i, 1].set_xlabel('CRMs', fontsize=12, labelpad=5)
fig.colorbar(sub_img, ax=axs[:, 1], label="Vertical Velocity", shrink=0.6)
#axs[i,1].set_yticks([])
#if i < len(targets) - 2:
#axs[i, 0].set_xticks([])
#axs[i, 1].set_xticks([])
# Hide x labels and tick labels for all but bottom plot.
for row in axs:
for ax in row:
ax.label_outer()
plt.savefig('./model_graphs/reconstructions/Paper_target_test_reconstructions_{}.png'.format(id))
plt.savefig('./model_graphs/reconstructions/Paper_target_test_reconstructions_{}.pdf'.format(id))
def reconstruct_targets(vae, test_data, targets, id, dataset_max, dataset_min):
"""
Plot selected test samples next to their reconstruction means and variances and save the figure.
"""
original_samples = []
recon_means = []
recon_vars = []
vmin = 1000
vmax = -1
vmin_var = 1000
vmax_var = -1
for target in targets:
sample = test_data[target]
sample_mean_var = vae.predict(np.expand_dims(sample, 0))
sample_mean = sample_mean_var[0, :128*30]
sample_log_var = sample_mean_var[0, 128*30:]
# Sample reconstruction based on predicted mean and variance
recon_mean = sample_mean
recon_var = np.exp(sample_log_var)
recon_sample = recon_mean + recon_var
# recon_sample = np.random.multivariate_normal(sample_mean, np.exp(sample_log_var) * np.identity(128*30))
# Rescale original sample and reconstruction to original scale
sample = np.interp(sample, (0, 1), (dataset_min, dataset_max))
recon_mean = np.interp(recon_mean, (0, 1), (dataset_min, dataset_max))
recon_sample = np.interp(recon_sample, (0, 1), (dataset_min, dataset_max))
recon_var = recon_sample - recon_mean
# Get min and max of original and reconstructed
max_reconstructed = np.max(recon_mean)
max_recon_var = np.max(recon_var)
print("max of reconstructed", max_reconstructed)
max_sample = np.max(sample.reshape((128*30,)))
print("max of original", max_sample)
min_reconstructed = np.min(recon_mean)
min_recon_var = np.min(recon_var)
print("min of reconstructed", min_reconstructed)
min_sample = np.min(sample.reshape((128*30,)))
print("min of original", min_sample)
# Reshape reconstructed sample
recon_mean = recon_mean.reshape((30, 128))
recon_var = recon_var.reshape((30, 128))
original_samples.append(sample[:, :, 0])
recon_means.append(recon_mean)
recon_vars.append(recon_var)
vmin = min(vmin, min_reconstructed, min_sample)
vmax = max(vmax, max_reconstructed, max_sample)
vmin_var = min(vmin_var, min_recon_var)
vmax_var = max(vmax_var, max_recon_var)
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
fig_size[1] = 8
plt.rcParams["figure.figsize"] = fig_size
fig, axs = plt.subplots(len(targets), 3, sharex=True, sharey=True, constrained_layout=True)
def fmt(x, pos):
return "{:.2f}".format(x)
for i in range(len(targets)):
y_ticks = np.arange(1800, 0, -800)
print("y ticks", y_ticks)
sub_img = axs[i, 0].imshow(original_samples[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)
axs[i, 0].invert_yaxis()
axs[i, 0].set_yticklabels(y_ticks)
if i == 2:
axs[i, 0].set_ylabel("Pressure (mbs)", fontsize=12, labelpad=10)
sub_img = axs[i, 1].imshow(recon_means[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)
axs[i, 1].invert_yaxis()
sub_img_var = axs[i, 2].imshow(recon_vars[i], cmap='RdBu_r', vmin=vmin_var, vmax=vmax_var)
axs[i, 2].invert_yaxis()
if i == 0:
axs[i, 0].set_title("Original")
axs[i, 1].set_title("Reconstruction Mean")
axs[i, 2].set_title("Reconstruction Variance")
if i == len(targets) - 1:
axs[i, 0].set_xlabel('CRMs', fontsize=12, labelpad=5)
axs[i, 1].set_xlabel('CRMs', fontsize=12, labelpad=5)
axs[i, 2].set_xlabel('CRMs', fontsize=12, labelpad=5)
fig.colorbar(sub_img, ax=axs[:, 1], label="Vertical Velocity", shrink=0.6)
cb = fig.colorbar(sub_img_var, ax=axs[:, 2], shrink=0.6, format=ticker.FuncFormatter(fmt))
cb.set_label("Variance", labelpad=10)
# Hide x labels and tick labels for all but bottom plot.
for row in axs:
for ax in row:
ax.label_outer()
plt.savefig('./model_graphs/reconstructions/target_test_reconstructions_{}.png'.format(id))
def sample_reconstructions(vae, train_data, test_data, id, dataset_max, dataset_min):
"""
Reconstruct a few randomly chosen training samples and plot them next to the originals.
"""
original_samples = []
recon_samples = []
min_max = []
for i in range(5):
rand_sample = np.random.randint(0, len(train_data))
sample = train_data[rand_sample]
sample_mean_var = vae.predict(np.expand_dims(sample, 0))
sample_mean = sample_mean_var[0, :128*30]
sample_log_var = sample_mean_var[0, 128*30:]
recon_sample = sample_mean
sample = np.interp(sample, (0, 1), (dataset_min, dataset_max))
recon_sample = np.interp(recon_sample, (0, 1), (dataset_min, dataset_max))
print("original sample", sample.reshape((128*30,)))
print("reconstructed sample", recon_sample)
print(np.max(np.abs(sample.reshape((128*30,)) - recon_sample)))
max_reconstructed = np.max(np.abs(recon_sample))
print("max of reconstructed", max_reconstructed)
max_sample = np.max(sample.reshape((128*30,)))
print("max of original", max_sample)
min_reconstructed = np.min(recon_sample)
print("min of reconstructed", min_reconstructed)
min_sample = np.min(sample.reshape((128*30,)))
print("min of original", min_sample)
recon_sample = recon_sample.reshape((30, 128))
original_samples.append(sample[:, :, 0])
recon_samples.append(recon_sample)
min_max.append((min(min_reconstructed, min_sample), max(max_reconstructed, max_sample)))
fig, axs = plt.subplots(5, 2)
for i in range(5):
vmin = min_max[i][0]
vmax = min_max[i][1]
sub_img = axs[i, 0].imshow(original_samples[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)
axs[i, 0].set_ylim(axs[i, 0].get_ylim()[::-1])
fig.colorbar(sub_img, ax=axs[i, 0])
sub_img = axs[i, 1].imshow(recon_samples[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)
axs[i, 1].set_ylim(axs[i, 1].get_ylim()[::-1])
fig.colorbar(sub_img, ax=axs[i, 1])
plt.savefig('./model_graphs/reconstructions/reconstructed_train_samples_{}.png'.format(id))
def sample_latent_space(vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels, dataset_type):
"""
Create a scatter plot of the latent space containing all test samples.
"""
# Predict latent train & test data
_, _, z_test = vae_encoder.predict(test_data)
_, _, z_train = vae_encoder.predict(train_data)
# Apply scaling and tsne
sc = StandardScaler()
z_train_std = sc.fit_transform(z_train)
z_test_std = sc.transform(z_test)
# Instantiate PCA
pca = PCA(n_components=32)
pca.fit(z_train_std)
z_test_pca = pca.transform(z_test_std)
# Instantiate TSNE
tsne = TSNE(n_components=2)
z_test_tsne = tsne.fit_transform(z_test_pca)
np.save("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Saved_Data/2D_Latent_Space__{}".format(id), z_test_tsne)
if dataset_type == "half_deep_convection":
colors = ["#FF4940", "#3D9AD1"]
# Make plot of latent test data
#plt.scatter(x=z_test_tsne[np.where(test_labels == 0), 0], y=z_test_tsne[np.where(test_labels == 0), 1], c=colors[0],s=1, label="Deep Convection")
#plt.scatter(x=z_test_tsne[np.where(test_labels == 1), 0], y=z_test_tsne[np.where(test_labels == 1), 1], c=colors[1], s=1, label="Shallow Convection")
print("made it here")
convection = np.squeeze(z_test_tsne[np.where(test_labels == 0),:])
no_convection = np.squeeze(z_test_tsne[np.where(test_labels == 1),:])
#fake = np.squeeze(z_test_tsne[np.where(test_labels == 2),:])
plt.scatter(x=convection[:, 0], y=convection[:, 1], c="#FF4940", s=0.4, label="N0 convective Activity")
plt.scatter(x=no_convection[:, 0], y=no_convection[:, 1], c="#3D9AD1", s=0.4, label="Convective Activity")
#plt.scatter(x=fake[:, 0], y=fake[:, 1], c="yellow", s=0.4, label="White Noise")
plt.legend()
else:
plt.scatter(x=z_test_tsne[:, 0], y=z_test_tsne[:, 1], s=1)
plt.colorbar()
plt.savefig('./model_graphs/latent_space/Amazon_binary_latent_space_with_pca_{}.png'.format(id))
def sample_latent_space_var(vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels, dataset_type):
"""
Create a scatter plot of the latent space containing all test samples.
"""
# Predict latent train & test data
test_mean, test_log_var, z_test = vae_encoder.predict(test_data)
train_mean, train_log_var, z_train = vae_encoder.predict(train_data)
#np.save("PCA_Trials/Covariance_Test_Z_Samples.npy", z_test)
#np.save("PCA_Trials/Covariance_Test_Mean_Samples.npy", test_mean)
#np.save("PCA_Trials/Covariance_Test_Log_Var_Samples.npy", test_log_var)
train_mean_var = np.concatenate((train_mean, train_log_var), axis=1)
test_mean_var = np.concatenate((test_mean, test_log_var), axis=1)
np.save("PCA_Trials/83_PCA_Train.npy", train_mean_var)
np.save("PCA_Trials/83_PCA_Test.npy", test_mean_var)
print("Training data")
print(gdfgdfggd)
#print(dfsdsdgsdg)
#np.save("PCA_Trials/Covariance_Train_High_Dim_Latent_Space.npy", train_mean_var)
#np.save("PCA_Trials/Covariance_Test_High_Dim_Latent_Space.npy", test_mean_var)
# Apply scaling and tsne
sc = StandardScaler()
z_train_std = sc.fit_transform(train_mean_var)
#z_train_std = sc.fit_transform(train_log_var)
z_test_std = sc.transform(test_mean_var)
#z_test_std = sc.transform(test_log_var)
# Instantiate PCA
pca = PCA(n_components=2)
pca.fit(z_train_std)
z_test_pca = pca.transform(z_test_std)
np.save("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Synoptic_Latent_Spaces/2D_PCA_Diurnal_Interval_Composite_Anon_Ocean_Region_Latent_Space__{}".format(id), z_test_pca)
print("Made it to the save")
if dataset_type == "half_deep_convection":
colors = ["#FF4940", "#3D9AD1"]
print("made it here")
convection = np.squeeze(z_test_pca[np.where(test_labels == 0),:])
no_convection = np.squeeze(z_test_pca[np.where(test_labels == 1),:])
#fake = np.squeeze(z_test_tsne[np.where(test_labels == 2),:])
plt.scatter(x=convection[:, 0], y=convection[:, 1], c="#FF4940", s=0.4, label="No Convective Activity")
plt.scatter(x=no_convection[:, 0], y=no_convection[:, 1], c="#3D9AD1", s=0.4, label="Convective Activity")
#plt.scatter(x=fake[:, 0], y=fake[:, 1], c="yellow", s=0.4, label="Blue Noise")
plt.legend()
else:
#plt.scatter(x=z_test_tsne[:, 0], y=z_test_tsne[:, 1], c=test_labels, s=1)
plt.scatter(x=z_test_pca[:, 0], y=z_test_pca[:, 1], s=0.1)
plt.colorbar()
plt.savefig('./model_graphs/latent_space/Enthalpy_Covariance_PCA_Mean_Var_latent_space_with_pca_{}.png'.format(id))
def interpolate_points(p1, p2, n_steps=100):
"linear interpolation -- https://openreview.net/pdf?id=S1fQSiCcYm"
ratios = np.linspace(0, 1, num=n_steps)
vectors = list()
for ratio in ratios:
v = (1.0 - ratio) * p1 + ratio * p2
vectors.append(v)
return np.asarray(vectors)
def slerp(count, low, high):
"""Spherical interpolation. val has a range of 0 to 1."""
values = np.linspace(0, 1, num=count)
output_array = np.empty(shape=(count,low.size))
for i in range(len(values)):
val = values[i]
omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)))
so = np.sin(omega)
output_array[i,:] = np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high
return output_array
#https://arxiv.org/pdf/1803.05428.pdf
#https://www.inference.vc/high-dimensional-gaussian-distributions-are-soap-bubble/
def original_slerp(val, low, high):
"""Spherical interpolation. val has a range of 0 to 1. https://github.com/dribnet/plat/blob/master/plat/interpolate.py"""
if val <= 0:
return low
elif val >= 1:
return high
elif np.allclose(low, high):
return low
omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)))
so = np.sin(omega)
return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high
def numpy_slerp(t, p0, p1):
omega = np.arccos(np.dot(p0/np.linalg.norm(p0), p1/np.linalg.norm(p1)))
so = np.sin(omega)
return np.sin((1.0-t)*omega) / so * p0 + np.sin(t*omega)/so * p1
def latent_space_interpolation(vae, decoder, vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels, dataset_type):
sample_one = np.expand_dims(test_data[15880,:,:], axis=0)
sample_two = np.expand_dims(test_data[6548,:,:],axis=0)
test_mean_one, test_log_var_one, z_test_one = vae_encoder.predict(sample_one)
test_mean_two, test_log_var_two, z_test_two = vae_encoder.predict(sample_two)
count = 100
interpolated_images = np.empty(shape=(count,len(z_test_two[0])))
interpolated_orig_images = np.empty(shape=(count,len(sample_one[0])*len(sample_one[0][0])))
values = np.linspace(0, 1, num=count)
for i in range(count):
interpolated_images[i,:]= numpy_slerp(values[i], z_test_one.flatten(),z_test_two.flatten())
interpolated_orig_images[i,:]= numpy_slerp(values[i], sample_one.flatten(),sample_two.flatten())
reconstructed_Image_Series = decoder.predict(interpolated_images)
reconstructed_Image_finals = reconstructed_Image_Series[:,:3840]
np.save("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Interpolation_Data/203_Original_Images_W_Comp_15880_6548.npy", interpolated_orig_images)
np.save("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Interpolation_Data/203_Latent_Images_W_Comp_15880_6548.npy", interpolated_images)
np.save("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Interpolation_Data/203_Reconstructed_Images_W_Comp_15880_6548.npy", reconstructed_Image_finals)
print("Passed the saves")
interpolated_images.shape
num_images = 10
np.random.seed(42)
plt.figure(figsize=(30, 8))
for i, image_idx in enumerate(interpolated_images):
ax = plt.subplot(5, num_images, i + 1)
plt.imshow(interpolated_images[i].reshape(64, 16).T)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_title("Encoded: {}".format(i))
ax = plt.subplot(5, num_images,num_images+ i + 1)
reconstructed_image = decoder.predict(np.expand_dims(interpolated_images[i,:],axis=0))
plt.imshow(np.squeeze(reconstructed_image)[:3840].reshape(128,30).T)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_title("Latent: {}".format(i))
ax = plt.subplot(5, num_images,2*num_images+ i + 1)
plt.imshow(interpolated_orig_images[i].reshape(128,30).T)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_title("Image: {}".format(i))
plt.savefig("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/model_graphs/latent_space_interp/amazon_diurnal_trial.png")
def sample_frob_norm(vae, decoder, vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels, dataset_type):
"""
Compute the Frobenius norm of the difference between the covariance of the test data and the covariance of their decoder reconstructions.
"""
# Predict latent train & test data
test_mean, test_log_var, z_test = vae_encoder.predict(test_data)
print("made it here")
sample_mean_var = decoder.predict(z_test)
sample_mean = sample_mean_var[:, :128*30]
truths = np.reshape(test_data, (len(test_data),30*128))
Rough_Metric = f_norm(truths, sample_mean)
sess = tf.InteractiveSession()
RM = Rough_Metric.eval()
gc.collect()
print(RM.shape)
print(RM)
np.save("Saved_Data/Rough_Overall_FR_Norm__{}.npy".format(id), RM)
print("completed")
def generate_samples(decoder, dataset_min, dataset_max, latent_dim: int, id):
"""
Sample points from prior and send through decoder to get
sample images.
"""
# sample from prior
num_samples = 3
z = np.random.normal(size=(num_samples, latent_dim))
# Get output from decoder
sample_mean_var = decoder.predict(z)
# Extract mean and variance
sample_mean = sample_mean_var[:, :128*30]
sample_log_var = sample_mean_var[:, 128*30:]
fig, axs = plt.subplots(num_samples, 1)
recon_samples = []
for i in range(num_samples):
print(sample_mean[i])
print(sample_mean[i].shape)
# Sample from gaussian decoder outputs
recon_sample = np.random.multivariate_normal(sample_mean[i], np.exp(sample_log_var[i]) * np.identity(128*30))
# Unnormalize sample
recon_sample = np.interp(recon_sample, (0, 1), (dataset_min, dataset_max))
# Reshape
recon_sample = recon_sample.reshape((30, 128))
recon_samples.append(recon_sample)
vmin = np.min(recon_samples)
vmax = np.max(recon_samples)
for i in range(num_samples):
# Show image
sub_img = axs[i].imshow(recon_samples[i], cmap='coolwarm', vmin=vmin, vmax=vmax)
fig.colorbar(sub_img, ax=axs[i])
# Flip y-axis
axs[i].set_ylim(axs[i].get_ylim()[::-1])
# fig.colorbar(sub_img, ax=axs)
plt.tight_layout()
plt.savefig('./model_graphs/generated/generated_samples_{}.png'.format(id))
def main():
args = argument_parsing()
print("Command line args:", args)
f = open("./model_config/config_{}.json".format(args.id))
model_config = json.load(f)
f.close()
train_data = np.load(model_config["data"]["training_data_path"])
test_data = np.load(model_config["data"]["test_data_path"])
# test_labels = np.load(model_config["data"]["test_labels"])[:, 0, 0]
test_labels = np.load(model_config["data"]["test_labels"])
print("Test labels shape:", test_labels.shape, model_config["data"]["test_labels"])
dataset_max = np.load(model_config["data"]["max_scalar"])
dataset_min = np.load(model_config["data"]["min_scalar"])
print("dataset max", dataset_max)
print("dataset min", dataset_min)
img_width = train_data.shape[1]
img_height = train_data.shape[2]
print("Image shape:", img_width, img_height)
# Construct VAE Encoder
encoder_result = encoder_gen((img_width, img_height), model_config["encoder"], args.id)
# Construct VAE Decoder
vae_decoder = decoder_gen(
(img_width, img_height),
model_config["decoder"]
)
_, _, z = encoder_result.vae_encoder(encoder_result.inputs)
x_mu_var = vae_decoder(z)
vae = keras.Model(inputs=[encoder_result.inputs], outputs=[x_mu_var])
# load weights from file
vae.load_weights('./models/model_{}.th'.format(args.id))
print("weights loaded")
train_data = train_data.reshape(train_data.shape+(1,))
test_data = test_data.reshape(test_data.shape+(1,))
# get side by side plots of original vs. reconstructed
# sample_reconstructions(vae, train_data, test_data, args.id, dataset_max, dataset_min)
#reconstruct_targets(vae, test_data, [2, 15, 66 , 85, 94], args.id, dataset_max, dataset_min)
#reconstruct_targets_paper(vae, test_data, [23506, 66 , 23746], args.id, dataset_max, dataset_min)
#reconstruct_targets_paper(vae, test_data, [2, 15, 66 , 85, 94], args.id, dataset_max, dataset_min)
#sample_latent_space(encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type)
#sample_latent_space_var(encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type)
latent_space_interpolation(vae, vae_decoder, encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type)
#sample_frob_norm(vae, vae_decoder, encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type)
#generate_samples(vae_decoder, dataset_min, dataset_max, model_config["encoder"]["latent_dim"], args.id)
def argument_parsing():
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=int, help='This option specifies the id of the config file to use to train the VAE.')
parser.add_argument('--dataset_type', type=str, help='Name of the dataset that model was trained on.')
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
| <filename>MAPS/sample_stats_constrained.py
import argparse
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import keras
from keras import layers
from keras import backend as K
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from train_stats_constrained import encoder_gen, decoder_gen
import numpy as np
import gc
import tensorflow_probability as tfp
from scipy import spatial
def f_norm(true, pred):
covariance_truth = tfp.stats.covariance(true)
covariance_prediction = tfp.stats.covariance(pred)
covariance_truth = tf.cast(covariance_truth, dtype=tf.float32)
f_dist = tf.norm(covariance_prediction-covariance_truth, ord="euclidean")
return f_dist
def reconstruct_targets_paper(vae, test_data, targets, id, dataset_max, dataset_min):
"""
Plot selected test samples next to their VAE reconstruction means and save the figure.
"""
original_samples = []
recon_means = []
recon_vars = []
vmin = 1000
vmax = -1
vmin_var = 1000
vmax_var = -1
for target in targets:
sample = test_data[target]
sample_mean_var = vae.predict(np.expand_dims(sample, 0))
sample_mean = sample_mean_var[0, :128*30]
sample_log_var = sample_mean_var[0, 128*30:]
# Sample reconstruction based on predicted mean and variance
recon_mean = sample_mean
recon_var = np.exp(sample_log_var)
recon_sample = recon_mean + recon_var
# recon_sample = np.random.multivariate_normal(sample_mean, np.exp(sample_log_var) * np.identity(128*30))
# Rescale original sample and reconstruction to original scale
sample = np.interp(sample, (0, 1), (dataset_min, dataset_max))
recon_mean = np.interp(recon_mean, (0, 1), (dataset_min, dataset_max))
recon_sample = np.interp(recon_sample, (0, 1), (dataset_min, dataset_max))
recon_var = recon_sample - recon_mean
# Get min and max of original and reconstructed
max_reconstructed = np.max(recon_mean)
max_recon_var = np.max(recon_var)
print("max of reconstructed", max_reconstructed)
max_sample = np.max(sample.reshape((128*30,)))
print("max of original", max_sample)
min_reconstructed = np.min(recon_mean)
min_recon_var = np.min(recon_var)
print("min of reconstructed", min_reconstructed)
min_sample = np.min(sample.reshape((128*30,)))
print("min of original", min_sample)
# Reshape reconstructed sample
recon_mean = recon_mean.reshape((30, 128))
recon_var = recon_var.reshape((30, 128))
original_samples.append(sample[:, :, 0])
recon_means.append(recon_mean)
recon_vars.append(recon_var)
vmin = min(vmin, min_reconstructed, min_sample)
vmax = max(vmax, max_reconstructed, max_sample)
vmin_var = min(vmin_var, min_recon_var)
vmax_var = max(vmax_var, max_recon_var)
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
fig_size[1] = 8
plt.rcParams["figure.figsize"] = fig_size
fig, axs = plt.subplots(len(targets), 2, sharex=True, sharey=True, constrained_layout=True)
def fmt(x, pos):
return "{:.2f}".format(x)
np.save("CI_Figure_Data/True_Means.npy", original_samples)
np.save("CI_Figure_Data/Reconstruct_Means.npy", recon_means)
for i in range(len(targets)):
y_ticks = np.arange(1400, 0, -400)
#print("y ticks", y_ticks)
sub_img = axs[i, 0].imshow(original_samples[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)
axs[i, 0].invert_yaxis()
axs[i, 0].set_yticklabels(y_ticks)
if i == 2:
axs[i, 0].set_ylabel("Pressure (hpa)", fontsize=12, labelpad=10)
sub_img = axs[i, 1].imshow(recon_means[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)
axs[i, 1].invert_yaxis()
if i == 0:
axs[i, 0].set_title("Original", fontsize = 12)
axs[i, 1].set_title("VAE Reconstruction Mean",fontsize=12)
if i == len(targets) - 1:
axs[i, 0].set_xlabel('CRMs', fontsize=12, labelpad=5)
axs[i, 1].set_xlabel('CRMs', fontsize=12, labelpad=5)
fig.colorbar(sub_img, ax=axs[:, 1], label="Vertical Velocity", shrink=0.6)
#axs[i,1].set_yticks([])
#if i < len(targets) - 2:
#axs[i, 0].set_xticks([])
#axs[i, 1].set_xticks([])
# Hide x labels and tick labels for all but bottom plot.
for row in axs:
for ax in row:
ax.label_outer()
plt.savefig('./model_graphs/reconstructions/Paper_target_test_reconstructions_{}.png'.format(id))
plt.savefig('./model_graphs/reconstructions/Paper_target_test_reconstructions_{}.pdf'.format(id))
def reconstruct_targets(vae, test_data, targets, id, dataset_max, dataset_min):
"""
Plot selected test samples next to their reconstruction means and variances and save the figure.
"""
original_samples = []
recon_means = []
recon_vars = []
vmin = 1000
vmax = -1
vmin_var = 1000
vmax_var = -1
for target in targets:
sample = test_data[target]
sample_mean_var = vae.predict(np.expand_dims(sample, 0))
sample_mean = sample_mean_var[0, :128*30]
sample_log_var = sample_mean_var[0, 128*30:]
# Sample reconstruction based on predicted mean and variance
recon_mean = sample_mean
recon_var = np.exp(sample_log_var)
recon_sample = recon_mean + recon_var
# recon_sample = np.random.multivariate_normal(sample_mean, np.exp(sample_log_var) * np.identity(128*30))
# Rescale original sample and reconstruction to original scale
sample = np.interp(sample, (0, 1), (dataset_min, dataset_max))
recon_mean = np.interp(recon_mean, (0, 1), (dataset_min, dataset_max))
recon_sample = np.interp(recon_sample, (0, 1), (dataset_min, dataset_max))
recon_var = recon_sample - recon_mean
# Get min and max of original and reconstructed
max_reconstructed = np.max(recon_mean)
max_recon_var = np.max(recon_var)
print("max of reconstructed", max_reconstructed)
max_sample = np.max(sample.reshape((128*30,)))
print("max of original", max_sample)
min_reconstructed = np.min(recon_mean)
min_recon_var = np.min(recon_var)
print("min of reconstructed", min_reconstructed)
min_sample = np.min(sample.reshape((128*30,)))
print("min of original", min_sample)
# Reshape reconstructed sample
recon_mean = recon_mean.reshape((30, 128))
recon_var = recon_var.reshape((30, 128))
original_samples.append(sample[:, :, 0])
recon_means.append(recon_mean)
recon_vars.append(recon_var)
vmin = min(vmin, min_reconstructed, min_sample)
vmax = max(vmax, max_reconstructed, max_sample)
vmin_var = min(vmin_var, min_recon_var)
vmax_var = max(vmax_var, max_recon_var)
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
fig_size[1] = 8
plt.rcParams["figure.figsize"] = fig_size
fig, axs = plt.subplots(len(targets), 3, sharex=True, sharey=True, constrained_layout=True)
def fmt(x, pos):
return "{:.2f}".format(x)
for i in range(len(targets)):
y_ticks = np.arange(1800, 0, -800)
print("y ticks", y_ticks)
sub_img = axs[i, 0].imshow(original_samples[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)
axs[i, 0].invert_yaxis()
axs[i, 0].set_yticklabels(y_ticks)
if i == 2:
axs[i, 0].set_ylabel("Pressure (mbs)", fontsize=12, labelpad=10)
sub_img = axs[i, 1].imshow(recon_means[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)
axs[i, 1].invert_yaxis()
sub_img_var = axs[i, 2].imshow(recon_vars[i], cmap='RdBu_r', vmin=vmin_var, vmax=vmax_var)
axs[i, 2].invert_yaxis()
if i == 0:
axs[i, 0].set_title("Original")
axs[i, 1].set_title("Reconstruction Mean")
axs[i, 2].set_title("Reconstruction Variance")
if i == len(targets) - 1:
axs[i, 0].set_xlabel('CRMs', fontsize=12, labelpad=5)
axs[i, 1].set_xlabel('CRMs', fontsize=12, labelpad=5)
axs[i, 2].set_xlabel('CRMs', fontsize=12, labelpad=5)
fig.colorbar(sub_img, ax=axs[:, 1], label="Vertical Velocity", shrink=0.6)
cb = fig.colorbar(sub_img_var, ax=axs[:, 2], shrink=0.6, format=ticker.FuncFormatter(fmt))
cb.set_label("Variance", labelpad=10)
# Hide x labels and tick labels for all but bottom plot.
for row in axs:
for ax in row:
ax.label_outer()
plt.savefig('./model_graphs/reconstructions/target_test_reconstructions_{}.png'.format(id))
def sample_reconstructions(vae, train_data, test_data, id, dataset_max, dataset_min):
"""
Reconstruct a few randomly chosen training samples and plot them next to the originals.
"""
original_samples = []
recon_samples = []
min_max = []
for i in range(5):
rand_sample = np.random.randint(0, len(train_data))
sample = train_data[rand_sample]
sample_mean_var = vae.predict(np.expand_dims(sample, 0))
sample_mean = sample_mean_var[0, :128*30]
sample_log_var = sample_mean_var[0, 128*30:]
recon_sample = sample_mean
sample = np.interp(sample, (0, 1), (dataset_min, dataset_max))
recon_sample = np.interp(recon_sample, (0, 1), (dataset_min, dataset_max))
print("original sample", sample.reshape((128*30,)))
print("reconstructed sample", recon_sample)
print(np.max(np.abs(sample.reshape((128*30,)) - recon_sample)))
max_reconstructed = np.max(np.abs(recon_sample))
print("max of reconstructed", max_reconstructed)
max_sample = np.max(sample.reshape((128*30,)))
print("max of original", max_sample)
min_reconstructed = np.min(recon_sample)
print("min of reconstructed", min_reconstructed)
min_sample = np.min(sample.reshape((128*30,)))
print("min of original", min_sample)
recon_sample = recon_sample.reshape((30, 128))
original_samples.append(sample[:, :, 0])
recon_samples.append(recon_sample)
min_max.append((min(min_reconstructed, min_sample), max(max_reconstructed, max_sample)))
fig, axs = plt.subplots(5, 2)
for i in range(5):
vmin = min_max[i][0]
vmax = min_max[i][1]
sub_img = axs[i, 0].imshow(original_samples[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)
axs[i, 0].set_ylim(axs[i, 0].get_ylim()[::-1])
fig.colorbar(sub_img, ax=axs[i, 0])
sub_img = axs[i, 1].imshow(recon_samples[i], cmap='RdBu_r', vmin=vmin, vmax=vmax)
axs[i, 1].set_ylim(axs[i, 1].get_ylim()[::-1])
fig.colorbar(sub_img, ax=axs[i, 1])
plt.savefig('./model_graphs/reconstructions/reconstructed_train_samples_{}.png'.format(id))
def sample_latent_space(vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels, dataset_type):
"""
Create a scatter plot of the latent space containing all test samples.
"""
# Predict latent train & test data
_, _, z_test = vae_encoder.predict(test_data)
_, _, z_train = vae_encoder.predict(train_data)
# Apply scaling and tsne
sc = StandardScaler()
z_train_std = sc.fit_transform(z_train)
z_test_std = sc.transform(z_test)
# Instantiate PCA
pca = PCA(n_components=32)
pca.fit(z_train_std)
z_test_pca = pca.transform(z_test_std)
# Instantiate TSNE
tsne = TSNE(n_components=2)
z_test_tsne = tsne.fit_transform(z_test_pca)
np.save("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Saved_Data/2D_Latent_Space__{}".format(id), z_test_tsne)
if dataset_type == "half_deep_convection":
colors = ["#FF4940", "#3D9AD1"]
# Make plot of latent test data
#plt.scatter(x=z_test_tsne[np.where(test_labels == 0), 0], y=z_test_tsne[np.where(test_labels == 0), 1], c=colors[0],s=1, label="Deep Convection")
#plt.scatter(x=z_test_tsne[np.where(test_labels == 1), 0], y=z_test_tsne[np.where(test_labels == 1), 1], c=colors[1], s=1, label="Shallow Convection")
print("made it here")
convection = np.squeeze(z_test_tsne[np.where(test_labels == 0),:])
no_convection = np.squeeze(z_test_tsne[np.where(test_labels == 1),:])
#fake = np.squeeze(z_test_tsne[np.where(test_labels == 2),:])
plt.scatter(x=convection[:, 0], y=convection[:, 1], c="#FF4940", s=0.4, label="N0 convective Activity")
plt.scatter(x=no_convection[:, 0], y=no_convection[:, 1], c="#3D9AD1", s=0.4, label="Convective Activity")
#plt.scatter(x=fake[:, 0], y=fake[:, 1], c="yellow", s=0.4, label="White Noise")
plt.legend()
else:
plt.scatter(x=z_test_tsne[:, 0], y=z_test_tsne[:, 1], s=1)
plt.colorbar()
plt.savefig('./model_graphs/latent_space/Amazon_binary_latent_space_with_pca_{}.png'.format(id))
def sample_latent_space_var(vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels, dataset_type):
"""
Create a scatter plot of the latent space containing all test samples.
"""
# Predict latent train & test data
test_mean, test_log_var, z_test = vae_encoder.predict(test_data)
train_mean, train_log_var, z_train = vae_encoder.predict(train_data)
#np.save("PCA_Trials/Covariance_Test_Z_Samples.npy", z_test)
#np.save("PCA_Trials/Covariance_Test_Mean_Samples.npy", test_mean)
#np.save("PCA_Trials/Covariance_Test_Log_Var_Samples.npy", test_log_var)
train_mean_var = np.concatenate((train_mean, train_log_var), axis=1)
test_mean_var = np.concatenate((test_mean, test_log_var), axis=1)
np.save("PCA_Trials/83_PCA_Train.npy", train_mean_var)
np.save("PCA_Trials/83_PCA_Test.npy", test_mean_var)
print("Training data")
print(gdfgdfggd)
#print(dfsdsdgsdg)
#np.save("PCA_Trials/Covariance_Train_High_Dim_Latent_Space.npy", train_mean_var)
#np.save("PCA_Trials/Covariance_Test_High_Dim_Latent_Space.npy", test_mean_var)
# Apply scaling and tsne
sc = StandardScaler()
z_train_std = sc.fit_transform(train_mean_var)
#z_train_std = sc.fit_transform(train_log_var)
z_test_std = sc.transform(test_mean_var)
#z_test_std = sc.transform(test_log_var)
# Instantiate PCA
pca = PCA(n_components=2)
pca.fit(z_train_std)
z_test_pca = pca.transform(z_test_std)
np.save("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Synoptic_Latent_Spaces/2D_PCA_Diurnal_Interval_Composite_Anon_Ocean_Region_Latent_Space__{}".format(id), z_test_pca)
print("Made it to the save")
if dataset_type == "half_deep_convection":
colors = ["#FF4940", "#3D9AD1"]
print("made it here")
convection = np.squeeze(z_test_pca[np.where(test_labels == 0),:])
no_convection = np.squeeze(z_test_pca[np.where(test_labels == 1),:])
#fake = np.squeeze(z_test_tsne[np.where(test_labels == 2),:])
plt.scatter(x=convection[:, 0], y=convection[:, 1], c="#FF4940", s=0.4, label="No Convective Activity")
plt.scatter(x=no_convection[:, 0], y=no_convection[:, 1], c="#3D9AD1", s=0.4, label="Convective Activity")
#plt.scatter(x=fake[:, 0], y=fake[:, 1], c="yellow", s=0.4, label="Blue Noise")
plt.legend()
else:
#plt.scatter(x=z_test_tsne[:, 0], y=z_test_tsne[:, 1], c=test_labels, s=1)
plt.scatter(x=z_test_pca[:, 0], y=z_test_pca[:, 1], s=0.1)
plt.colorbar()
plt.savefig('./model_graphs/latent_space/Enthalpy_Covariance_PCA_Mean_Var_latent_space_with_pca_{}.png'.format(id))
def interpolate_points(p1, p2, n_steps=100):
"linear interpolation -- https://openreview.net/pdf?id=S1fQSiCcYm"
ratios = np.linspace(0, 1, num=n_steps)
vectors = list()
for ratio in ratios:
v = (1.0 - ratio) * p1 + ratio * p2
vectors.append(v)
return np.asarray(vectors)
def slerp(count, low, high):
"""Spherical interpolation. val has a range of 0 to 1."""
values = np.linspace(0, 1, num=count)
output_array = np.empty(shape=(count,low.size))
for i in range(len(values)):
val = values[i]
omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)))
so = np.sin(omega)
output_array[i,:] = np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high
return output_array
#https://arxiv.org/pdf/1803.05428.pdf
#https://www.inference.vc/high-dimensional-gaussian-distributions-are-soap-bubble/
def original_slerp(val, low, high):
"""Spherical interpolation. val has a range of 0 to 1. https://github.com/dribnet/plat/blob/master/plat/interpolate.py"""
if val <= 0:
return low
elif val >= 1:
return high
elif np.allclose(low, high):
return low
omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)))
so = np.sin(omega)
return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high
def numpy_slerp(t, p0, p1):
omega = np.arccos(np.dot(p0/np.linalg.norm(p0), p1/np.linalg.norm(p1)))
so = np.sin(omega)
return np.sin((1.0-t)*omega) / so * p0 + np.sin(t*omega)/so * p1
def latent_space_interpolation(vae, decoder, vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels, dataset_type):
sample_one = np.expand_dims(test_data[15880,:,:], axis=0)
sample_two = np.expand_dims(test_data[6548,:,:],axis=0)
test_mean_one, test_log_var_one, z_test_one = vae_encoder.predict(sample_one)
test_mean_two, test_log_var_two, z_test_two = vae_encoder.predict(sample_two)
count = 100
interpolated_images = np.empty(shape=(count,len(z_test_two[0])))
interpolated_orig_images = np.empty(shape=(count,len(sample_one[0])*len(sample_one[0][0])))
values = np.linspace(0, 1, num=count)
for i in range(count):
interpolated_images[i,:]= numpy_slerp(values[i], z_test_one.flatten(),z_test_two.flatten())
interpolated_orig_images[i,:]= numpy_slerp(values[i], sample_one.flatten(),sample_two.flatten())
reconstructed_Image_Series = decoder.predict(interpolated_images)
reconstructed_Image_finals = reconstructed_Image_Series[:,:3840]
np.save("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Interpolation_Data/203_Original_Images_W_Comp_15880_6548.npy", interpolated_orig_images)
np.save("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Interpolation_Data/203_Latent_Images_W_Comp_15880_6548.npy", interpolated_images)
np.save("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/Interpolation_Data/203_Reconstructed_Images_W_Comp_15880_6548.npy", reconstructed_Image_finals)
print("Passed the saves")
interpolated_images.shape
num_images = 10
np.random.seed(42)
plt.figure(figsize=(30, 8))
for i, image_idx in enumerate(interpolated_images):
ax = plt.subplot(5, num_images, i + 1)
plt.imshow(interpolated_images[i].reshape(64, 16).T)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_title("Encoded: {}".format(i))
ax = plt.subplot(5, num_images,num_images+ i + 1)
reconstructed_image = decoder.predict(np.expand_dims(interpolated_images[i,:],axis=0))
plt.imshow(np.squeeze(reconstructed_image)[:3840].reshape(128,30).T)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_title("Latent: {}".format(i))
ax = plt.subplot(5, num_images,2*num_images+ i + 1)
plt.imshow(interpolated_orig_images[i].reshape(128,30).T)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.set_title("Image: {}".format(i))
plt.savefig("/fast/gmooers/gmooers_git/CBRAIN-CAM/MAPS/model_graphs/latent_space_interp/amazon_diurnal_trial.png")
def sample_frob_norm(vae, decoder, vae_encoder, train_data, test_data, id, dataset_min, dataset_max, test_labels, dataset_type):
"""
Compute the Frobenius norm of the difference between the covariance of the test data and the covariance of their decoder reconstructions.
"""
# Predict latent train & test data
test_mean, test_log_var, z_test = vae_encoder.predict(test_data)
print("made it here")
sample_mean_var = decoder.predict(z_test)
sample_mean = sample_mean_var[:, :128*30]
truths = np.reshape(test_data, (len(test_data),30*128))
Rough_Metric = f_norm(truths, sample_mean)
sess = tf.InteractiveSession()
RM = Rough_Metric.eval()
gc.collect()
print(RM.shape)
print(RM)
np.save("Saved_Data/Rough_Overall_FR_Norm__{}.npy".format(id), RM)
print("completed")
def generate_samples(decoder, dataset_min, dataset_max, latent_dim: int, id):
"""
Sample points from prior and send through decoder to get
sample images.
"""
# sample from prior
num_samples = 3
z = np.random.normal(size=(num_samples, latent_dim))
# Get output from decoder
sample_mean_var = decoder.predict(z)
# Extract mean and variance
sample_mean = sample_mean_var[:, :128*30]
sample_log_var = sample_mean_var[:, 128*30:]
fig, axs = plt.subplots(num_samples, 1)
recon_samples = []
for i in range(num_samples):
print(sample_mean[i])
print(sample_mean[i].shape)
# Sample from gaussian decoder outputs
recon_sample = np.random.multivariate_normal(sample_mean[i], np.exp(sample_log_var[i]) * np.identity(128*30))
# Unnormalize sample
recon_sample = np.interp(recon_sample, (0, 1), (dataset_min, dataset_max))
# Reshape
recon_sample = recon_sample.reshape((30, 128))
recon_samples.append(recon_sample)
vmin = np.min(recon_samples)
vmax = np.max(recon_samples)
for i in range(num_samples):
# Show image
sub_img = axs[i].imshow(recon_samples[i], cmap='coolwarm', vmin=vmin, vmax=vmax)
fig.colorbar(sub_img, ax=axs[i])
# Flip y-axis
axs[i].set_ylim(axs[i].get_ylim()[::-1])
# fig.colorbar(sub_img, ax=axs)
plt.tight_layout()
plt.savefig('./model_graphs/generated/generated_samples_{}.png'.format(id))
def main():
args = argument_parsing()
print("Command line args:", args)
f = open("./model_config/config_{}.json".format(args.id))
model_config = json.load(f)
f.close()
train_data = np.load(model_config["data"]["training_data_path"])
test_data = np.load(model_config["data"]["test_data_path"])
# test_labels = np.load(model_config["data"]["test_labels"])[:, 0, 0]
test_labels = np.load(model_config["data"]["test_labels"])
print("Test labels shape:", test_labels.shape, model_config["data"]["test_labels"])
dataset_max = np.load(model_config["data"]["max_scalar"])
dataset_min = np.load(model_config["data"]["min_scalar"])
print("dataset max", dataset_max)
print("dataset min", dataset_min)
img_width = train_data.shape[1]
img_height = train_data.shape[2]
print("Image shape:", img_width, img_height)
# Construct VAE Encoder
encoder_result = encoder_gen((img_width, img_height), model_config["encoder"], args.id)
# Construct VAE Decoder
vae_decoder = decoder_gen(
(img_width, img_height),
model_config["decoder"]
)
_, _, z = encoder_result.vae_encoder(encoder_result.inputs)
x_mu_var = vae_decoder(z)
vae = keras.Model(inputs=[encoder_result.inputs], outputs=[x_mu_var])
# load weights from file
vae.load_weights('./models/model_{}.th'.format(args.id))
print("weights loaded")
train_data = train_data.reshape(train_data.shape+(1,))
test_data = test_data.reshape(test_data.shape+(1,))
# get side by side plots of original vs. reconstructed
# sample_reconstructions(vae, train_data, test_data, args.id, dataset_max, dataset_min)
#reconstruct_targets(vae, test_data, [2, 15, 66 , 85, 94], args.id, dataset_max, dataset_min)
#reconstruct_targets_paper(vae, test_data, [23506, 66 , 23746], args.id, dataset_max, dataset_min)
#reconstruct_targets_paper(vae, test_data, [2, 15, 66 , 85, 94], args.id, dataset_max, dataset_min)
#sample_latent_space(encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type)
#sample_latent_space_var(encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type)
latent_space_interpolation(vae, vae_decoder, encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type)
#sample_frob_norm(vae, vae_decoder, encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type)
#generate_samples(vae_decoder, dataset_min, dataset_max, model_config["encoder"]["latent_dim"], args.id)
def argument_parsing():
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=int, help='This option specifies the id of the config file to use to train the VAE.')
parser.add_argument('--dataset_type', type=str, help='Name of the dataset that model was trained on.')
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
| en | 0.475202 | TODO # Sample reconstruction based on predicted mean and variance # recon_sample = np.random.multivariate_normal(sample_mean, np.exp(sample_log_var) * np.identity(128*30)) # Rescale original sample and reconstruction to original scale # Get min and max of original and reconstructed # Reshape reconstructed sample #print("y ticks", y_ticks) #axs[i,1].set_yticks([]) #if i < len(targets) - 2: #axs[i, 0].set_xticks([]) #axs[i, 1].set_xticks([]) # Hide x labels and tick labels for all but bottom plot. TODO # Sample reconstruction based on predicted mean and variance # recon_sample = np.random.multivariate_normal(sample_mean, np.exp(sample_log_var) * np.identity(128*30)) # Rescale original sample and reconstruction to original scale # Get min and max of original and reconstructed # Reshape reconstructed sample # Hide x labels and tick labels for all but bottom plot. TODO Create a scatter plot of the latent space containing all test samples. # Predict latent train & test data # Apply scaling and tsne # Instantiate PCA # Instantiate TSNE # Make plot of latent test data #plt.scatter(x=z_test_tsne[np.where(test_labels == 0), 0], y=z_test_tsne[np.where(test_labels == 0), 1], c=colors[0],s=1, label="Deep Convection") #plt.scatter(x=z_test_tsne[np.where(test_labels == 1), 0], y=z_test_tsne[np.where(test_labels == 1), 1], c=colors[1], s=1, label="Shallow Convection") #fake = np.squeeze(z_test_tsne[np.where(test_labels == 2),:]) #plt.scatter(x=fake[:, 0], y=fake[:, 1], c="yellow", s=0.4, label="White Noise") Create a scatter plot of the latent space containing all test samples. # Predict latent train & test data #np.save("PCA_Trials/Covariance_Test_Z_Samples.npy", z_test) #np.save("PCA_Trials/Covariance_Test_Mean_Samples.npy", test_mean) #np.save("PCA_Trials/Covariance_Test_Log_Var_Samples.npy", test_log_var) #print(dfsdsdgsdg) #np.save("PCA_Trials/Covariance_Train_High_Dim_Latent_Space.npy", train_mean_var) #np.save("PCA_Trials/Covariance_Test_High_Dim_Latent_Space.npy", test_mean_var) # Apply scaling and tsne #z_train_std = sc.fit_transform(train_log_var) #z_test_std = sc.transform(test_log_var) # Instantiate PCA #fake = np.squeeze(z_test_tsne[np.where(test_labels == 2),:]) #plt.scatter(x=fake[:, 0], y=fake[:, 1], c="yellow", s=0.4, label="Blue Noise") #plt.scatter(x=z_test_tsne[:, 0], y=z_test_tsne[:, 1], c=test_labels, s=1) Spherical interpolation. val has a range of 0 to 1. #https://arxiv.org/pdf/1803.05428.pdf #https://www.inference.vc/high-dimensional-gaussian-distributions-are-soap-bubble/ Spherical interpolation. val has a range of 0 to 1. https://github.com/dribnet/plat/blob/master/plat/interpolate.py Create a scatter plot of the latent space containing all test samples. # Predict latent train & test data Sample points from prior and send through decoder to get sample images. # sample from prior # Get output from decoder # Extract mean and variance # Sample from gaussian decoder outputs # Unnormalize sample # Reshape # Show image # Flip y-axis # fig.colorbar(sub_img, ax=axs) # test_labels = np.load(model_config["data"]["test_labels"])[:, 0, 0] # Construct VAE Encoder # Construct VAE Decoder # load weights from file # get side by side plots of original vs. 
reconstructed # sample_reconstructions(vae, train_data, test_data, args.id, dataset_max, dataset_min) #reconstruct_targets(vae, test_data, [2, 15, 66 , 85, 94], args.id, dataset_max, dataset_min) #reconstruct_targets_paper(vae, test_data, [23506, 66 , 23746], args.id, dataset_max, dataset_min) #reconstruct_targets_paper(vae, test_data, [2, 15, 66 , 85, 94], args.id, dataset_max, dataset_min) #sample_latent_space(encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type) #sample_latent_space_var(encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type) #sample_frob_norm(vae, vae_decoder, encoder_result.vae_encoder, train_data, test_data, args.id, dataset_min, dataset_max, test_labels, args.dataset_type) #generate_samples(vae_decoder, dataset_min, dataset_max, model_config["encoder"]["latent_dim"], args.id) | 2.074388 | 2 |
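`numpy_slerp` in the record above walks between two latent vectors along a great circle instead of a straight line, which is why the interpolation routine builds its frames with it. A self-contained sketch of the same spherical interpolation, with a guard for nearly parallel vectors and a quick endpoint check, is shown below; the vector length and step count are arbitrary illustration values.

```python
import numpy as np

def slerp(t, p0, p1):
    """Spherical interpolation between p0 and p1 for t in [0, 1]."""
    u0 = p0 / np.linalg.norm(p0)
    u1 = p1 / np.linalg.norm(p1)
    omega = np.arccos(np.clip(np.dot(u0, u1), -1.0, 1.0))
    if np.isclose(omega, 0.0):        # nearly parallel: fall back to linear interpolation
        return (1.0 - t) * p0 + t * p1
    so = np.sin(omega)
    return np.sin((1.0 - t) * omega) / so * p0 + np.sin(t * omega) / so * p1

rng = np.random.default_rng(0)
a, b = rng.normal(size=128), rng.normal(size=128)
path = np.stack([slerp(t, a, b) for t in np.linspace(0.0, 1.0, 10)])
assert np.allclose(path[0], a) and np.allclose(path[-1], b)
print(path.shape)                     # (10, 128)
```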
scripts/install/win/OpenKinect/freenect-examples/setup.py | ffsouza/SimpleCV | 2 | 6632929 | #!/usr/bin/env python
from distutils.core import setup
from distutils.extension import Extension
import re
def get_cython_version():
"""
Returns:
Version as a pair of ints (major, minor)
Raises:
ImportError: Can't load cython or find version
"""
import Cython.Compiler.Main
match = re.search(r'^([0-9]+)\.([0-9]+)',
Cython.Compiler.Main.Version.version)
try:
return map(int, match.groups())
except AttributeError:
raise ImportError
# Only use Cython if it is available, else just use the pre-generated files
try:
cython_version = get_cython_version()
# Requires Cython version 0.13 and up
if cython_version[0] == 0 and cython_version[1] < 13:
raise ImportError
from Cython.Distutils import build_ext
source_ext = '.pyx'
cmdclass = {'build_ext': build_ext}
except ImportError:
source_ext = '.c'
cmdclass = {}
ext_modules = [Extension("freenect", ["freenect" + source_ext],
libraries=['usb-1.0', 'freenect', 'freenect_sync'],
runtime_library_dirs=[ 'C:/Users/kscottz/Desktop/kinect/libusb-win32-bin-1.2.4.0/lib/msvc',
#'C:/Users/kscottz/Desktop/kinect/Pre-built.2/lib',
#
#'C:/Users/kscottz/Desktop/kinect/glut-3.7.6-bin', 'C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/build/lib/Release',
#'/usr/local/lib64',
#'/usr/lib/'
],
extra_compile_args=[
'-I','C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/include',
'-I','C:/Users/kscottz/Desktop/kinect/libusb-win32-bin-1.2.4.0/include',
'-I','C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/platform/windows',
'-I','C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/platform/windows/libusb10emu',
'-I','C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/src',
'-I','C:/Python27/Lib/site-packages/numpy/core/include',
'-I','C:/Program Files/Microsoft Visual Studio 10.0/VC/include',
'-I', '../../include/',
'-I','C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/wrappers/c_sync'
])]
setup(name='freenect',
cmdclass=cmdclass,
ext_modules=ext_modules)
| #!/usr/bin/env python
from distutils.core import setup
from distutils.extension import Extension
import re
def get_cython_version():
"""
Returns:
Version as a pair of ints (major, minor)
Raises:
ImportError: Can't load cython or find version
"""
import Cython.Compiler.Main
match = re.search(r'^([0-9]+)\.([0-9]+)',
Cython.Compiler.Main.Version.version)
try:
return map(int, match.groups())
except AttributeError:
raise ImportError
# Only use Cython if it is available, else just use the pre-generated files
try:
cython_version = get_cython_version()
# Requires Cython version 0.13 and up
if cython_version[0] == 0 and cython_version[1] < 13:
raise ImportError
from Cython.Distutils import build_ext
source_ext = '.pyx'
cmdclass = {'build_ext': build_ext}
except ImportError:
source_ext = '.c'
cmdclass = {}
ext_modules = [Extension("freenect", ["freenect" + source_ext],
libraries=['usb-1.0', 'freenect', 'freenect_sync'],
runtime_library_dirs=[ 'C:/Users/kscottz/Desktop/kinect/libusb-win32-bin-1.2.4.0/lib/msvc',
#'C:/Users/kscottz/Desktop/kinect/Pre-built.2/lib',
#
#'C:/Users/kscottz/Desktop/kinect/glut-3.7.6-bin', 'C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/build/lib/Release',
#'/usr/local/lib64',
#'/usr/lib/'
],
extra_compile_args=[
'-I','C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/include',
'-I','C:/Users/kscottz/Desktop/kinect/libusb-win32-bin-1.2.4.0/include',
'-I','C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/platform/windows',
'-I','C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/platform/windows/libusb10emu',
'-I','C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/src',
'-I','C:/Python27/Lib/site-packages/numpy/core/include',
'-I','C:/Program Files/Microsoft Visual Studio 10.0/VC/include',
'-I', '../../include/',
'-I','C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/wrappers/c_sync'
])]
setup(name='freenect',
cmdclass=cmdclass,
ext_modules=ext_modules)
| en | 0.630395 | #!/usr/bin/env python Returns:
Version as a pair of ints (major, minor)
Raises:
ImportError: Can't load cython or find version # Only use Cython if it is available, else just use the pre-generated files # Requires Cython version 0.13 and up #'C:/Users/kscottz/Desktop/kinect/Pre-built.2/lib', # #'C:/Users/kscottz/Desktop/kinect/glut-3.7.6-bin', 'C:/Users/kscottz/Desktop/kinect/AugustBuild/libfreenect/build/lib/Release', #'/usr/local/lib64', #'/usr/lib/' | 2.141176 | 2 |
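The setup script in the record above compiles from the `.pyx` source only when a sufficiently recent Cython can be imported and otherwise falls back to the pre-generated C file. A stripped-down sketch of that optional-dependency pattern, without the machine-specific include paths, is shown below; the module name is a placeholder and only the 0.13 version threshold is taken from the record.

```python
from distutils.core import setup
from distutils.extension import Extension

try:
    import Cython
    from Cython.Distutils import build_ext
    parts = Cython.__version__.split(".")
    if (int(parts[0]), int(parts[1])) < (0, 13):
        raise ImportError("Cython is too old")
    source_ext = ".pyx"
    cmdclass = {"build_ext": build_ext}
except ImportError:
    source_ext = ".c"                 # fall back to the pre-generated C source
    cmdclass = {}

setup(
    name="example_ext",
    cmdclass=cmdclass,
    ext_modules=[Extension("example_ext", ["example_ext" + source_ext])],
)
```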
Concurrency/codeSample/Part11_Alternatives_to_Threads_and_Processes/generator.py | Chyi341152/pyConPaper | 1 | 6632930 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# generator.py
# A very simple example of using generators to implement a form of cooperative multitasking
def countdown_task(n):
while n > 0:
print(n)
yield
n -= 1
# A list of tasks to run
from collections import deque # list-like container with fast appends and pops on either end
tasks = deque([
countdown_task(5), # Each task is a generator function
countdown_task(10),
countdown_task(15)
])
def scheduler(tasks):
while tasks:
task = tasks.popleft()
try:
next(task) # Run to the next yield
tasks.append(task) # Reschedule
except StopIteration:
pass
# Run it
scheduler(tasks) | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# generator.py
# A very simple example of using generators to implement a form of cooperative multitasking
def countdown_task(n):
while n > 0:
print(n)
yield
n -= 1
# A list of tasks to run
from collections import deque # list-like container with fast appends and pops on either end
tasks = deque([
countdown_task(5), # Each task is a generator function
countdown_task(10),
countdown_task(15)
])
def scheduler(tasks):
while tasks:
task = tasks.popleft()
try:
next(task) # Run to the next yield
tasks.append(task) # Reschedule
except StopIteration:
pass
# Run it
scheduler(tasks) | en | 0.744852 | #!/usr/bin/env python3 # -*- coding:utf-8 -*- # generator.py # A very simple example of using generators to implement a form of cooperative multitasking # A list of tasks to run # list-like container with fast appends and pops on either end # Each task is a generator function # Run to the next yield # Reschedule # Run it | 4.152406 | 4 |
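The round-robin scheduler in the record above advances each generator once per pass and silently drops it when it raises `StopIteration`. The sketch below keeps that structure and adds one small extension, letting a task hand a new generator back to the scheduler by yielding it; the task names and counts are arbitrary.

```python
from collections import deque

def countdown(label, n):
    while n > 0:
        print(label, n)
        yield
        n -= 1

def scheduler(tasks):
    """Round-robin over generator tasks; a yielded generator becomes a new task."""
    queue = deque(tasks)
    while queue:
        task = queue.popleft()
        try:
            result = next(task)       # run the task up to its next yield
        except StopIteration:
            continue                  # finished task is not rescheduled
        if result is not None:
            queue.append(result)      # schedule the spawned task
        queue.append(task)            # reschedule the current task

def spawner():
    yield countdown("child", 2)       # hand a new task to the scheduler
    yield

scheduler([countdown("main", 3), spawner()])
```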
backend_django/posts/migrations/0015_postrating.py | Neo1277/blogDjangoAndReact | 0 | 6632931 | # Generated by Django 3.1 on 2021-05-22 15:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posts', '0014_auto_20210101_1839'),
]
operations = [
migrations.CreateModel(
name='PostRating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.CharField(choices=[('1', 'One star'), ('2', 'Two stars'), ('3', 'Three stars'), ('4', 'Four stars'), ('5', 'Five stars')], max_length=1)),
('datetime', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratingus', to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratingps', to='posts.post')),
],
),
]
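# For reference, a hedged reconstruction of the model that this migration appears to
# create; the real posts/models.py is not part of this record, so the class below is
# inferred from the CreateModel operation above and kept as a comment.
# class PostRating(models.Model):
#     RATING_CHOICES = [
#         ('1', 'One star'), ('2', 'Two stars'), ('3', 'Three stars'),
#         ('4', 'Four stars'), ('5', 'Five stars'),
#     ]
#     rating = models.CharField(max_length=1, choices=RATING_CHOICES)
#     datetime = models.DateTimeField(auto_now_add=True)
#     author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='ratingus')
#     post = models.ForeignKey('posts.Post', on_delete=models.CASCADE, related_name='ratingps')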
| # Generated by Django 3.1 on 2021-05-22 15:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posts', '0014_auto_20210101_1839'),
]
operations = [
migrations.CreateModel(
name='PostRating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.CharField(choices=[('1', 'One star'), ('2', 'Two stars'), ('3', 'Three stars'), ('4', 'Four stars'), ('5', 'Five stars')], max_length=1)),
('datetime', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratingus', to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratingps', to='posts.post')),
],
),
]
| en | 0.830103 | # Generated by Django 3.1 on 2021-05-22 15:14 | 1.793839 | 2 |
dexp/processing/color/crop_resize_pad.py | haesleinhuepf/dexp | 16 | 6632932 | <reponame>haesleinhuepf/dexp<filename>dexp/processing/color/crop_resize_pad.py
from typing import Tuple, Union
from dexp.utils import xpArray
from dexp.utils.backends import Backend
def crop_resize_pad_color_image(
image: xpArray,
crop: Union[int, Tuple[int, ...], Tuple[Tuple[int, int], ...]] = None,
resize: Tuple[int, ...] = None,
resize_order: int = 3,
resize_mode: str = "constant",
pad_width: Tuple[Tuple[int, int], ...] = None,
pad_mode: str = "constant",
pad_color: Tuple[float, float, float, float] = (0, 0, 0, 0),
rgba_value_max: float = 255,
):
"""
Crops, resizes, and then pads an RGB(A) image.
Parameters
----------
image: image to resize.
crop: Crop image by removing a given number of pixels/voxels per axis. For example: ((10,20),(10,20))
crops 10 pixels on the left of axis 0, 20 pixels from the right of axis 0, and the same for axis 1.
resize: After cropping, the image is resized to the given shape. If any entry in the tuple is -1 then that
position in the shape is automatically determined based on the existing shape to preserve aspect ratio.
resize_order: The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
resize_mode: Optional. Determines how the input array is extended beyond its boundaries.
Can be: ‘reflect’, ‘constant’, ‘nearest’, ‘mirror’, ‘wrap’.
pad_width: After cropping and resizing, padding is performed.
The provided tuple is interpreted similarly to cropping.
pad_mode: Padding mode, see numpy.pad for the available modes.
pad_color: Padding color as tuple of normalised floats: (R,G,B,A). Default is transparent black.
rgba_value_max: max value for rgba values.
Returns
-------
Cropped, resized, and padded image.
"""
xp = Backend.get_xp_module()
sp = Backend.get_sp_module()
# Move to backend:
image = Backend.to_backend(image)
# Normalisation of crop parameter:
if crop is not None:
if type(crop) is int:
crop = (crop,) * (image.ndim - 1)
if type(crop[0]) is int:
crop = tuple((c, c) for c in crop)
# build the slice object to crop the image
slicing = tuple(slice(l if l > 0 else None, -r if r > 0 else None) for l, r in crop) + (slice(None),)
# Crop:
image = image[slicing]
# Normalise resize:
if resize is not None:
# computing resize factors:
factors = tuple(ns / s for ns, s in zip(resize, image.shape[:-1]))
# find all non negative factors:
factors_no_negatives = tuple(factor for factor in factors if factor > 0)
# compute the average (in most cases all factors are equal!)
avg_factor = sum(factors_no_negatives) / len(factors_no_negatives)
# we replace the negative values with the average:
factors = tuple((factor if factor > 0 else avg_factor) for factor in factors)
# handle channel dim:
factors = factors + (1,)
# Resizing:
image = sp.ndimage.zoom(input=image, zoom=factors, order=resize_order, mode=resize_mode)
# Number of channels:
nb_channels = image.shape[-1]
# Normalise pad_width:
if pad_width is not None:
# Adding a colored border:
padded_channels = []
for channel_index in range(nb_channels):
channel = image[..., channel_index]
value = pad_color[channel_index] * rgba_value_max
padded_channel = xp.pad(channel, pad_width=pad_width, mode=pad_mode, constant_values=value)
padded_channels.append(padded_channel)
# Stacking:
image = xp.stack(padded_channels, axis=-1)
return image
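def _example_crop_resize_pad():
    # Minimal usage sketch, not part of the original module. The 512x512 RGBA array and
    # the parameter values are illustrative; in practice the call may need to run inside
    # an active dexp Backend context (e.g. a NumPy-based backend).
    import numpy
    rgba = numpy.zeros((512, 512, 4), dtype=numpy.uint8)  # height x width x RGBA
    # Crop 10 px from every edge, resize to 256x256, then add a 5 px opaque white border.
    return crop_resize_pad_color_image(
        rgba,
        crop=10,
        resize=(256, 256),
        pad_width=((5, 5), (5, 5)),
        pad_color=(1, 1, 1, 1),
    )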
| from typing import Tuple, Union
from dexp.utils import xpArray
from dexp.utils.backends import Backend
def crop_resize_pad_color_image(
image: xpArray,
crop: Union[int, Tuple[int, ...], Tuple[Tuple[int, int], ...]] = None,
resize: Tuple[int, ...] = None,
resize_order: int = 3,
resize_mode: str = "constant",
pad_width: Tuple[Tuple[int, int], ...] = None,
pad_mode: str = "constant",
pad_color: Tuple[float, float, float, float] = (0, 0, 0, 0),
rgba_value_max: float = 255,
):
"""
Crops, resizes, and then pads an RGB(A) image.
Parameters
----------
image: image to resize.
crop: Crop image by removing a given number of pixels/voxels per axis. For example: ((10,20),(10,20))
crops 10 pixels on the left of axis 0, 20 pixels from the right of axis 0, and the same for axis 1.
resize: After cropping, the image is resized to the given shape. If any entry in the tuple is -1 then that
position in the shape is automatically determined based on the existing shape to preserve aspect ratio.
resize_order: The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
resize_mode: Optional. Determines how the input array is extended beyond its boundaries.
Can be: ‘reflect’, ‘constant’, ‘nearest’, ‘mirror’, ‘wrap’.
pad_width: After cropping and resizing, padding is performed.
The provided tuple is interpreted similarly to cropping.
pad_mode: Padding mode, see numpy.pad for the available modes.
pad_color: Padding color as tuple of normalised floats: (R,G,B,A). Default is transparent black.
rgba_value_max: max value for rgba values.
Returns
-------
Cropped, resized, and padded image.
"""
xp = Backend.get_xp_module()
sp = Backend.get_sp_module()
# Move to backend:
image = Backend.to_backend(image)
# Normalisation of crop parameter:
if crop is not None:
if type(crop) is int:
crop = (crop,) * (image.ndim - 1)
if type(crop[0]) is int:
crop = tuple((c, c) for c in crop)
# build the slice object to crop the image
slicing = tuple(slice(l if l > 0 else None, -r if r > 0 else None) for l, r in crop) + (slice(None),)
# Crop:
image = image[slicing]
# Normalise resize:
if resize is not None:
# computing resize factors:
factors = tuple(ns / s for ns, s in zip(resize, image.shape[:-1]))
# find all non negative factors:
factors_no_negatives = tuple(factor for factor in factors if factor > 0)
# compute the average (in most cases all factors are equal!)
avg_factor = sum(factors_no_negatives) / len(factors_no_negatives)
# we replace the negative values with the average:
factors = tuple((factor if factor > 0 else avg_factor) for factor in factors)
# handle channel dim:
factors = factors + (1,)
# Resizing:
image = sp.ndimage.zoom(input=image, zoom=factors, order=resize_order, mode=resize_mode)
# Number of channels:
nb_channels = image.shape[-1]
# Normalise pad_width:
if pad_width is not None:
# Adding a colored border:
padded_channels = []
for channel_index in range(nb_channels):
channel = image[..., channel_index]
value = pad_color[channel_index] * rgba_value_max
padded_channel = xp.pad(channel, pad_width=pad_width, mode=pad_mode, constant_values=value)
padded_channels.append(padded_channel)
# Stacking:
image = xp.stack(padded_channels, axis=-1)
return image | en | 0.757597 | Crops, resizes and then pad an RGB(A) image. Parameters ---------- image: image to resize. crop: Crop image by removing a given number of pixels/voxels per axis. For example: ((10,20),(10,20)) crops 10 pixels on the left for axis 0, 20 pixels from the right of axis 0, and the same for axis 2. resize: After cropping, the image is resized to the given shape. If any entry in the tuple is -1 then that position in the shape is automatically determined based on the existing shape to preserve aspect ratio. resize_order: The order of the spline interpolation, default is 3. The order has to be in the range 0-5. resize_mode: optional The mode parameter determines how the input array is extended beyond its boundaries. Can be: ‘reflect’, ‘constant’, ‘nearest’, ‘mirror’, ‘wrap’. pad_width: After cropping and resizing, padding is performed. The provided tuple is interpreted similarly to cropping. pad_mode: Padding mode, see numpy.pad for the available modes. pad_color: Padding color as tuple of normalised floats: (R,G,B,A). Default is transparent black. rgba_value_max: max value for rgba values. Returns ------- Cropped, resized, and padded image. # Move to backend: # Normalisation of crop parameter: # build the slice object to crop the image # Crop: # Normalise resize: # computing resize factors: # find all non negative factors: # compute the average (most case all factors are equal!) # we replace the negative values with the average: # handle channel dim: # Resizing: # Number of channels: # Normalise pad_width: # Adding a colored border: # Stacking: | 2.955824 | 3 |
src/Interpreter/frame.py | Sword-And-Rose/Simple-Interpreter | 1 | 6632933 | from Semantic.symbol import *
import logging
class Frame(object):
def __init__(self, scope, enclosing_frame=None):
self.scope = scope
self.data = {}
self.enclosing_frame = enclosing_frame
self.return_value = None
def set(self, name, value):
if name == self.scope.scope_name:
self.return_value = value
return
symbol = self.scope.lookup(name, True)
if symbol is None:
self.enclosing_frame.set(name, value)
else:
logging.debug(f' set {symbol} to {value}')
self.data[name] = value
def get(self, name):
symbol = self.scope.lookup(name, True)
if symbol is None:
return self.enclosing_frame.get(name)
return self.data.get(name)
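# Hedged usage sketch: it assumes scope objects from Semantic.symbol expose the
# lookup()/scope_name API used above; the variable names below are illustrative only.
# global_frame = Frame(global_scope)                       # outermost frame
# call_frame = Frame(func_scope, enclosing_frame=global_frame)
# call_frame.set('x', 42)   # stored locally if 'x' is declared in func_scope
# call_frame.get('x')       # otherwise the lookup falls back to global_frame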
| from Semantic.symbol import *
import logging
class Frame(object):
def __init__(self, scope, enclosing_frame=None):
self.scope = scope
self.data = {}
self.enclosing_frame = enclosing_frame
self.return_value = None
def set(self, name, value):
if name == self.scope.scope_name:
self.return_value = value
return
symbol = self.scope.lookup(name, True)
if symbol is None:
self.enclosing_frame.set(name, value)
else:
logging.debug(f' set {symbol} to {value}')
self.data[name] = value
def get(self, name):
symbol = self.scope.lookup(name, True)
if symbol is None:
return self.enclosing_frame.get(name)
return self.data.get(name)
| none | 1 | 2.723328 | 3 |
|
tests/integration-tests/tests/ad_integration/cluster_user.py | enrico-usai/cfncluster | 0 | 6632934 | import logging
import os
from assertpy import assert_that
from paramiko import AutoAddPolicy, SSHClient
from remote_command_executor import RemoteCommandExecutor
from utils import run_command
class ClusterUser:
"""Class to represent a cluster user in a multi-user environment."""
def __init__(
self,
user_num,
test_datadir,
cluster,
scheduler,
default_user_remote_command_executor,
password,
scheduler_commands_factory,
):
self._default_user_remote_command_executor = default_user_remote_command_executor
self.cluster = cluster
self.scheduler = scheduler
self.user_num = user_num # TODO: don't need to keep this?
self.alias = f"PclusterUser{user_num}"
self.ssh_keypair_path_prefix = str(test_datadir / self.alias)
self.ssh_private_key_path = self.ssh_keypair_path_prefix
self.ssh_public_key_path = f"{self.ssh_private_key_path}.pub"
# TODO: randomly generate this. It's hardcoded here because it's also hard-coded in the script
# that creates users as part of the directory stack.
self.password = password
self._personalized_remote_command_executor = RemoteCommandExecutor(
self.cluster, username=self.alias, alternate_ssh_key=self.ssh_private_key_path
)
self._personalized_scheduler_commands = scheduler_commands_factory(self._personalized_remote_command_executor)
self.validate_password_auth_and_automatic_homedir_creation()
self._configure_public_ssh_keys()
def _generate_ssh_keypair(self):
"""Create an RSA SSH keypair for the user."""
logging.info("Creating SSH keypair for user %s", self.alias)
cmd = [
"ssh-keygen",
"-q",
"-f",
self.ssh_keypair_path_prefix,
"-t",
"rsa",
"-N",
"",
"-C",
f"multi-user integ test {self.alias}",
]
run_command(cmd)
def copy_public_ssh_key_to_authorized_keys(self):
"""Copy user's public SSH key to authorized keys file on cluster's head node."""
user_home_dir = f"/home/{self.alias}"
user_ssh_dir = f"{user_home_dir}/.ssh"
public_key_basename = os.path.basename(self.ssh_public_key_path)
authorized_keys_path = f"{user_ssh_dir}/authorized_keys"
cmd = " && ".join(
[
f"sudo mkdir -p {user_ssh_dir}",
f"sudo chmod 700 {user_ssh_dir}",
f"cat {public_key_basename} | sudo tee -a {authorized_keys_path}",
f"sudo chmod 644 {authorized_keys_path}",
f"sudo chown -R {self.alias} {user_home_dir}",
]
)
self._default_user_remote_command_executor.run_remote_command(cmd, additional_files=[self.ssh_public_key_path])
def _configure_public_ssh_keys(self):
self._generate_ssh_keypair()
self.copy_public_ssh_key_to_authorized_keys()
def submit_script(self, script, **submit_command_kwargs):
"""Wrapper around SchedulerCommand's submit_script method."""
return self._personalized_scheduler_commands.submit_script(script, **submit_command_kwargs)
def run_remote_command(self, command, **submit_command_kwargs):
"""Wrapper around RemoteCommandExecutor's run_command method."""
return self._personalized_remote_command_executor.run_remote_command(command, **submit_command_kwargs)
def assert_job_submitted(self, stdout):
"""Wrapper around SchedulerCommand's assert_job_submitted method."""
return self._personalized_scheduler_commands.assert_job_submitted(stdout)
def wait_job_completed(self, job_id):
"""Wrapper around SchedulerCommand's wait_job_completed method."""
return self._personalized_scheduler_commands.wait_job_completed(job_id)
def assert_job_succeeded(self, job_id):
"""Wrapper around SchedulerCommand's assert_job_succeeded method."""
self._personalized_scheduler_commands.assert_job_succeeded(job_id)
def cleanup(self):
"""Cleanup resources associated with this user."""
user_home_dir = f"/home/{self.alias}"
logging.info("Removing home directory for user %s (%s)", self.alias, user_home_dir)
self._default_user_remote_command_executor.run_remote_command(f"sudo rm -rf {user_home_dir}")
def ssh_connect(self, port=22):
"""Establish a SSH connection to the cluster head node with the current user."""
ssh = SSHClient()
ssh.set_missing_host_key_policy(AutoAddPolicy())
ssh.connect(self.cluster.head_node_ip, port, self.alias, self.password, allow_agent=False, look_for_keys=False)
return ssh
def validate_password_auth_and_automatic_homedir_creation(self, port=22):
"""Ensure the password can be used to log in to the cluster and that the user's home directory is created."""
ssh = self.ssh_connect()
homedir = f"/home/{self.alias}"
command = f"[ -d {homedir} ] || echo failure"
logging.info(
"Verifying home directory for user %s is automatically created at %s before running command: %s",
self.alias,
homedir,
command,
)
_, stdout, stderr = ssh.exec_command(command)
stdout_str = stdout.read().decode()
stderr_str = stderr.read().decode()
logging.info("Output from command %s\nstdout:\n%s\nstderr:\n%s", command, stdout_str, stderr_str)
assert_that(stdout.read().decode()).does_not_contain("failure")
def reset_stateful_connection_objects(self, default_user_remote_command_executor, scheduler_commands_factory):
"""Reset objects that might maintain an open SSH connection."""
del self._default_user_remote_command_executor
del self._personalized_remote_command_executor
del self._personalized_scheduler_commands
self._default_user_remote_command_executor = default_user_remote_command_executor
self._personalized_remote_command_executor = RemoteCommandExecutor(
self.cluster, username=self.alias, alternate_ssh_key=self.ssh_private_key_path
)
self._personalized_scheduler_commands = scheduler_commands_factory(self._personalized_remote_command_executor)
def remote_command_executor(self):
"""Get remote command executor."""
return self._personalized_remote_command_executor
def scheduler_commands(self):
"""Get scheduler commands."""
return self._personalized_scheduler_commands
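# Hedged sketch of how a test might drive ClusterUser; the fixtures referenced below
# (cluster, scheduler, test_datadir, default_user_remote_command_executor, password,
# scheduler_commands_factory) come from the surrounding integration-test framework and
# are assumed here, as is a submit result object exposing stdout.
# user = ClusterUser(0, test_datadir, cluster, scheduler,
#                    default_user_remote_command_executor, password,
#                    scheduler_commands_factory)
# result = user.submit_script("job.sh")
# job_id = user.assert_job_submitted(result.stdout)
# user.wait_job_completed(job_id)
# user.assert_job_succeeded(job_id)
# user.cleanup()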
| import logging
import os
from assertpy import assert_that
from paramiko import AutoAddPolicy, SSHClient
from remote_command_executor import RemoteCommandExecutor
from utils import run_command
class ClusterUser:
"""Class to represent a cluster user in a multi-user environment."""
def __init__(
self,
user_num,
test_datadir,
cluster,
scheduler,
default_user_remote_command_executor,
password,
scheduler_commands_factory,
):
self._default_user_remote_command_executor = default_user_remote_command_executor
self.cluster = cluster
self.scheduler = scheduler
self.user_num = user_num # TODO: don't need to keep this?
self.alias = f"PclusterUser{user_num}"
self.ssh_keypair_path_prefix = str(test_datadir / self.alias)
self.ssh_private_key_path = self.ssh_keypair_path_prefix
self.ssh_public_key_path = f"{self.ssh_private_key_path}.pub"
# TODO: randomly generate this. It's hardcoded here because it's also hard-coded in the script
# that creates users as part of the directory stack.
self.password = password
self._personalized_remote_command_executor = RemoteCommandExecutor(
self.cluster, username=self.alias, alternate_ssh_key=self.ssh_private_key_path
)
self._personalized_scheduler_commands = scheduler_commands_factory(self._personalized_remote_command_executor)
self.validate_password_auth_and_automatic_homedir_creation()
self._configure_public_ssh_keys()
def _generate_ssh_keypair(self):
"""Create an RSA SSH keypair for the user."""
logging.info("Creating SSH keypair for user %s", self.alias)
cmd = [
"ssh-keygen",
"-q",
"-f",
self.ssh_keypair_path_prefix,
"-t",
"rsa",
"-N",
"",
"-C",
f"multi-user integ test {self.alias}",
]
run_command(cmd)
def copy_public_ssh_key_to_authorized_keys(self):
"""Copy user's public SSH key to authorized keys file on cluster's head node."""
user_home_dir = f"/home/{self.alias}"
user_ssh_dir = f"{user_home_dir}/.ssh"
public_key_basename = os.path.basename(self.ssh_public_key_path)
authorized_keys_path = f"{user_ssh_dir}/authorized_keys"
cmd = " && ".join(
[
f"sudo mkdir -p {user_ssh_dir}",
f"sudo chmod 700 {user_ssh_dir}",
f"cat {public_key_basename} | sudo tee -a {authorized_keys_path}",
f"sudo chmod 644 {authorized_keys_path}",
f"sudo chown -R {self.alias} {user_home_dir}",
]
)
self._default_user_remote_command_executor.run_remote_command(cmd, additional_files=[self.ssh_public_key_path])
def _configure_public_ssh_keys(self):
self._generate_ssh_keypair()
self.copy_public_ssh_key_to_authorized_keys()
def submit_script(self, script, **submit_command_kwargs):
"""Wrapper around SchedulerCommand's submit_script method."""
return self._personalized_scheduler_commands.submit_script(script, **submit_command_kwargs)
def run_remote_command(self, command, **submit_command_kwargs):
"""Wrapper around RemoteCommandExecutor's run_command method."""
return self._personalized_remote_command_executor.run_remote_command(command, **submit_command_kwargs)
def assert_job_submitted(self, stdout):
"""Wrapper around SchedulerCommand's assert_job_submitted method."""
return self._personalized_scheduler_commands.assert_job_submitted(stdout)
def wait_job_completed(self, job_id):
"""Wrapper around SchedulerCommand's wait_job_completed method."""
return self._personalized_scheduler_commands.wait_job_completed(job_id)
def assert_job_succeeded(self, job_id):
"""Wrapper around SchedulerCommand's assert_job_succeeded method."""
self._personalized_scheduler_commands.assert_job_succeeded(job_id)
def cleanup(self):
"""Cleanup resources associated with this user."""
user_home_dir = f"/home/{self.alias}"
logging.info("Removing home directory for user %s (%s)", self.alias, user_home_dir)
self._default_user_remote_command_executor.run_remote_command(f"sudo rm -rf {user_home_dir}")
def ssh_connect(self, port=22):
"""Establish a SSH connection to the cluster head node with the current user."""
ssh = SSHClient()
ssh.set_missing_host_key_policy(AutoAddPolicy())
ssh.connect(self.cluster.head_node_ip, port, self.alias, self.password, allow_agent=False, look_for_keys=False)
return ssh
def validate_password_auth_and_automatic_homedir_creation(self, port=22):
"""Ensure the password can be used to log in to the cluster and that the user's home directory is created."""
ssh = self.ssh_connect()
homedir = f"/home/{self.alias}"
command = f"[ -d {homedir} ] || echo failure"
logging.info(
"Verifying home directory for user %s is automatically created at %s before running command: %s",
self.alias,
homedir,
command,
)
_, stdout, stderr = ssh.exec_command(command)
stdout_str = stdout.read().decode()
stderr_str = stderr.read().decode()
logging.info("Output from command %s\nstdout:\n%s\nstderr:\n%s", command, stdout_str, stderr_str)
assert_that(stdout.read().decode()).does_not_contain("failure")
def reset_stateful_connection_objects(self, default_user_remote_command_executor, scheduler_commands_factory):
"""Reset objects that might maintain an open SSH connection."""
del self._default_user_remote_command_executor
del self._personalized_remote_command_executor
del self._personalized_scheduler_commands
self._default_user_remote_command_executor = default_user_remote_command_executor
self._personalized_remote_command_executor = RemoteCommandExecutor(
self.cluster, username=self.alias, alternate_ssh_key=self.ssh_private_key_path
)
self._personalized_scheduler_commands = scheduler_commands_factory(self._personalized_remote_command_executor)
def remote_command_executor(self):
"""Get remote command executor."""
return self._personalized_remote_command_executor
def scheduler_commands(self):
"""Get scheduler commands."""
return self._personalized_scheduler_commands
| en | 0.891967 | Class to represent a cluster user in a multi-user environment. # TODO: don't need to keep this? # TODO: randomly generate this. It's hardcoded here because it's also hard-coded in the script # that creates users as part of the directory stack. Create an RSA SSH keypair for the user. Copy user's public SSH key to authorized keys file on cluster's head node. Wrapper around SchedulerCommand's submit_script method. Wrapper around RemoteCommandExecutor's run_command method. Wrapper around SchedulerCommand's assert_job_submitted method. Wrapper around SchedulerCommand's wait_job_completed method. Wrapper around SchedulerCommand's assert_job_succeded method. Cleanup resources associated with this user. Establish a SSH connection to the cluster head node with the current user. Ensure password can be used to login to cluster and that user's home directory is created. Reset objects that might maintain an open SSH connection. Get remote command executor. Get scheduler commands. | 2.522488 | 3 |
beast/physicsmodel/tests/test_stellar_prior_weights.py | cmurray-astro/beast | 0 | 6632935 | import numpy as np
from beast.physicsmodel.prior_weights_stars import (
compute_distance_prior_weights,
compute_age_prior_weights,
compute_mass_prior_weights,
compute_metallicity_prior_weights,
imf_kroupa,
)
def test_flat_age_prior_weights():
"""
Test for flat age prior
"""
log_age = np.array([6.0, 7.0, 8.0, 9.0, 10.0])
log_age_prior_model = {"name": "flat"}
log_age_prior = compute_age_prior_weights(log_age, log_age_prior_model)
expected_log_age_prior = [1, 1, 1, 1, 1]
np.testing.assert_allclose(
log_age_prior, expected_log_age_prior, err_msg=("Flat age prior error")
)
def test_flat_log_age_prior_weights():
"""
Test for flat log age prior
"""
log_age = np.array([6.0, 7.0, 8.0, 9.0, 10.0])
log_age_prior_model = {"name": "flat_log"}
log_age_prior = compute_age_prior_weights(log_age, log_age_prior_model)
expected_log_age_prior = [
4.500045e00,
4.500045e-01,
4.500045e-02,
4.500045e-03,
4.500045e-04,
]
np.testing.assert_allclose(
log_age_prior, expected_log_age_prior, err_msg=("Flat log, log age prior error")
)
def test_bins_histo_age_prior_weights():
"""
Test for bin histogram age prior
"""
log_age = np.array([7.0, 8.0, 9.0])
log_age_prior_model = {
"name": "bins_histo",
"logages": [6.0, 7.0, 8.0, 9.0, 10.0],
"values": [1.0, 2.0, 1.0, 5.0, 3.0],
}
log_age_prior = compute_age_prior_weights(log_age, log_age_prior_model)
expected_log_age_prior = [0.75, 0.375, 1.875]
np.testing.assert_allclose(
log_age_prior,
expected_log_age_prior,
err_msg=("Bin histogram log age prior error"),
)
def test_bins_interp_age_prior_weights():
"""
Test for bin interpolation age prior
"""
log_age = np.array([6.0, 7.0, 8.0, 9.0, 10.0])
log_age_prior_model = {
"name": "bins_interp",
"logages": [6.0, 7.0, 8.0, 9.0, 10.0],
"values": [1.0, 2.0, 1.0, 5.0, 3.0],
}
log_age_prior = compute_age_prior_weights(log_age, log_age_prior_model)
expected_log_age_prior = [0.41666667, 0.83333333, 0.41666667, 2.08333333, 1.25]
np.testing.assert_allclose(
log_age_prior,
expected_log_age_prior,
err_msg=("Bin interpolation log age prior error"),
)
def test_exp_age_prior_weights():
"""
Test for exponential age prior with a tau = 0.1
"""
log_age = np.array([6.0, 7.0, 8.0, 9.0, 10.0])
log_age_prior_model = {"name": "exp", "tau": 0.1}
log_age_prior = compute_age_prior_weights(log_age, log_age_prior_model)
expected_log_age_prior = [
2.18765367e00,
1.99936491e00,
8.12881110e-01,
1.00317499e-04,
8.22002849e-44,
]
np.testing.assert_allclose(
log_age_prior,
expected_log_age_prior,
err_msg=("Exponential log age prior error"),
)
def test_imf_kroupa():
"""
Test for creating kroupa IMF
"""
mass = np.array([0.1, 1, 2, 3, 4, 50])
imf = imf_kroupa(mass)
expected_imf = [
3.99052463e01,
1.00000000e00,
2.03063099e-01,
7.99136770e-02,
4.12346222e-02,
1.23699798e-04,
]
np.testing.assert_allclose(
imf, expected_imf, err_msg=("Kroupa IMF calculation error")
)
def test_kroupa_mass_prior_weight():
"""
Test the kroupa mass prior
"""
mass = np.array([1, 2, 3, 4, 5])
mass_prior_model = {"name": "kroupa"}
weights = compute_mass_prior_weights(mass, mass_prior_model)
expected_weights = [3.97740709, 0.60861986, 0.22874078, 0.11618704, 0.06904523]
np.testing.assert_allclose(
weights,
expected_weights,
err_msg=("Stellar mass prior weight error (kroupa IMF)"),
)
def test_salpeter_mass_prior_weight():
"""
Test the salpeter mass prior
"""
mass = np.array([1, 2, 3, 4, 5])
mass_prior_model = {"name": "salpeter"}
weights = compute_mass_prior_weights(mass, mass_prior_model)
expected_weights = [4.02338441, 0.58842044, 0.21633931, 0.10825509, 0.06360075]
np.testing.assert_allclose(
weights,
expected_weights,
err_msg=("Stellar mass prior weight error (salpeter IMF)"),
)
def test_flat_mass_prior_weight():
"""
Test the flat mass prior
"""
mass = np.array([1, 2, 3, 4, 5])
mass_prior_model = {"name": "flat"}
weights = compute_mass_prior_weights(mass, mass_prior_model)
np.testing.assert_allclose(
weights,
np.full((len(weights)), 1.0),
err_msg=("Stellar mass prior weight error (flat IMF)"),
)
def test_flat_metallicity_prior_weight():
"""
Test the flat metallicity prior
"""
z = [10.0, 100.0, 1000.0]
z_prior_model = {"name": "flat"}
weights = compute_metallicity_prior_weights(z, z_prior_model)
np.testing.assert_allclose(
weights,
np.full((len(weights)), 1.0),
err_msg=("Stellar flat metallicity prior weight error"),
)
def test_flat_distance_prior_weight():
"""
Test the flat distance prior
"""
dists = [10.0, 100.0, 1000.0]
dist_prior_model = {"name": "flat"}
weights = compute_distance_prior_weights(dists, dist_prior_model)
np.testing.assert_allclose(
weights,
np.full((len(weights)), 1.0),
err_msg=("Stellar flat distance prior weight error"),
)
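def test_age_prior_weights_mean_is_one():
    """
    Added sanity check (not in the original module): the expected values in the age
    prior tests above imply that the returned weights average to 1 over the grid, so
    this asserts that assumed normalization directly.
    """
    log_age = np.array([6.0, 7.0, 8.0, 9.0, 10.0])
    for model in ({"name": "flat"}, {"name": "flat_log"}, {"name": "exp", "tau": 0.1}):
        weights = compute_age_prior_weights(log_age, model)
        np.testing.assert_allclose(np.mean(weights), 1.0, rtol=1e-6)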
| import numpy as np
from beast.physicsmodel.prior_weights_stars import (
compute_distance_prior_weights,
compute_age_prior_weights,
compute_mass_prior_weights,
compute_metallicity_prior_weights,
imf_kroupa,
)
def test_flat_age_prior_weights():
"""
Test for flat age prior
"""
log_age = np.array([6.0, 7.0, 8.0, 9.0, 10.0])
log_age_prior_model = {"name": "flat"}
log_age_prior = compute_age_prior_weights(log_age, log_age_prior_model)
expected_log_age_prior = [1, 1, 1, 1, 1]
np.testing.assert_allclose(
log_age_prior, expected_log_age_prior, err_msg=("Flat age prior error")
)
def test_flat_log_age_prior_weights():
"""
Test for flat log age prior
"""
log_age = np.array([6.0, 7.0, 8.0, 9.0, 10.0])
log_age_prior_model = {"name": "flat_log"}
log_age_prior = compute_age_prior_weights(log_age, log_age_prior_model)
expected_log_age_prior = [
4.500045e00,
4.500045e-01,
4.500045e-02,
4.500045e-03,
4.500045e-04,
]
np.testing.assert_allclose(
log_age_prior, expected_log_age_prior, err_msg=("Flat log, log age prior error")
)
def test_bins_histo_age_prior_weights():
"""
Test for bin histogram age prior
"""
log_age = np.array([7.0, 8.0, 9.0])
log_age_prior_model = {
"name": "bins_histo",
"logages": [6.0, 7.0, 8.0, 9.0, 10.0],
"values": [1.0, 2.0, 1.0, 5.0, 3.0],
}
log_age_prior = compute_age_prior_weights(log_age, log_age_prior_model)
expected_log_age_prior = [0.75, 0.375, 1.875]
np.testing.assert_allclose(
log_age_prior,
expected_log_age_prior,
err_msg=("Bin histogram log age prior error"),
)
def test_bins_interp_age_prior_weights():
"""
Test for bin interpolation age prior
"""
log_age = np.array([6.0, 7.0, 8.0, 9.0, 10.0])
log_age_prior_model = {
"name": "bins_interp",
"logages": [6.0, 7.0, 8.0, 9.0, 10.0],
"values": [1.0, 2.0, 1.0, 5.0, 3.0],
}
log_age_prior = compute_age_prior_weights(log_age, log_age_prior_model)
expected_log_age_prior = [0.41666667, 0.83333333, 0.41666667, 2.08333333, 1.25]
np.testing.assert_allclose(
log_age_prior,
expected_log_age_prior,
err_msg=("Bin interpolation log age prior error"),
)
def test_exp_age_prior_weights():
"""
Test for exponential age prior with a tau = 0.1
"""
log_age = np.array([6.0, 7.0, 8.0, 9.0, 10.0])
log_age_prior_model = {"name": "exp", "tau": 0.1}
log_age_prior = compute_age_prior_weights(log_age, log_age_prior_model)
expected_log_age_prior = [
2.18765367e00,
1.99936491e00,
8.12881110e-01,
1.00317499e-04,
8.22002849e-44,
]
np.testing.assert_allclose(
log_age_prior,
expected_log_age_prior,
err_msg=("Exponential log age prior error"),
)
def test_imf_kroupa():
"""
Test for creating kroupa IMF
"""
mass = np.array([0.1, 1, 2, 3, 4, 50])
imf = imf_kroupa(mass)
expected_imf = [
3.99052463e01,
1.00000000e00,
2.03063099e-01,
7.99136770e-02,
4.12346222e-02,
1.23699798e-04,
]
np.testing.assert_allclose(
imf, expected_imf, err_msg=("Kroupa IMF calculation error")
)
def test_kroupa_mass_prior_weight():
"""
Test the kroupa mass prior
"""
mass = np.array([1, 2, 3, 4, 5])
mass_prior_model = {"name": "kroupa"}
weights = compute_mass_prior_weights(mass, mass_prior_model)
expected_weights = [3.97740709, 0.60861986, 0.22874078, 0.11618704, 0.06904523]
np.testing.assert_allclose(
weights,
expected_weights,
err_msg=("Stellar mass prior weight error (kroupa IMF)"),
)
def test_salpeter_mass_prior_weight():
"""
Test the salpeter mass prior
"""
mass = np.array([1, 2, 3, 4, 5])
mass_prior_model = {"name": "salpeter"}
weights = compute_mass_prior_weights(mass, mass_prior_model)
expected_weights = [4.02338441, 0.58842044, 0.21633931, 0.10825509, 0.06360075]
np.testing.assert_allclose(
weights,
expected_weights,
err_msg=("Stellar mass prior weight error (salpeter IMF)"),
)
def test_flat_mass_prior_weight():
"""
Test the flat mass prior
"""
mass = np.array([1, 2, 3, 4, 5])
mass_prior_model = {"name": "flat"}
weights = compute_mass_prior_weights(mass, mass_prior_model)
np.testing.assert_allclose(
weights,
np.full((len(weights)), 1.0),
err_msg=("Stellar mass prior weight error (flat IMF)"),
)
def test_flat_metallicity_prior_weight():
"""
Test the flat metallicity prior
"""
z = [10.0, 100.0, 1000.0]
z_prior_model = {"name": "flat"}
weights = compute_metallicity_prior_weights(z, z_prior_model)
np.testing.assert_allclose(
weights,
np.full((len(weights)), 1.0),
err_msg=("Stellar flat metallicity prior weight error"),
)
def test_flat_distance_prior_weight():
"""
Test the flat distance prior
"""
dists = [10.0, 100.0, 1000.0]
dist_prior_model = {"name": "flat"}
weights = compute_distance_prior_weights(dists, dist_prior_model)
np.testing.assert_allclose(
weights,
np.full((len(weights)), 1.0),
err_msg=("Stellar flat distance prior weight error"),
)
| en | 0.716171 | Test for flat age prior Test for flat log age prior Test for bin histogram age prior Test for bin interpolation age prior Test for exponential age prior with a tau = 0.1 Test for creating kroupa IMF Test the kroupa mass prior Test the salpeter mass prior Test the flat mass prior Test the flat metallicity prior Test the flat distance prior | 2.353952 | 2 |
scan.py | tmwctw/nclpokemap | 139 | 6632936 | #!/usr/bin/env python3
import monocle.sanitized as conf
from asyncio import gather, set_event_loop_policy, Task, wait_for, TimeoutError
try:
if conf.UVLOOP:
from uvloop import EventLoopPolicy
set_event_loop_policy(EventLoopPolicy())
except ImportError:
pass
from multiprocessing.managers import BaseManager, DictProxy
from queue import Queue, Full
from argparse import ArgumentParser
from signal import signal, SIGINT, SIGTERM, SIG_IGN
from logging import getLogger, basicConfig, WARNING, INFO
from logging.handlers import RotatingFileHandler
from os.path import exists, join
from sys import platform
from time import monotonic, sleep
from sqlalchemy.exc import DBAPIError
from aiopogo import close_sessions, activate_hash_server
from monocle.shared import LOOP, get_logger, SessionManager, ACCOUNTS
from monocle.utils import get_address, dump_pickle
from monocle.worker import Worker
from monocle.overseer import Overseer
from monocle.db import FORT_CACHE
from monocle import altitudes, db_proc, spawns
class AccountManager(BaseManager):
pass
class CustomQueue(Queue):
def full_wait(self, maxsize=0, timeout=None):
'''Block until queue size falls below maxsize'''
starttime = monotonic()
with self.not_full:
if maxsize > 0:
if timeout is None:
while self._qsize() >= maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = monotonic() + timeout
while self._qsize() >= maxsize:
remaining = endtime - monotonic()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self.not_empty.notify()
endtime = monotonic()
return endtime - starttime
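# Usage note (illustrative): full_wait() provides backpressure: for example,
# _captcha_queue.full_wait(maxsize=20, timeout=300) blocks the caller until fewer
# than 20 items are queued, raises queue.Full if that takes longer than 5 minutes,
# and returns how long the caller actually waited.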
_captcha_queue = CustomQueue()
_extra_queue = Queue()
_worker_dict = {}
def get_captchas():
return _captcha_queue
def get_extras():
return _extra_queue
def get_workers():
return _worker_dict
def mgr_init():
signal(SIGINT, SIG_IGN)
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'--no-status-bar',
dest='status_bar',
help='Log to console instead of displaying status bar',
action='store_false'
)
parser.add_argument(
'--log-level',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
default=WARNING
)
parser.add_argument(
'--bootstrap',
dest='bootstrap',
help='Bootstrap even if spawns are known.',
action='store_true'
)
parser.add_argument(
'--no-pickle',
dest='pickle',
help='Do not load spawns from pickle',
action='store_false'
)
return parser.parse_args()
def configure_logger(filename='scan.log'):
if filename:
handlers = (RotatingFileHandler(filename, maxBytes=500000, backupCount=4),)
else:
handlers = None
basicConfig(
format='[{asctime}][{levelname:>8s}][{name}] {message}',
datefmt='%Y-%m-%d %X',
style='{',
level=INFO,
handlers=handlers
)
def exception_handler(loop, context):
try:
log = getLogger('eventloop')
log.error('A wild exception appeared!')
log.error(context)
except Exception:
print('Exception in exception handler.')
def cleanup(overseer, manager):
try:
overseer.print_handle.cancel()
overseer.running = False
print('Exiting, please wait until all tasks finish')
log = get_logger('cleanup')
print('Finishing tasks...')
LOOP.create_task(overseer.exit_progress())
pending = gather(*Task.all_tasks(loop=LOOP), return_exceptions=True)
try:
LOOP.run_until_complete(wait_for(pending, 40))
except TimeoutError as e:
print('Coroutine completion timed out, moving on.')
except Exception as e:
log = get_logger('cleanup')
log.exception('A wild {} appeared during exit!', e.__class__.__name__)
db_proc.stop()
overseer.refresh_dict()
print('Dumping pickles...')
dump_pickle('accounts', ACCOUNTS)
FORT_CACHE.pickle()
altitudes.pickle()
if conf.CACHE_CELLS:
dump_pickle('cells', Worker.cells)
spawns.pickle()
while not db_proc.queue.empty():
pending = db_proc.queue.qsize()
# Spaces at the end are important, as they clear previously printed
# output - \r doesn't clean whole line
print('{} DB items pending '.format(pending), end='\r')
sleep(.5)
finally:
print('Closing pipes, sessions, and event loop...')
manager.shutdown()
SessionManager.close()
close_sessions()
LOOP.close()
print('Done.')
def main():
args = parse_args()
log = get_logger()
if args.status_bar:
configure_logger(filename=join(conf.DIRECTORY, 'scan.log'))
log.info('-' * 37)
log.info('Starting up!')
else:
configure_logger(filename=None)
log.setLevel(args.log_level)
AccountManager.register('captcha_queue', callable=get_captchas)
AccountManager.register('extra_queue', callable=get_extras)
if conf.MAP_WORKERS:
AccountManager.register('worker_dict', callable=get_workers,
proxytype=DictProxy)
address = get_address()
manager = AccountManager(address=address, authkey=conf.AUTHKEY)
try:
manager.start(mgr_init)
except (OSError, EOFError) as e:
if platform == 'win32' or not isinstance(address, str):
raise OSError('Another instance is running with the same manager address. Stop that process or change your MANAGER_ADDRESS.') from e
else:
raise OSError('Another instance is running with the same socket. Stop that process or: rm {}'.format(address)) from e
LOOP.set_exception_handler(exception_handler)
overseer = Overseer(manager)
overseer.start(args.status_bar)
launcher = LOOP.create_task(overseer.launch(args.bootstrap, args.pickle))
activate_hash_server(conf.HASH_KEY)
if platform != 'win32':
LOOP.add_signal_handler(SIGINT, launcher.cancel)
LOOP.add_signal_handler(SIGTERM, launcher.cancel)
try:
LOOP.run_until_complete(launcher)
except (KeyboardInterrupt, SystemExit):
launcher.cancel()
finally:
cleanup(overseer, manager)
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import monocle.sanitized as conf
from asyncio import gather, set_event_loop_policy, Task, wait_for, TimeoutError
try:
if conf.UVLOOP:
from uvloop import EventLoopPolicy
set_event_loop_policy(EventLoopPolicy())
except ImportError:
pass
from multiprocessing.managers import BaseManager, DictProxy
from queue import Queue, Full
from argparse import ArgumentParser
from signal import signal, SIGINT, SIGTERM, SIG_IGN
from logging import getLogger, basicConfig, WARNING, INFO
from logging.handlers import RotatingFileHandler
from os.path import exists, join
from sys import platform
from time import monotonic, sleep
from sqlalchemy.exc import DBAPIError
from aiopogo import close_sessions, activate_hash_server
from monocle.shared import LOOP, get_logger, SessionManager, ACCOUNTS
from monocle.utils import get_address, dump_pickle
from monocle.worker import Worker
from monocle.overseer import Overseer
from monocle.db import FORT_CACHE
from monocle import altitudes, db_proc, spawns
class AccountManager(BaseManager):
pass
class CustomQueue(Queue):
def full_wait(self, maxsize=0, timeout=None):
'''Block until queue size falls below maxsize'''
starttime = monotonic()
with self.not_full:
if maxsize > 0:
if timeout is None:
while self._qsize() >= maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = monotonic() + timeout
while self._qsize() >= maxsize:
remaining = endtime - monotonic()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self.not_empty.notify()
endtime = monotonic()
return endtime - starttime
_captcha_queue = CustomQueue()
_extra_queue = Queue()
_worker_dict = {}
def get_captchas():
return _captcha_queue
def get_extras():
return _extra_queue
def get_workers():
return _worker_dict
def mgr_init():
signal(SIGINT, SIG_IGN)
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'--no-status-bar',
dest='status_bar',
help='Log to console instead of displaying status bar',
action='store_false'
)
parser.add_argument(
'--log-level',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
default=WARNING
)
parser.add_argument(
'--bootstrap',
dest='bootstrap',
help='Bootstrap even if spawns are known.',
action='store_true'
)
parser.add_argument(
'--no-pickle',
dest='pickle',
help='Do not load spawns from pickle',
action='store_false'
)
return parser.parse_args()
def configure_logger(filename='scan.log'):
if filename:
handlers = (RotatingFileHandler(filename, maxBytes=500000, backupCount=4),)
else:
handlers = None
basicConfig(
format='[{asctime}][{levelname:>8s}][{name}] {message}',
datefmt='%Y-%m-%d %X',
style='{',
level=INFO,
handlers=handlers
)
def exception_handler(loop, context):
try:
log = getLogger('eventloop')
log.error('A wild exception appeared!')
log.error(context)
except Exception:
print('Exception in exception handler.')
def cleanup(overseer, manager):
try:
overseer.print_handle.cancel()
overseer.running = False
print('Exiting, please wait until all tasks finish')
log = get_logger('cleanup')
print('Finishing tasks...')
LOOP.create_task(overseer.exit_progress())
pending = gather(*Task.all_tasks(loop=LOOP), return_exceptions=True)
try:
LOOP.run_until_complete(wait_for(pending, 40))
except TimeoutError as e:
print('Coroutine completion timed out, moving on.')
except Exception as e:
log = get_logger('cleanup')
log.exception('A wild {} appeared during exit!', e.__class__.__name__)
db_proc.stop()
overseer.refresh_dict()
print('Dumping pickles...')
dump_pickle('accounts', ACCOUNTS)
FORT_CACHE.pickle()
altitudes.pickle()
if conf.CACHE_CELLS:
dump_pickle('cells', Worker.cells)
spawns.pickle()
while not db_proc.queue.empty():
pending = db_proc.queue.qsize()
# Spaces at the end are important, as they clear previously printed
# output - \r doesn't clean whole line
print('{} DB items pending '.format(pending), end='\r')
sleep(.5)
finally:
print('Closing pipes, sessions, and event loop...')
manager.shutdown()
SessionManager.close()
close_sessions()
LOOP.close()
print('Done.')
def main():
args = parse_args()
log = get_logger()
if args.status_bar:
configure_logger(filename=join(conf.DIRECTORY, 'scan.log'))
log.info('-' * 37)
log.info('Starting up!')
else:
configure_logger(filename=None)
log.setLevel(args.log_level)
AccountManager.register('captcha_queue', callable=get_captchas)
AccountManager.register('extra_queue', callable=get_extras)
if conf.MAP_WORKERS:
AccountManager.register('worker_dict', callable=get_workers,
proxytype=DictProxy)
address = get_address()
manager = AccountManager(address=address, authkey=conf.AUTHKEY)
try:
manager.start(mgr_init)
except (OSError, EOFError) as e:
if platform == 'win32' or not isinstance(address, str):
raise OSError('Another instance is running with the same manager address. Stop that process or change your MANAGER_ADDRESS.') from e
else:
raise OSError('Another instance is running with the same socket. Stop that process or: rm {}'.format(address)) from e
LOOP.set_exception_handler(exception_handler)
overseer = Overseer(manager)
overseer.start(args.status_bar)
launcher = LOOP.create_task(overseer.launch(args.bootstrap, args.pickle))
activate_hash_server(conf.HASH_KEY)
if platform != 'win32':
LOOP.add_signal_handler(SIGINT, launcher.cancel)
LOOP.add_signal_handler(SIGTERM, launcher.cancel)
try:
LOOP.run_until_complete(launcher)
except (KeyboardInterrupt, SystemExit):
launcher.cancel()
finally:
cleanup(overseer, manager)
if __name__ == '__main__':
main()
| en | 0.895674 | #!/usr/bin/env python3 Block until queue size falls below maxsize # Spaces at the end are important, as they clear previously printed # output - \r doesn't clean whole line | 2.003925 | 2 |
google/monitoring/v3/monitoring-v3-py/google/cloud/monitoring_v3/types/common.py | googleapis/googleapis-gen | 7 | 6632937 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.api import distribution_pb2 # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.monitoring.v3',
manifest={
'ComparisonType',
'ServiceTier',
'TypedValue',
'TimeInterval',
'Aggregation',
},
)
class ComparisonType(proto.Enum):
r"""Specifies an ordering relationship on two arguments, called ``left``
and ``right``.
"""
COMPARISON_UNSPECIFIED = 0
COMPARISON_GT = 1
COMPARISON_GE = 2
COMPARISON_LT = 3
COMPARISON_LE = 4
COMPARISON_EQ = 5
COMPARISON_NE = 6
class ServiceTier(proto.Enum):
r"""The tier of service for a Workspace. Please see the `service tiers
documentation <https://cloud.google.com/monitoring/workspaces/tiers>`__
for more details.
"""
_pb_options = {'deprecated': True}
SERVICE_TIER_UNSPECIFIED = 0
SERVICE_TIER_BASIC = 1
SERVICE_TIER_PREMIUM = 2
class TypedValue(proto.Message):
r"""A single strongly-typed value.
Attributes:
bool_value (bool):
A Boolean value: ``true`` or ``false``.
int64_value (int):
A 64-bit integer. Its range is approximately
±9.2x10<sup>18</sup>.
double_value (float):
A 64-bit double-precision floating-point
number. Its magnitude is approximately
±10<sup>±300</sup> and it has 16
significant digits of precision.
string_value (str):
A variable-length string value.
distribution_value (google.api.distribution_pb2.Distribution):
A distribution value.
"""
bool_value = proto.Field(
proto.BOOL,
number=1,
oneof='value',
)
int64_value = proto.Field(
proto.INT64,
number=2,
oneof='value',
)
double_value = proto.Field(
proto.DOUBLE,
number=3,
oneof='value',
)
string_value = proto.Field(
proto.STRING,
number=4,
oneof='value',
)
distribution_value = proto.Field(
proto.MESSAGE,
number=5,
oneof='value',
message=distribution_pb2.Distribution,
)
class TimeInterval(proto.Message):
r"""A closed time interval. It extends from the start time to the end
time, and includes both: ``[startTime, endTime]``. Valid time
intervals depend on the
```MetricKind`` <https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind>`__
of the metric value. The end time must not be earlier than the start
time. When writing data points, the start time must not be more than
25 hours in the past and the end time must not be more than five
minutes in the future.
- For ``GAUGE`` metrics, the ``startTime`` value is technically
optional; if no value is specified, the start time defaults to
the value of the end time, and the interval represents a single
point in time. If both start and end times are specified, they
must be identical. Such an interval is valid only for ``GAUGE``
metrics, which are point-in-time measurements. The end time of a
new interval must be at least a millisecond after the end time of
the previous interval.
- For ``DELTA`` metrics, the start time and end time must specify a
non-zero interval, with subsequent points specifying contiguous
and non-overlapping intervals. For ``DELTA`` metrics, the start
time of the next interval must be at least a millisecond after
the end time of the previous interval.
- For ``CUMULATIVE`` metrics, the start time and end time must
specify a non-zero interval, with subsequent points specifying
the same start time and increasing end times, until an event
resets the cumulative value to zero and sets a new start time for
the following points. The new start time must be at least a
millisecond after the end time of the previous interval.
- The start time of a new interval must be at least a millisecond
after the end time of the previous interval because intervals are
closed. If the start time of a new interval is the same as the
end time of the previous interval, then data written at the new
start time could overwrite data written at the previous end time.
Attributes:
end_time (google.protobuf.timestamp_pb2.Timestamp):
Required. The end of the time interval.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Optional. The beginning of the time interval.
The default value for the start time is the end
time. The start time must not be later than the
end time.
"""
end_time = proto.Field(
proto.MESSAGE,
number=2,
message=timestamp_pb2.Timestamp,
)
start_time = proto.Field(
proto.MESSAGE,
number=1,
message=timestamp_pb2.Timestamp,
)
class Aggregation(proto.Message):
r"""Describes how to combine multiple time series to provide a different
view of the data. Aggregation of time series is done in two steps.
First, each time series in the set is *aligned* to the same time
interval boundaries, then the set of time series is optionally
*reduced* in number.
Alignment consists of applying the ``per_series_aligner`` operation
to each time series after its data has been divided into regular
``alignment_period`` time intervals. This process takes *all* of the
data points in an alignment period, applies a mathematical
transformation such as averaging, minimum, maximum, delta, etc., and
converts them into a single data point per period.
Reduction is when the aligned and transformed time series can
optionally be combined, reducing the number of time series through
similar mathematical transformations. Reduction involves applying a
``cross_series_reducer`` to all the time series, optionally sorting
the time series into subsets with ``group_by_fields``, and applying
the reducer to each subset.
The raw time series data can contain a huge amount of information
from multiple sources. Alignment and reduction transforms this mass
of data into a more manageable and representative collection of
data, for example "the 95% latency across the average of all tasks
in a cluster". This representative data can be more easily graphed
and comprehended, and the individual time series data is still
available for later drilldown. For more details, see `Filtering and
aggregation <https://cloud.google.com/monitoring/api/v3/aggregation>`__.
Attributes:
alignment_period (google.protobuf.duration_pb2.Duration):
The ``alignment_period`` specifies a time interval, in
seconds, that is used to divide the data in all the [time
series][google.monitoring.v3.TimeSeries] into consistent
blocks of time. This will be done before the per-series
aligner can be applied to the data.
The value must be at least 60 seconds. If a per-series
aligner other than ``ALIGN_NONE`` is specified, this field
is required or an error is returned. If no per-series
aligner is specified, or the aligner ``ALIGN_NONE`` is
specified, then this field is ignored.
The maximum value of the ``alignment_period`` is 104 weeks
(2 years) for charts, and 90,000 seconds (25 hours) for
alerting policies.
per_series_aligner (google.cloud.monitoring_v3.types.Aggregation.Aligner):
An ``Aligner`` describes how to bring the data points in a
single time series into temporal alignment. Except for
``ALIGN_NONE``, all alignments cause all the data points in
an ``alignment_period`` to be mathematically grouped
together, resulting in a single data point for each
``alignment_period`` with end timestamp at the end of the
period.
Not all alignment operations may be applied to all time
series. The valid choices depend on the ``metric_kind`` and
``value_type`` of the original time series. Alignment can
change the ``metric_kind`` or the ``value_type`` of the time
series.
Time series data must be aligned in order to perform
cross-time series reduction. If ``cross_series_reducer`` is
specified, then ``per_series_aligner`` must be specified and
not equal to ``ALIGN_NONE`` and ``alignment_period`` must be
specified; otherwise, an error is returned.
cross_series_reducer (google.cloud.monitoring_v3.types.Aggregation.Reducer):
The reduction operation to be used to combine time series
into a single time series, where the value of each data
point in the resulting series is a function of all the
already aligned values in the input time series.
Not all reducer operations can be applied to all time
series. The valid choices depend on the ``metric_kind`` and
the ``value_type`` of the original time series. Reduction
can yield a time series with a different ``metric_kind`` or
``value_type`` than the input time series.
Time series data must first be aligned (see
``per_series_aligner``) in order to perform cross-time
series reduction. If ``cross_series_reducer`` is specified,
then ``per_series_aligner`` must be specified, and must not
be ``ALIGN_NONE``. An ``alignment_period`` must also be
specified; otherwise, an error is returned.
group_by_fields (Sequence[str]):
The set of fields to preserve when ``cross_series_reducer``
is specified. The ``group_by_fields`` determine how the time
series are partitioned into subsets prior to applying the
aggregation operation. Each subset contains time series that
have the same value for each of the grouping fields. Each
individual time series is a member of exactly one subset.
The ``cross_series_reducer`` is applied to each subset of
time series. It is not possible to reduce across different
resource types, so this field implicitly contains
``resource.type``. Fields not specified in
``group_by_fields`` are aggregated away. If
``group_by_fields`` is not specified and all the time series
have the same resource type, then the time series are
aggregated into a single output time series. If
``cross_series_reducer`` is not defined, this field is
ignored.
"""
class Aligner(proto.Enum):
r"""The ``Aligner`` specifies the operation that will be applied to the
data points in each alignment period in a time series. Except for
``ALIGN_NONE``, which specifies that no operation be applied, each
alignment operation replaces the set of data values in each
alignment period with a single value: the result of applying the
operation to the data values. An aligned time series has a single
data value at the end of each ``alignment_period``.
An alignment operation can change the data type of the values, too.
For example, if you apply a counting operation to boolean values,
the data ``value_type`` in the original time series is ``BOOLEAN``,
but the ``value_type`` in the aligned result is ``INT64``.
"""
ALIGN_NONE = 0
ALIGN_DELTA = 1
ALIGN_RATE = 2
ALIGN_INTERPOLATE = 3
ALIGN_NEXT_OLDER = 4
ALIGN_MIN = 10
ALIGN_MAX = 11
ALIGN_MEAN = 12
ALIGN_COUNT = 13
ALIGN_SUM = 14
ALIGN_STDDEV = 15
ALIGN_COUNT_TRUE = 16
ALIGN_COUNT_FALSE = 24
ALIGN_FRACTION_TRUE = 17
ALIGN_PERCENTILE_99 = 18
ALIGN_PERCENTILE_95 = 19
ALIGN_PERCENTILE_50 = 20
ALIGN_PERCENTILE_05 = 21
ALIGN_PERCENT_CHANGE = 23
class Reducer(proto.Enum):
r"""A Reducer operation describes how to aggregate data points
from multiple time series into a single time series, where the
value of each data point in the resulting series is a function
of all the already aligned values in the input time series.
"""
REDUCE_NONE = 0
REDUCE_MEAN = 1
REDUCE_MIN = 2
REDUCE_MAX = 3
REDUCE_SUM = 4
REDUCE_STDDEV = 5
REDUCE_COUNT = 6
REDUCE_COUNT_TRUE = 7
REDUCE_COUNT_FALSE = 15
REDUCE_FRACTION_TRUE = 8
REDUCE_PERCENTILE_99 = 9
REDUCE_PERCENTILE_95 = 10
REDUCE_PERCENTILE_50 = 11
REDUCE_PERCENTILE_05 = 12
alignment_period = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
per_series_aligner = proto.Field(
proto.ENUM,
number=2,
enum=Aligner,
)
cross_series_reducer = proto.Field(
proto.ENUM,
number=4,
enum=Reducer,
)
group_by_fields = proto.RepeatedField(
proto.STRING,
number=5,
)
__all__ = tuple(sorted(__protobuf__.manifest))
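def _example_messages():
    # Hedged usage sketch, not part of the generated module; it assumes the usual
    # proto-plus keyword construction for the messages defined above.
    interval = TimeInterval(
        start_time=timestamp_pb2.Timestamp(seconds=1609459200),
        end_time=timestamp_pb2.Timestamp(seconds=1609459260),
    )
    aggregation = Aggregation(
        alignment_period=duration_pb2.Duration(seconds=300),
        per_series_aligner=Aggregation.Aligner.ALIGN_MEAN,
        cross_series_reducer=Aggregation.Reducer.REDUCE_SUM,
        group_by_fields=["resource.zone"],
    )
    return interval, aggregation, TypedValue(double_value=0.95)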
| # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.api import distribution_pb2 # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.monitoring.v3',
manifest={
'ComparisonType',
'ServiceTier',
'TypedValue',
'TimeInterval',
'Aggregation',
},
)
class ComparisonType(proto.Enum):
r"""Specifies an ordering relationship on two arguments, called ``left``
and ``right``.
"""
COMPARISON_UNSPECIFIED = 0
COMPARISON_GT = 1
COMPARISON_GE = 2
COMPARISON_LT = 3
COMPARISON_LE = 4
COMPARISON_EQ = 5
COMPARISON_NE = 6
class ServiceTier(proto.Enum):
r"""The tier of service for a Workspace. Please see the `service tiers
documentation <https://cloud.google.com/monitoring/workspaces/tiers>`__
for more details.
"""
_pb_options = {'deprecated': True}
SERVICE_TIER_UNSPECIFIED = 0
SERVICE_TIER_BASIC = 1
SERVICE_TIER_PREMIUM = 2
class TypedValue(proto.Message):
r"""A single strongly-typed value.
Attributes:
bool_value (bool):
A Boolean value: ``true`` or ``false``.
int64_value (int):
A 64-bit integer. Its range is approximately
±9.2x10<sup>18</sup>.
double_value (float):
A 64-bit double-precision floating-point
number. Its magnitude is approximately
±10<sup>±300</sup> and it has 16
significant digits of precision.
string_value (str):
A variable-length string value.
distribution_value (google.api.distribution_pb2.Distribution):
A distribution value.
"""
bool_value = proto.Field(
proto.BOOL,
number=1,
oneof='value',
)
int64_value = proto.Field(
proto.INT64,
number=2,
oneof='value',
)
double_value = proto.Field(
proto.DOUBLE,
number=3,
oneof='value',
)
string_value = proto.Field(
proto.STRING,
number=4,
oneof='value',
)
distribution_value = proto.Field(
proto.MESSAGE,
number=5,
oneof='value',
message=distribution_pb2.Distribution,
)
class TimeInterval(proto.Message):
r"""A closed time interval. It extends from the start time to the end
time, and includes both: ``[startTime, endTime]``. Valid time
intervals depend on the
```MetricKind`` <https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind>`__
of the metric value. The end time must not be earlier than the start
time. When writing data points, the start time must not be more than
25 hours in the past and the end time must not be more than five
minutes in the future.
- For ``GAUGE`` metrics, the ``startTime`` value is technically
optional; if no value is specified, the start time defaults to
the value of the end time, and the interval represents a single
point in time. If both start and end times are specified, they
must be identical. Such an interval is valid only for ``GAUGE``
metrics, which are point-in-time measurements. The end time of a
new interval must be at least a millisecond after the end time of
the previous interval.
- For ``DELTA`` metrics, the start time and end time must specify a
non-zero interval, with subsequent points specifying contiguous
and non-overlapping intervals. For ``DELTA`` metrics, the start
time of the next interval must be at least a millisecond after
the end time of the previous interval.
- For ``CUMULATIVE`` metrics, the start time and end time must
specify a non-zero interval, with subsequent points specifying
the same start time and increasing end times, until an event
resets the cumulative value to zero and sets a new start time for
the following points. The new start time must be at least a
millisecond after the end time of the previous interval.
- The start time of a new interval must be at least a millisecond
after the end time of the previous interval because intervals are
closed. If the start time of a new interval is the same as the
end time of the previous interval, then data written at the new
start time could overwrite data written at the previous end time.
Attributes:
end_time (google.protobuf.timestamp_pb2.Timestamp):
Required. The end of the time interval.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Optional. The beginning of the time interval.
The default value for the start time is the end
time. The start time must not be later than the
end time.
"""
end_time = proto.Field(
proto.MESSAGE,
number=2,
message=timestamp_pb2.Timestamp,
)
start_time = proto.Field(
proto.MESSAGE,
number=1,
message=timestamp_pb2.Timestamp,
)
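# --- Illustrative helper (an addition for this example, not part of the
# generated module) --- For ``GAUGE`` metrics the interval is a single
# point in time, so ``start_time`` either equals ``end_time`` or is left
# unset, as described above. This sketch builds such an interval from a
# Unix timestamp given in seconds.
def _example_gauge_interval(end_seconds: int) -> 'TimeInterval':
    end = timestamp_pb2.Timestamp(seconds=end_seconds)
    # Setting only ``end_time`` would be equivalent, because the start
    # time defaults to the end time for GAUGE points.
    return TimeInterval(end_time=end, start_time=end)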
class Aggregation(proto.Message):
r"""Describes how to combine multiple time series to provide a different
view of the data. Aggregation of time series is done in two steps.
First, each time series in the set is *aligned* to the same time
interval boundaries, then the set of time series is optionally
*reduced* in number.
Alignment consists of applying the ``per_series_aligner`` operation
to each time series after its data has been divided into regular
``alignment_period`` time intervals. This process takes *all* of the
data points in an alignment period, applies a mathematical
transformation such as averaging, minimum, maximum, delta, etc., and
converts them into a single data point per period.
Reduction is when the aligned and transformed time series can
optionally be combined, reducing the number of time series through
similar mathematical transformations. Reduction involves applying a
``cross_series_reducer`` to all the time series, optionally sorting
the time series into subsets with ``group_by_fields``, and applying
the reducer to each subset.
The raw time series data can contain a huge amount of information
from multiple sources. Alignment and reduction transforms this mass
of data into a more manageable and representative collection of
data, for example "the 95% latency across the average of all tasks
in a cluster". This representative data can be more easily graphed
and comprehended, and the individual time series data is still
available for later drilldown. For more details, see `Filtering and
aggregation <https://cloud.google.com/monitoring/api/v3/aggregation>`__.
Attributes:
alignment_period (google.protobuf.duration_pb2.Duration):
The ``alignment_period`` specifies a time interval, in
seconds, that is used to divide the data in all the [time
series][google.monitoring.v3.TimeSeries] into consistent
blocks of time. This will be done before the per-series
aligner can be applied to the data.
The value must be at least 60 seconds. If a per-series
aligner other than ``ALIGN_NONE`` is specified, this field
is required or an error is returned. If no per-series
aligner is specified, or the aligner ``ALIGN_NONE`` is
specified, then this field is ignored.
The maximum value of the ``alignment_period`` is 104 weeks
(2 years) for charts, and 90,000 seconds (25 hours) for
alerting policies.
per_series_aligner (google.cloud.monitoring_v3.types.Aggregation.Aligner):
An ``Aligner`` describes how to bring the data points in a
single time series into temporal alignment. Except for
``ALIGN_NONE``, all alignments cause all the data points in
an ``alignment_period`` to be mathematically grouped
together, resulting in a single data point for each
``alignment_period`` with end timestamp at the end of the
period.
Not all alignment operations may be applied to all time
series. The valid choices depend on the ``metric_kind`` and
``value_type`` of the original time series. Alignment can
change the ``metric_kind`` or the ``value_type`` of the time
series.
Time series data must be aligned in order to perform
cross-time series reduction. If ``cross_series_reducer`` is
specified, then ``per_series_aligner`` must be specified and
not equal to ``ALIGN_NONE`` and ``alignment_period`` must be
specified; otherwise, an error is returned.
cross_series_reducer (google.cloud.monitoring_v3.types.Aggregation.Reducer):
The reduction operation to be used to combine time series
into a single time series, where the value of each data
point in the resulting series is a function of all the
already aligned values in the input time series.
Not all reducer operations can be applied to all time
series. The valid choices depend on the ``metric_kind`` and
the ``value_type`` of the original time series. Reduction
can yield a time series with a different ``metric_kind`` or
``value_type`` than the input time series.
Time series data must first be aligned (see
``per_series_aligner``) in order to perform cross-time
series reduction. If ``cross_series_reducer`` is specified,
then ``per_series_aligner`` must be specified, and must not
be ``ALIGN_NONE``. An ``alignment_period`` must also be
specified; otherwise, an error is returned.
group_by_fields (Sequence[str]):
The set of fields to preserve when ``cross_series_reducer``
is specified. The ``group_by_fields`` determine how the time
series are partitioned into subsets prior to applying the
aggregation operation. Each subset contains time series that
have the same value for each of the grouping fields. Each
individual time series is a member of exactly one subset.
The ``cross_series_reducer`` is applied to each subset of
time series. It is not possible to reduce across different
resource types, so this field implicitly contains
``resource.type``. Fields not specified in
``group_by_fields`` are aggregated away. If
``group_by_fields`` is not specified and all the time series
have the same resource type, then the time series are
aggregated into a single output time series. If
``cross_series_reducer`` is not defined, this field is
ignored.
"""
class Aligner(proto.Enum):
r"""The ``Aligner`` specifies the operation that will be applied to the
data points in each alignment period in a time series. Except for
``ALIGN_NONE``, which specifies that no operation be applied, each
alignment operation replaces the set of data values in each
alignment period with a single value: the result of applying the
operation to the data values. An aligned time series has a single
data value at the end of each ``alignment_period``.
An alignment operation can change the data type of the values, too.
For example, if you apply a counting operation to boolean values,
the data ``value_type`` in the original time series is ``BOOLEAN``,
but the ``value_type`` in the aligned result is ``INT64``.
"""
ALIGN_NONE = 0
ALIGN_DELTA = 1
ALIGN_RATE = 2
ALIGN_INTERPOLATE = 3
ALIGN_NEXT_OLDER = 4
ALIGN_MIN = 10
ALIGN_MAX = 11
ALIGN_MEAN = 12
ALIGN_COUNT = 13
ALIGN_SUM = 14
ALIGN_STDDEV = 15
ALIGN_COUNT_TRUE = 16
ALIGN_COUNT_FALSE = 24
ALIGN_FRACTION_TRUE = 17
ALIGN_PERCENTILE_99 = 18
ALIGN_PERCENTILE_95 = 19
ALIGN_PERCENTILE_50 = 20
ALIGN_PERCENTILE_05 = 21
ALIGN_PERCENT_CHANGE = 23
class Reducer(proto.Enum):
r"""A Reducer operation describes how to aggregate data points
from multiple time series into a single time series, where the
value of each data point in the resulting series is a function
of all the already aligned values in the input time series.
"""
REDUCE_NONE = 0
REDUCE_MEAN = 1
REDUCE_MIN = 2
REDUCE_MAX = 3
REDUCE_SUM = 4
REDUCE_STDDEV = 5
REDUCE_COUNT = 6
REDUCE_COUNT_TRUE = 7
REDUCE_COUNT_FALSE = 15
REDUCE_FRACTION_TRUE = 8
REDUCE_PERCENTILE_99 = 9
REDUCE_PERCENTILE_95 = 10
REDUCE_PERCENTILE_50 = 11
REDUCE_PERCENTILE_05 = 12
alignment_period = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
per_series_aligner = proto.Field(
proto.ENUM,
number=2,
enum=Aligner,
)
cross_series_reducer = proto.Field(
proto.ENUM,
number=4,
enum=Reducer,
)
group_by_fields = proto.RepeatedField(
proto.STRING,
number=5,
)
__all__ = tuple(sorted(__protobuf__.manifest))
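# --- Illustrative client call (an addition, not part of the generated
# module) --- A hedged sketch of how these types are typically consumed
# through the google-cloud-monitoring client. The project id, metric
# filter and timestamps are placeholders, and the exact client surface
# may differ between library versions.
if __name__ == '__main__':
    from google.cloud import monitoring_v3
    client = monitoring_v3.MetricServiceClient()
    results = client.list_time_series(
        request={
            'name': 'projects/my-project',
            'filter': 'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
            'interval': monitoring_v3.TimeInterval(
                end_time={'seconds': 1_700_000_000},
                start_time={'seconds': 1_700_000_000 - 3600},
            ),
            'aggregation': monitoring_v3.Aggregation(
                alignment_period={'seconds': 300},
                per_series_aligner=monitoring_v3.Aggregation.Aligner.ALIGN_MEAN,
            ),
        }
    )
    for series in results:
        print(series.metric.type)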
| en | 0.841408 | # -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # type: ignore # type: ignore # type: ignore # type: ignore Specifies an ordering relationship on two arguments, called ``left`` and ``right``. The tier of service for a Workspace. Please see the `service tiers documentation <https://cloud.google.com/monitoring/workspaces/tiers>`__ for more details. A single strongly-typed value. Attributes: bool_value (bool): A Boolean value: ``true`` or ``false``. int64_value (int): A 64-bit integer. Its range is approximately ±9.2x10<sup>18</sup>. double_value (float): A 64-bit double-precision floating-point number. Its magnitude is approximately ±10<sup>±300</sup> and it has 16 significant digits of precision. string_value (str): A variable-length string value. distribution_value (google.api.distribution_pb2.Distribution): A distribution value. A closed time interval. It extends from the start time to the end time, and includes both: ``[startTime, endTime]``. Valid time intervals depend on the ```MetricKind`` <https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind>`__ of the metric value. The end time must not be earlier than the start time. When writing data points, the start time must not be more than 25 hours in the past and the end time must not be more than five minutes in the future. - For ``GAUGE`` metrics, the ``startTime`` value is technically optional; if no value is specified, the start time defaults to the value of the end time, and the interval represents a single point in time. If both start and end times are specified, they must be identical. Such an interval is valid only for ``GAUGE`` metrics, which are point-in-time measurements. The end time of a new interval must be at least a millisecond after the end time of the previous interval. - For ``DELTA`` metrics, the start time and end time must specify a non-zero interval, with subsequent points specifying contiguous and non-overlapping intervals. For ``DELTA`` metrics, the start time of the next interval must be at least a millisecond after the end time of the previous interval. - For ``CUMULATIVE`` metrics, the start time and end time must specify a a non-zero interval, with subsequent points specifying the same start time and increasing end times, until an event resets the cumulative value to zero and sets a new start time for the following points. The new start time must be at least a millisecond after the end time of the previous interval. - The start time of a new interval must be at least a millisecond after the end time of the previous interval because intervals are closed. If the start time of a new interval is the same as the end time of the previous interval, then data written at the new start time could overwrite data written at the previous end time. Attributes: end_time (google.protobuf.timestamp_pb2.Timestamp): Required. The end of the time interval. start_time (google.protobuf.timestamp_pb2.Timestamp): Optional. 
The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time. Describes how to combine multiple time series to provide a different view of the data. Aggregation of time series is done in two steps. First, each time series in the set is *aligned* to the same time interval boundaries, then the set of time series is optionally *reduced* in number. Alignment consists of applying the ``per_series_aligner`` operation to each time series after its data has been divided into regular ``alignment_period`` time intervals. This process takes *all* of the data points in an alignment period, applies a mathematical transformation such as averaging, minimum, maximum, delta, etc., and converts them into a single data point per period. Reduction is when the aligned and transformed time series can optionally be combined, reducing the number of time series through similar mathematical transformations. Reduction involves applying a ``cross_series_reducer`` to all the time series, optionally sorting the time series into subsets with ``group_by_fields``, and applying the reducer to each subset. The raw time series data can contain a huge amount of information from multiple sources. Alignment and reduction transforms this mass of data into a more manageable and representative collection of data, for example "the 95% latency across the average of all tasks in a cluster". This representative data can be more easily graphed and comprehended, and the individual time series data is still available for later drilldown. For more details, see `Filtering and aggregation <https://cloud.google.com/monitoring/api/v3/aggregation>`__. Attributes: alignment_period (google.protobuf.duration_pb2.Duration): The ``alignment_period`` specifies a time interval, in seconds, that is used to divide the data in all the [time series][google.monitoring.v3.TimeSeries] into consistent blocks of time. This will be done before the per-series aligner can be applied to the data. The value must be at least 60 seconds. If a per-series aligner other than ``ALIGN_NONE`` is specified, this field is required or an error is returned. If no per-series aligner is specified, or the aligner ``ALIGN_NONE`` is specified, then this field is ignored. The maximum value of the ``alignment_period`` is 104 weeks (2 years) for charts, and 90,000 seconds (25 hours) for alerting policies. per_series_aligner (google.cloud.monitoring_v3.types.Aggregation.Aligner): An ``Aligner`` describes how to bring the data points in a single time series into temporal alignment. Except for ``ALIGN_NONE``, all alignments cause all the data points in an ``alignment_period`` to be mathematically grouped together, resulting in a single data point for each ``alignment_period`` with end timestamp at the end of the period. Not all alignment operations may be applied to all time series. The valid choices depend on the ``metric_kind`` and ``value_type`` of the original time series. Alignment can change the ``metric_kind`` or the ``value_type`` of the time series. Time series data must be aligned in order to perform cross-time series reduction. If ``cross_series_reducer`` is specified, then ``per_series_aligner`` must be specified and not equal to ``ALIGN_NONE`` and ``alignment_period`` must be specified; otherwise, an error is returned. 
cross_series_reducer (google.cloud.monitoring_v3.types.Aggregation.Reducer): The reduction operation to be used to combine time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. Not all reducer operations can be applied to all time series. The valid choices depend on the ``metric_kind`` and the ``value_type`` of the original time series. Reduction can yield a time series with a different ``metric_kind`` or ``value_type`` than the input time series. Time series data must first be aligned (see ``per_series_aligner``) in order to perform cross-time series reduction. If ``cross_series_reducer`` is specified, then ``per_series_aligner`` must be specified, and must not be ``ALIGN_NONE``. An ``alignment_period`` must also be specified; otherwise, an error is returned. group_by_fields (Sequence[str]): The set of fields to preserve when ``cross_series_reducer`` is specified. The ``group_by_fields`` determine how the time series are partitioned into subsets prior to applying the aggregation operation. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The ``cross_series_reducer`` is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains ``resource.type``. Fields not specified in ``group_by_fields`` are aggregated away. If ``group_by_fields`` is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If ``cross_series_reducer`` is not defined, this field is ignored. The ``Aligner`` specifies the operation that will be applied to the data points in each alignment period in a time series. Except for ``ALIGN_NONE``, which specifies that no operation be applied, each alignment operation replaces the set of data values in each alignment period with a single value: the result of applying the operation to the data values. An aligned time series has a single data value at the end of each ``alignment_period``. An alignment operation can change the data type of the values, too. For example, if you apply a counting operation to boolean values, the data ``value_type`` in the original time series is ``BOOLEAN``, but the ``value_type`` in the aligned result is ``INT64``. A Reducer operation describes how to aggregate data points from multiple time series into a single time series, where the value of each data point in the resulting series is a function of all the already aligned values in the input time series. | 1.644666 | 2 |
inclearn/lib/network/calibrators.py | Zotkin/incremental_learning.pytorch | 277 | 6632938 | import torch
from torch import nn
class CalibrationWrapper(nn.Module):
"""Wraps several calibration models, each being applied on different targets."""
def __init__(self):
super().__init__()
self.start_indexes = []
self.end_indexes = []
self.models = nn.ModuleList([])
def add_model(self, model, start_index, end_index):
"""Adds a calibration model that will applies on target between the two indexes.
The models must be added in the right targets order!
"""
self.models.append(model)
self.start_indexes.append(start_index)
self.end_indexes.append(end_index)
def forward(self, inputs):
corrected_inputs = []
if self.start_indexes[0] != 0:
corrected_inputs.append(inputs[..., :self.start_indexes[0]])
for model, start_index, end_index in zip(self.models, self.start_indexes, self.end_indexes):
corrected_inputs.append(model(inputs[..., start_index:end_index]))
if self.end_indexes[-1] != inputs.shape[1]:
corrected_inputs.append(inputs[..., self.end_indexes[-1]:])
corrected_inputs = torch.cat(corrected_inputs, dim=-1)
return corrected_inputs
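# --- Illustrative usage (an addition, not part of the library) ---
# A minimal sketch of the wrapper on its own: two ``nn.Identity`` models
# cover targets [0, 10) and [10, 20), so the wrapper returns the logits
# unchanged. The batch size and index ranges are assumptions.
def _identity_wrapper_example() -> torch.Tensor:
    wrapper = CalibrationWrapper()
    wrapper.add_model(nn.Identity(), start_index=0, end_index=10)
    wrapper.add_model(nn.Identity(), start_index=10, end_index=20)
    logits = torch.randn(4, 20)
    return wrapper(logits)  # identical values to ``logits``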
class LinearModel(nn.Module):
"""Linear model applying on the logits alpha * x + beta.
By default, this model is initialized as an identity operation.
See https://arxiv.org/abs/1905.13260 for an example usage.
:param alpha: A learned scalar.
:param beta: A learned scalar.
"""
def __init__(self, alpha=1., beta=0.):
super().__init__()
self.alpha = nn.Parameter(torch.tensor(alpha))
self.beta = nn.Parameter(torch.tensor(beta))
def forward(self, inputs):
return self.alpha * inputs + self.beta
class TemperatureScaling(nn.Module):
"""Applies a learned temperature on the logits.
See https://arxiv.org/abs/1706.04599.
"""
def __init__(self, temperature=1.0):  # float default so the parameter can require gradients
super().__init__()
self.temperature = nn.Parameter(torch.tensor(temperature))
def forward(self, inputs):
return inputs / self.temperature
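# --- Illustrative calibration loop (an addition, not part of the library) ---
# A hedged sketch of the usual temperature-scaling recipe from
# https://arxiv.org/abs/1706.04599: keep the classifier frozen and fit the
# single temperature on held-out validation logits by minimizing the NLL.
# The random logits and labels below stand in for real validation outputs.
if __name__ == '__main__':
    val_logits = torch.randn(128, 20)
    val_labels = torch.randint(0, 20, (128,))
    scaler = TemperatureScaling(temperature=1.5)
    optimizer = torch.optim.LBFGS(scaler.parameters(), lr=0.01, max_iter=50)

    def closure():
        optimizer.zero_grad()
        loss = nn.functional.cross_entropy(scaler(val_logits), val_labels)
        loss.backward()
        return loss

    optimizer.step(closure)
    print('fitted temperature:', scaler.temperature.item())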
| import torch
from torch import nn
class CalibrationWrapper(nn.Module):
"""Wraps several calibration models, each being applied on different targets."""
def __init__(self):
super().__init__()
self.start_indexes = []
self.end_indexes = []
self.models = nn.ModuleList([])
def add_model(self, model, start_index, end_index):
"""Adds a calibration model that will applies on target between the two indexes.
The models must be added in the right targets order!
"""
self.models.append(model)
self.start_indexes.append(start_index)
self.end_indexes.append(end_index)
def forward(self, inputs):
corrected_inputs = []
if self.start_indexes[0] != 0:
corrected_inputs.append(inputs[..., :self.start_indexes[0]])
for model, start_index, end_index in zip(self.models, self.start_indexes, self.end_indexes):
corrected_inputs.append(model(inputs[..., start_index:end_index]))
if self.end_indexes[-1] != inputs.shape[1]:
corrected_inputs.append(inputs[..., self.end_indexes[-1]:])
corrected_inputs = torch.cat(corrected_inputs, dim=-1)
return corrected_inputs
class LinearModel(nn.Module):
"""Linear model applying on the logits alpha * x + beta.
By default, this model is initialized as an identity operation.
See https://arxiv.org/abs/1905.13260 for an example usage.
:param alpha: A learned scalar.
:param beta: A learned scalar.
"""
def __init__(self, alpha=1., beta=0.):
super().__init__()
self.alpha = nn.Parameter(torch.tensor(alpha))
self.beta = nn.Parameter(torch.tensor(beta))
def forward(self, inputs):
return self.alpha * inputs + self.beta
class TemperatureScaling(nn.Module):
"""Applies a learned temperature on the logits.
See https://arxiv.org/abs/1706.04599.
"""
def __init__(self, temperature=1.0):  # float default so the parameter can require gradients
super().__init__()
self.temperature = nn.Parameter(torch.tensor(temperature))
def forward(self, inputs):
return inputs / self.temperature
| en | 0.842926 | Wraps several calibration models, each being applied on different targets. Adds a calibration model that will applies on target between the two indexes. The models must be added in the right targets order! Linear model applying on the logits alpha * x + beta. By default, this model is initialized as an identity operation. See https://arxiv.org/abs/1905.13260 for an example usage. :param alpha: A learned scalar. :param beta: A learned scalar. Applies a learned temperature on the logits. See https://arxiv.org/abs/1706.04599. | 2.832772 | 3 |
heat/engine/resources/openstack/manila/share.py | soma-micro-service/heat | 0 | 6632939 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import encodeutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
LOG = logging.getLogger(__name__)
class ManilaShare(resource.Resource):
"""A resource that creates shared mountable file system.
The resource creates a manila share - a shared mountable filesystem that
can be attached to any client (or clients) that has network access and
permission to mount the filesystem. A share is a unit of storage with a
specific size that supports a pre-defined share protocol and an advanced
security model (access lists, share networks and security services).
"""
support_status = support.SupportStatus(version='5.0.0')
_ACCESS_RULE_PROPERTIES = (
ACCESS_TO, ACCESS_TYPE, ACCESS_LEVEL
) = (
'access_to', 'access_type', 'access_level')
_SHARE_STATUSES = (
STATUS_CREATING, STATUS_DELETING, STATUS_ERROR, STATUS_ERROR_DELETING,
STATUS_AVAILABLE
) = (
'creating', 'deleting', 'error', 'error_deleting',
'available'
)
PROPERTIES = (
SHARE_PROTOCOL, SIZE, SHARE_SNAPSHOT, NAME, METADATA,
SHARE_NETWORK, DESCRIPTION, SHARE_TYPE, IS_PUBLIC,
ACCESS_RULES
) = (
'share_protocol', 'size', 'snapshot', 'name', 'metadata',
'share_network', 'description', 'share_type', 'is_public',
'access_rules'
)
ATTRIBUTES = (
AVAILABILITY_ZONE_ATTR, HOST_ATTR, EXPORT_LOCATIONS_ATTR,
SHARE_SERVER_ID_ATTR, CREATED_AT_ATTR, SHARE_STATUS_ATTR,
PROJECT_ID_ATTR
) = (
'availability_zone', 'host', 'export_locations',
'share_server_id', 'created_at', 'status',
'project_id'
)
properties_schema = {
SHARE_PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('Share protocol supported by shared filesystem.'),
required=True,
constraints=[constraints.AllowedValues(
['NFS', 'CIFS', 'GlusterFS', 'HDFS'])]
),
SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Share storage size in GB.'),
required=True
),
SHARE_SNAPSHOT: properties.Schema(
properties.Schema.STRING,
_('Name or ID of shared file system snapshot that '
'will be restored and created as a new share.'),
constraints=[constraints.CustomConstraint('manila.share_snapshot')]
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Share name.'),
update_allowed=True
),
METADATA: properties.Schema(
properties.Schema.MAP,
_('Metadata key-values defined for share.'),
update_allowed=True
),
SHARE_NETWORK: properties.Schema(
properties.Schema.STRING,
_('Name or ID of shared network defined for shared filesystem.'),
constraints=[constraints.CustomConstraint('manila.share_network')]
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Share description.'),
update_allowed=True
),
SHARE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Name or ID of shared filesystem type. A share type defines the '
'filesystem profile that will be used for share creation.'),
constraints=[constraints.CustomConstraint("manila.share_type")]
),
IS_PUBLIC: properties.Schema(
properties.Schema.BOOLEAN,
_('Defines if shared filesystem is public or private.'),
default=False,
update_allowed=True
),
ACCESS_RULES: properties.Schema(
properties.Schema.LIST,
_('A list of access rules that define access from IP to Share.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
ACCESS_TO: properties.Schema(
properties.Schema.STRING,
_('IP or other address information about the guest that is '
'allowed to access the share.'),
required=True
),
ACCESS_TYPE: properties.Schema(
properties.Schema.STRING,
_('Type of access that should be provided to guest.'),
constraints=[constraints.AllowedValues(
['ip', 'domain'])],
required=True
),
ACCESS_LEVEL: properties.Schema(
properties.Schema.STRING,
_('Level of access that needs to be provided for the '
'guest.'),
constraints=[constraints.AllowedValues(['ro', 'rw'])]
)
}
),
update_allowed=True,
default=[]
)
}
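# --- Illustrative property values (an addition, not part of the resource) ---
# A hedged sketch of the property map a template author might supply for
# this resource, written as the Python dict Heat parses it into. Every
# concrete value (protocol, size, name, address) is a placeholder.
_EXAMPLE_PROPERTIES = {
    'share_protocol': 'NFS',
    'size': 1,
    'name': 'example-share',
    'description': 'Share created from an example template.',
    'is_public': False,
    'access_rules': [
        {'access_to': '10.0.0.10', 'access_type': 'ip', 'access_level': 'rw'},
    ],
}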
attributes_schema = {
AVAILABILITY_ZONE_ATTR: attributes.Schema(
_('The availability zone of shared filesystem.'),
type=attributes.Schema.STRING
),
HOST_ATTR: attributes.Schema(
_('Share host.'),
type=attributes.Schema.STRING
),
EXPORT_LOCATIONS_ATTR: attributes.Schema(
_('Export locations of share.'),
type=attributes.Schema.LIST
),
SHARE_SERVER_ID_ATTR: attributes.Schema(
_('ID of server (VM, etc...) on host that is used for '
'exporting network file-system.'),
type=attributes.Schema.STRING
),
CREATED_AT_ATTR: attributes.Schema(
_('Datetime when a share was created.'),
type=attributes.Schema.STRING
),
SHARE_STATUS_ATTR: attributes.Schema(
_('Current share status.'),
type=attributes.Schema.STRING
),
PROJECT_ID_ATTR: attributes.Schema(
_('Share project ID.'),
type=attributes.Schema.STRING
)
}
default_client_name = 'manila'
entity = 'shares'
def _request_share(self):
return self.client().shares.get(self.resource_id)
def _resolve_attribute(self, name):
if self.resource_id is None:
return
share = self._request_share()
return six.text_type(getattr(share, name))
def handle_create(self):
# Request IDs of entities from manila
# if name of the entity defined in template
share_net_identity = self.properties[self.SHARE_NETWORK]
if share_net_identity:
share_net_identity = self.client_plugin().get_share_network(
share_net_identity).id
snapshot_identity = self.properties[self.SHARE_SNAPSHOT]
if snapshot_identity:
snapshot_identity = self.client_plugin().get_share_snapshot(
snapshot_identity).id
share_type_identity = self.properties[self.SHARE_TYPE]
if share_type_identity:
share_type_identity = self.client_plugin().get_share_type(
share_type_identity).id
share = self.client().shares.create(
share_proto=self.properties[self.SHARE_PROTOCOL],
size=self.properties[self.SIZE],
snapshot_id=snapshot_identity,
name=self.properties[self.NAME],
description=self.properties[self.DESCRIPTION],
metadata=self.properties[self.METADATA],
share_network=share_net_identity,
share_type=share_type_identity,
is_public=self.properties[self.IS_PUBLIC])
self.resource_id_set(share.id)
def check_create_complete(self, *args):
share_status = self._request_share().status
if share_status == self.STATUS_CREATING:
return False
elif share_status == self.STATUS_AVAILABLE:
LOG.info(_LI('Applying access rules to created Share.'))
# apply access rules to created share. please note that it is not
# possible to define rules for share with share_status = creating
access_rules = self.properties.get(self.ACCESS_RULES)
try:
if access_rules:
for rule in access_rules:
self.client().shares.allow(
share=self.resource_id,
access_type=rule.get(self.ACCESS_TYPE),
access=rule.get(self.ACCESS_TO),
access_level=rule.get(self.ACCESS_LEVEL))
return True
except Exception as ex:
err_msg = encodeutils.exception_to_unicode(ex)
reason = _(
'Error during applying access rules to share "{0}". '
'The root cause of the problem is the following: {1}.'
).format(self.resource_id, err_msg)
raise exception.ResourceInError(
status_reason=reason, resource_status=share_status)
elif share_status == self.STATUS_ERROR:
reason = _('Error during creation of share "{0}"').format(
self.resource_id)
raise exception.ResourceInError(status_reason=reason,
resource_status=share_status)
else:
reason = _('Unknown share_status during creation of share "{0}"'
).format(self.resource_id)
raise exception.ResourceUnknownStatus(
status_reason=reason, resource_status=share_status)
def check_delete_complete(self, *args):
if not self.resource_id:
return True
try:
share = self._request_share()
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
return True
else:
# when share creation is not finished proceed listening
if share.status == self.STATUS_DELETING:
return False
elif share.status in (self.STATUS_ERROR,
self.STATUS_ERROR_DELETING):
raise exception.ResourceInError(
status_reason=_(
'Error during deleting share "{0}".'
).format(self.resource_id),
resource_status=share.status)
else:
reason = _('Unknown status during deleting share '
'"{0}"').format(self.resource_id)
raise exception.ResourceUnknownStatus(
status_reason=reason, resource_status=share.status)
def handle_check(self):
share = self._request_share()
expected_statuses = [self.STATUS_AVAILABLE]
checks = [{'attr': 'status', 'expected': expected_statuses,
'current': share.status}]
self._verify_check_conditions(checks)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
kwargs = {}
if self.IS_PUBLIC in prop_diff:
kwargs['is_public'] = prop_diff.get(self.IS_PUBLIC)
if self.NAME in prop_diff:
kwargs['display_name'] = prop_diff.get(self.NAME)
if self.DESCRIPTION in prop_diff:
kwargs['display_description'] = prop_diff.get(self.DESCRIPTION)
if kwargs:
self.client().shares.update(self.resource_id,
**kwargs)
if self.METADATA in prop_diff:
metadata = prop_diff.get(self.METADATA)
self.client().shares.update_all_metadata(
self.resource_id, metadata)
if self.ACCESS_RULES in prop_diff:
actual_old_rules = []
for rule in self.client().shares.access_list(self.resource_id):
old_rule = {
self.ACCESS_TO: getattr(rule, self.ACCESS_TO),
self.ACCESS_TYPE: getattr(rule, self.ACCESS_TYPE),
self.ACCESS_LEVEL: getattr(rule, self.ACCESS_LEVEL)
}
if old_rule in prop_diff[self.ACCESS_RULES]:
actual_old_rules.append(old_rule)
else:
self.client().shares.deny(share=self.resource_id,
id=rule.id)
for rule in prop_diff[self.ACCESS_RULES]:
if rule not in actual_old_rules:
self.client().shares.allow(
share=self.resource_id,
access_type=rule.get(self.ACCESS_TYPE),
access=rule.get(self.ACCESS_TO),
access_level=rule.get(self.ACCESS_LEVEL)
)
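# --- Illustrative helper (an addition, not used by the resource) ---
# A standalone sketch of the reconciliation performed in handle_update
# above: rules present only in the old list are denied, rules present
# only in the new list are allowed, and rules found in both are kept.
def _diff_access_rules(old_rules, new_rules):
    kept = [rule for rule in old_rules if rule in new_rules]
    to_deny = [rule for rule in old_rules if rule not in new_rules]
    to_allow = [rule for rule in new_rules if rule not in kept]
    return to_deny, to_allow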
def resource_mapping():
return {'OS::Manila::Share': ManilaShare}
| #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import encodeutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
LOG = logging.getLogger(__name__)
class ManilaShare(resource.Resource):
"""A resource that creates shared mountable file system.
The resource creates a manila share - a shared mountable filesystem that
can be attached to any client (or clients) that has network access and
permission to mount the filesystem. A share is a unit of storage with a
specific size that supports a pre-defined share protocol and an advanced
security model (access lists, share networks and security services).
"""
support_status = support.SupportStatus(version='5.0.0')
_ACCESS_RULE_PROPERTIES = (
ACCESS_TO, ACCESS_TYPE, ACCESS_LEVEL
) = (
'access_to', 'access_type', 'access_level')
_SHARE_STATUSES = (
STATUS_CREATING, STATUS_DELETING, STATUS_ERROR, STATUS_ERROR_DELETING,
STATUS_AVAILABLE
) = (
'creating', 'deleting', 'error', 'error_deleting',
'available'
)
PROPERTIES = (
SHARE_PROTOCOL, SIZE, SHARE_SNAPSHOT, NAME, METADATA,
SHARE_NETWORK, DESCRIPTION, SHARE_TYPE, IS_PUBLIC,
ACCESS_RULES
) = (
'share_protocol', 'size', 'snapshot', 'name', 'metadata',
'share_network', 'description', 'share_type', 'is_public',
'access_rules'
)
ATTRIBUTES = (
AVAILABILITY_ZONE_ATTR, HOST_ATTR, EXPORT_LOCATIONS_ATTR,
SHARE_SERVER_ID_ATTR, CREATED_AT_ATTR, SHARE_STATUS_ATTR,
PROJECT_ID_ATTR
) = (
'availability_zone', 'host', 'export_locations',
'share_server_id', 'created_at', 'status',
'project_id'
)
properties_schema = {
SHARE_PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('Share protocol supported by shared filesystem.'),
required=True,
constraints=[constraints.AllowedValues(
['NFS', 'CIFS', 'GlusterFS', 'HDFS'])]
),
SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Share storage size in GB.'),
required=True
),
SHARE_SNAPSHOT: properties.Schema(
properties.Schema.STRING,
_('Name or ID of shared file system snapshot that '
'will be restored and created as a new share.'),
constraints=[constraints.CustomConstraint('manila.share_snapshot')]
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Share name.'),
update_allowed=True
),
METADATA: properties.Schema(
properties.Schema.MAP,
_('Metadata key-values defined for share.'),
update_allowed=True
),
SHARE_NETWORK: properties.Schema(
properties.Schema.STRING,
_('Name or ID of shared network defined for shared filesystem.'),
constraints=[constraints.CustomConstraint('manila.share_network')]
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Share description.'),
update_allowed=True
),
SHARE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Name or ID of shared filesystem type. A share type defines the '
'filesystem profile that will be used for share creation.'),
constraints=[constraints.CustomConstraint("manila.share_type")]
),
IS_PUBLIC: properties.Schema(
properties.Schema.BOOLEAN,
_('Defines if shared filesystem is public or private.'),
default=False,
update_allowed=True
),
ACCESS_RULES: properties.Schema(
properties.Schema.LIST,
_('A list of access rules that define access from IP to Share.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
ACCESS_TO: properties.Schema(
properties.Schema.STRING,
_('IP or other address information about the guest that is '
'allowed to access the share.'),
required=True
),
ACCESS_TYPE: properties.Schema(
properties.Schema.STRING,
_('Type of access that should be provided to guest.'),
constraints=[constraints.AllowedValues(
['ip', 'domain'])],
required=True
),
ACCESS_LEVEL: properties.Schema(
properties.Schema.STRING,
_('Level of access that needs to be provided for the '
'guest.'),
constraints=[constraints.AllowedValues(['ro', 'rw'])]
)
}
),
update_allowed=True,
default=[]
)
}
attributes_schema = {
AVAILABILITY_ZONE_ATTR: attributes.Schema(
_('The availability zone of shared filesystem.'),
type=attributes.Schema.STRING
),
HOST_ATTR: attributes.Schema(
_('Share host.'),
type=attributes.Schema.STRING
),
EXPORT_LOCATIONS_ATTR: attributes.Schema(
_('Export locations of share.'),
type=attributes.Schema.LIST
),
SHARE_SERVER_ID_ATTR: attributes.Schema(
_('ID of server (VM, etc...) on host that is used for '
'exporting network file-system.'),
type=attributes.Schema.STRING
),
CREATED_AT_ATTR: attributes.Schema(
_('Datetime when a share was created.'),
type=attributes.Schema.STRING
),
SHARE_STATUS_ATTR: attributes.Schema(
_('Current share status.'),
type=attributes.Schema.STRING
),
PROJECT_ID_ATTR: attributes.Schema(
_('Share project ID.'),
type=attributes.Schema.STRING
)
}
default_client_name = 'manila'
entity = 'shares'
def _request_share(self):
return self.client().shares.get(self.resource_id)
def _resolve_attribute(self, name):
if self.resource_id is None:
return
share = self._request_share()
return six.text_type(getattr(share, name))
def handle_create(self):
# Request IDs of entities from manila
# if name of the entity defined in template
share_net_identity = self.properties[self.SHARE_NETWORK]
if share_net_identity:
share_net_identity = self.client_plugin().get_share_network(
share_net_identity).id
snapshot_identity = self.properties[self.SHARE_SNAPSHOT]
if snapshot_identity:
snapshot_identity = self.client_plugin().get_share_snapshot(
snapshot_identity).id
share_type_identity = self.properties[self.SHARE_TYPE]
if share_type_identity:
share_type_identity = self.client_plugin().get_share_type(
share_type_identity).id
share = self.client().shares.create(
share_proto=self.properties[self.SHARE_PROTOCOL],
size=self.properties[self.SIZE],
snapshot_id=snapshot_identity,
name=self.properties[self.NAME],
description=self.properties[self.DESCRIPTION],
metadata=self.properties[self.METADATA],
share_network=share_net_identity,
share_type=share_type_identity,
is_public=self.properties[self.IS_PUBLIC])
self.resource_id_set(share.id)
def check_create_complete(self, *args):
share_status = self._request_share().status
if share_status == self.STATUS_CREATING:
return False
elif share_status == self.STATUS_AVAILABLE:
LOG.info(_LI('Applying access rules to created Share.'))
# apply access rules to created share. please note that it is not
# possible to define rules for share with share_status = creating
access_rules = self.properties.get(self.ACCESS_RULES)
try:
if access_rules:
for rule in access_rules:
self.client().shares.allow(
share=self.resource_id,
access_type=rule.get(self.ACCESS_TYPE),
access=rule.get(self.ACCESS_TO),
access_level=rule.get(self.ACCESS_LEVEL))
return True
except Exception as ex:
err_msg = encodeutils.exception_to_unicode(ex)
reason = _(
'Error during applying access rules to share "{0}". '
'The root cause of the problem is the following: {1}.'
).format(self.resource_id, err_msg)
raise exception.ResourceInError(
status_reason=reason, resource_status=share_status)
elif share_status == self.STATUS_ERROR:
reason = _('Error during creation of share "{0}"').format(
self.resource_id)
raise exception.ResourceInError(status_reason=reason,
resource_status=share_status)
else:
reason = _('Unknown share_status during creation of share "{0}"'
).format(self.resource_id)
raise exception.ResourceUnknownStatus(
status_reason=reason, resource_status=share_status)
def check_delete_complete(self, *args):
if not self.resource_id:
return True
try:
share = self._request_share()
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
return True
else:
# when share creation is not finished proceed listening
if share.status == self.STATUS_DELETING:
return False
elif share.status in (self.STATUS_ERROR,
self.STATUS_ERROR_DELETING):
raise exception.ResourceInError(
status_reason=_(
'Error during deleting share "{0}".'
).format(self.resource_id),
resource_status=share.status)
else:
reason = _('Unknown status during deleting share '
'"{0}"').format(self.resource_id)
raise exception.ResourceUnknownStatus(
status_reason=reason, resource_status=share.status)
def handle_check(self):
share = self._request_share()
expected_statuses = [self.STATUS_AVAILABLE]
checks = [{'attr': 'status', 'expected': expected_statuses,
'current': share.status}]
self._verify_check_conditions(checks)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
kwargs = {}
if self.IS_PUBLIC in prop_diff:
kwargs['is_public'] = prop_diff.get(self.IS_PUBLIC)
if self.NAME in prop_diff:
kwargs['display_name'] = prop_diff.get(self.NAME)
if self.DESCRIPTION in prop_diff:
kwargs['display_description'] = prop_diff.get(self.DESCRIPTION)
if kwargs:
self.client().shares.update(self.resource_id,
**kwargs)
if self.METADATA in prop_diff:
metadata = prop_diff.get(self.METADATA)
self.client().shares.update_all_metadata(
self.resource_id, metadata)
if self.ACCESS_RULES in prop_diff:
actual_old_rules = []
for rule in self.client().shares.access_list(self.resource_id):
old_rule = {
self.ACCESS_TO: getattr(rule, self.ACCESS_TO),
self.ACCESS_TYPE: getattr(rule, self.ACCESS_TYPE),
self.ACCESS_LEVEL: getattr(rule, self.ACCESS_LEVEL)
}
if old_rule in prop_diff[self.ACCESS_RULES]:
actual_old_rules.append(old_rule)
else:
self.client().shares.deny(share=self.resource_id,
id=rule.id)
for rule in prop_diff[self.ACCESS_RULES]:
if rule not in actual_old_rules:
self.client().shares.allow(
share=self.resource_id,
access_type=rule.get(self.ACCESS_TYPE),
access=rule.get(self.ACCESS_TO),
access_level=rule.get(self.ACCESS_LEVEL)
)
def resource_mapping():
return {'OS::Manila::Share': ManilaShare}
| en | 0.888773 | # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. A resource that creates shared mountable file system. The resource creates a manila share - shared mountable filesystem that can be attached to any client(or clients) that has a network access and permission to mount filesystem. Share is a unit of storage with specific size that supports pre-defined share protocol and advanced security model (access lists, share networks and security services). # Request IDs of entities from manila # if name of the entity defined in template # apply access rules to created share. please note that it is not # possible to define rules for share with share_status = creating # when share creation is not finished proceed listening | 1.925583 | 2 |
squeaknode/node/squeak_controller.py | gitter-badger/squeaknode | 0 | 6632940 | <filename>squeaknode/node/squeak_controller.py<gh_stars>0
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import threading
from typing import Iterable
from typing import List
from typing import Optional
from typing import Union
import squeak.params
from squeak.core import CSqueak
from squeak.core.signing import CSqueakAddress
from squeak.messages import msg_getdata
from squeak.messages import msg_inv
from squeak.messages import MSG_SECRET_KEY
from squeak.messages import MSG_SQUEAK
from squeak.messages import MsgSerializable
from squeak.net import CInterested
from squeak.net import CInv
from squeak.net import CSqueakLocator
from squeaknode.core.block_range import BlockRange
from squeaknode.core.connected_peer import ConnectedPeer
from squeaknode.core.download_result import DownloadResult
from squeaknode.core.interests import squeak_matches_interest
from squeaknode.core.lightning_address import LightningAddressHostPort
from squeaknode.core.offer import Offer
from squeaknode.core.peer_address import PeerAddress
from squeaknode.core.peers import create_saved_peer
from squeaknode.core.profiles import create_contact_profile
from squeaknode.core.profiles import create_signing_profile
from squeaknode.core.profiles import get_profile_private_key
from squeaknode.core.received_offer import ReceivedOffer
from squeaknode.core.received_payment import ReceivedPayment
from squeaknode.core.received_payment_summary import ReceivedPaymentSummary
from squeaknode.core.sent_offer import SentOffer
from squeaknode.core.sent_payment import SentPayment
from squeaknode.core.sent_payment_summary import SentPaymentSummary
from squeaknode.core.squeak_entry import SqueakEntry
from squeaknode.core.squeak_peer import SqueakPeer
from squeaknode.core.squeak_profile import SqueakProfile
from squeaknode.core.squeaks import get_hash
from squeaknode.core.update_subscriptions_event import UpdateSubscriptionsEvent
from squeaknode.node.active_download_manager import ActiveDownload
from squeaknode.node.listener_subscription_client import EventListener
from squeaknode.node.received_payments_subscription_client import ReceivedPaymentsSubscriptionClient
logger = logging.getLogger(__name__)
class SqueakController:
def __init__(
self,
squeak_db,
squeak_core,
payment_processor,
network_manager,
download_manager,
config,
):
self.squeak_db = squeak_db
self.squeak_core = squeak_core
self.payment_processor = payment_processor
self.network_manager = network_manager
self.new_squeak_listener = EventListener()
self.new_received_offer_listener = EventListener()
self.new_secret_key_listener = EventListener()
self.new_follow_listener = EventListener()
# self.temporary_interest_manager = TemporaryInterestManager()
self.active_download_manager = download_manager
self.config = config
def save_squeak(self, squeak: CSqueak) -> Optional[bytes]:
# Check if the squeak is valid
self.squeak_core.check_squeak(squeak)
# Get the block header for the squeak.
block_header = self.squeak_core.get_block_header(squeak)
# Check if limit exceeded.
if self.get_number_of_squeaks() >= self.config.node.max_squeaks:
raise Exception("Exceeded max number of squeaks.")
# Insert the squeak in db.
inserted_squeak_hash = self.squeak_db.insert_squeak(
squeak,
block_header,
)
if inserted_squeak_hash is None:
return None
logger.info("Saved squeak: {}".format(
inserted_squeak_hash.hex(),
))
# Notify the listener
self.new_squeak_listener.handle_new_item(squeak)
return inserted_squeak_hash
def unlock_squeak(self, squeak_hash: bytes, secret_key: bytes):
squeak = self.squeak_db.get_squeak(squeak_hash)
decrypted_content = self.squeak_core.get_decrypted_content(
squeak,
secret_key,
)
self.squeak_db.set_squeak_decryption_key(
squeak_hash,
secret_key,
decrypted_content,
)
logger.info("Unlocked squeak: {}".format(
squeak_hash.hex(),
))
# Notify the listener
self.new_secret_key_listener.handle_new_item(squeak)
def make_squeak(self, profile_id: int, content_str: str, replyto_hash: bytes) -> Optional[bytes]:
squeak_profile = self.squeak_db.get_profile(profile_id)
squeak, decryption_key = self.squeak_core.make_squeak(
squeak_profile, content_str, replyto_hash)
inserted_squeak_hash = self.save_squeak(squeak)
if inserted_squeak_hash is None:
return None
self.unlock_squeak(
inserted_squeak_hash,
decryption_key,
)
return inserted_squeak_hash
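# --- Illustrative flow (an addition, not part of the controller) ---
# A hedged sketch of the post-and-read path: make_squeak signs and saves
# the squeak and unlocks it with the freshly generated key, after which
# the plaintext squeak can be read back by hash. The profile id and
# content are placeholders.
def _example_post_and_read(controller: 'SqueakController', profile_id: int) -> None:
    squeak_hash = controller.make_squeak(
        profile_id=profile_id,
        content_str='hello from an example',
        replyto_hash=b'\x00' * 32,  # placeholder; a zero hash is commonly used for "not a reply"
    )
    if squeak_hash is not None:
        squeak = controller.get_squeak(squeak_hash)
        secret_key = controller.get_squeak_secret_key(squeak_hash)
        logger.info('Posted %s (squeak present: %s, key present: %s)',
                    squeak_hash.hex(), squeak is not None, secret_key is not None)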
def get_squeak(self, squeak_hash: bytes) -> Optional[CSqueak]:
return self.squeak_db.get_squeak(squeak_hash)
def get_squeak_secret_key(self, squeak_hash: bytes) -> Optional[bytes]:
return self.squeak_db.get_squeak_secret_key(squeak_hash)
def delete_squeak(self, squeak_hash: bytes) -> None:
self.squeak_db.delete_squeak(squeak_hash)
def save_received_squeak(self, squeak: CSqueak) -> None:
# Try saving squeak as active download
saved_squeak_hash = self.save_active_download_squeak(squeak)
if saved_squeak_hash is None:
saved_squeak_hash = self.save_followed_squeak(squeak)
if saved_squeak_hash is not None:
self.request_offers(saved_squeak_hash)
def save_active_download_squeak(self, squeak: CSqueak) -> Optional[bytes]:
"""Save the given squeak as a result of an active download.
Returns:
bytes: the hash of the saved squeak.
"""
counter = self.get_temporary_interest_counter(squeak)
if counter is None:
return None
saved_squeak_hash = self.save_squeak(squeak)
if saved_squeak_hash is None:
return None
counter.increment()
return saved_squeak_hash
def save_followed_squeak(self, squeak: CSqueak) -> Optional[bytes]:
"""Save the given squeak because it matches the followed
interest criteria.
Returns:
bytes: the hash of the saved squeak.
"""
if not self.squeak_matches_interest(squeak):
return None
return self.save_squeak(squeak)
def squeak_matches_interest(self, squeak: CSqueak) -> bool:
locator = self.get_interested_locator()
for interest in locator.vInterested:
if squeak_matches_interest(squeak, interest) \
and self.squeak_in_limit_of_interest(squeak, interest):
return True
return False
def squeak_in_limit_of_interest(self, squeak: CSqueak, interest: CInterested) -> bool:
return self.squeak_db.number_of_squeaks_with_address_in_block_range(
str(squeak.GetAddress()),
interest.nMinBlockHeight,
interest.nMaxBlockHeight,
) < self.config.node.max_squeaks_per_address_in_block_range
def get_temporary_interest_counter(self, squeak: CSqueak) -> Optional[ActiveDownload]:
# return self.temporary_interest_manager.lookup_counter(squeak)
return self.active_download_manager.lookup_counter(squeak)
def get_offer_or_secret_key(self, squeak_hash: bytes, peer_address: PeerAddress) -> Optional[Union[bytes, Offer]]:
squeak = self.get_squeak(squeak_hash)
if squeak is None:
return None
price = self.get_price_for_squeak(squeak)
if price == 0:
return self.get_squeak_secret_key(squeak_hash)
else:
return self.get_offer(
squeak=squeak,
peer_address=peer_address,
price_msat=price,
)
def get_offer(self, squeak: CSqueak, peer_address: PeerAddress, price_msat: int) -> Optional[Offer]:
sent_offer = self.get_sent_offer_for_peer(
squeak,
peer_address,
price_msat,
)
if sent_offer is None:
return None
lnd_external_address: Optional[LightningAddressHostPort] = None
if self.config.lnd.external_host:
lnd_external_address = LightningAddressHostPort(
host=self.config.lnd.external_host,
port=self.config.lnd.port,
)
try:
return self.squeak_core.package_offer(
sent_offer,
lnd_external_address,
)
except Exception:
return None
def get_sent_offer_for_peer(self, squeak: CSqueak, peer_address: PeerAddress, price_msat: int) -> Optional[SentOffer]:
squeak_hash = get_hash(squeak)
# Check if there is an existing offer for the hash/peer_address combination
sent_offer = self.squeak_db.get_sent_offer_by_squeak_hash_and_peer(
squeak_hash,
peer_address,
)
if sent_offer:
return sent_offer
secret_key = self.get_squeak_secret_key(squeak_hash)
if squeak is None or secret_key is None:
return None
try:
sent_offer = self.squeak_core.create_offer(
squeak,
secret_key,
peer_address,
price_msat,
)
except Exception:
logger.exception("Failed to create offer.")
return None
self.squeak_db.insert_sent_offer(sent_offer)
return sent_offer
def get_price_for_squeak(self, squeak: CSqueak) -> int:
squeak_address = str(squeak.GetAddress())
squeak_profile = self.get_squeak_profile_by_address(squeak_address)
if squeak_profile is not None and squeak_profile.use_custom_price:
return squeak_profile.custom_price_msat
return self.config.node.price_msat
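# --- Illustrative pricing rule (an addition, not part of the controller) ---
# A standalone restatement of get_price_for_squeak above: a profile with
# use_custom_price set overrides the node-wide default price (in msat).
# The profile argument is any object exposing those two attributes.
def _example_resolve_price(profile, default_price_msat: int) -> int:
    if profile is not None and profile.use_custom_price:
        return profile.custom_price_msat
    return default_price_msat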
def create_signing_profile(self, profile_name: str) -> int:
squeak_profile = create_signing_profile(
profile_name,
)
profile_id = self.squeak_db.insert_profile(squeak_profile)
self.create_update_subscriptions_event()
return profile_id
def import_signing_profile(self, profile_name: str, private_key: str) -> int:
squeak_profile = create_signing_profile(
profile_name,
private_key,
)
profile_id = self.squeak_db.insert_profile(squeak_profile)
self.create_update_subscriptions_event()
return profile_id
def create_contact_profile(self, profile_name: str, squeak_address: str) -> int:
squeak_profile = create_contact_profile(
profile_name,
squeak_address,
)
profile_id = self.squeak_db.insert_profile(squeak_profile)
self.create_update_subscriptions_event()
return profile_id
def get_profiles(self) -> List[SqueakProfile]:
return self.squeak_db.get_profiles()
def get_signing_profiles(self) -> List[SqueakProfile]:
return self.squeak_db.get_signing_profiles()
def get_contact_profiles(self) -> List[SqueakProfile]:
return self.squeak_db.get_contact_profiles()
def get_squeak_profile(self, profile_id: int) -> Optional[SqueakProfile]:
return self.squeak_db.get_profile(profile_id)
def get_squeak_profile_by_address(self, address: str) -> Optional[SqueakProfile]:
return self.squeak_db.get_profile_by_address(address)
def get_squeak_profile_by_name(self, name: str) -> Optional[SqueakProfile]:
return self.squeak_db.get_profile_by_name(name)
def set_squeak_profile_following(self, profile_id: int, following: bool) -> None:
self.squeak_db.set_profile_following(profile_id, following)
self.create_update_subscriptions_event()
def set_squeak_profile_use_custom_price(self, profile_id: int, use_custom_price: bool) -> None:
self.squeak_db.set_profile_use_custom_price(
profile_id, use_custom_price)
def set_squeak_profile_custom_price(self, profile_id: int, custom_price_msat: int) -> None:
self.squeak_db.set_profile_custom_price_msat(
profile_id, custom_price_msat)
def rename_squeak_profile(self, profile_id: int, profile_name: str) -> None:
self.squeak_db.set_profile_name(profile_id, profile_name)
def delete_squeak_profile(self, profile_id: int) -> None:
self.squeak_db.delete_profile(profile_id)
self.create_update_subscriptions_event()
def set_squeak_profile_image(self, profile_id: int, profile_image: bytes) -> None:
self.squeak_db.set_profile_image(profile_id, profile_image)
def clear_squeak_profile_image(self, profile_id: int) -> None:
self.squeak_db.set_profile_image(profile_id, None)
def get_squeak_profile_private_key(self, profile_id: int) -> bytes:
profile = self.get_squeak_profile(profile_id)
if profile is None:
raise Exception("Profile with id: {} does not exist.".format(
profile_id,
))
return get_profile_private_key(profile)
def create_peer(self, peer_name: str, peer_address: PeerAddress):
squeak_peer = create_saved_peer(
peer_name,
peer_address,
)
return self.squeak_db.insert_peer(squeak_peer)
def get_peer(self, peer_id: int) -> Optional[SqueakPeer]:
return self.squeak_db.get_peer(peer_id)
def get_peer_by_address(self, peer_address: PeerAddress) -> Optional[SqueakPeer]:
return self.squeak_db.get_peer_by_address(peer_address)
def get_peers(self):
return self.squeak_db.get_peers()
def get_autoconnect_peers(self) -> List[SqueakPeer]:
return self.squeak_db.get_autoconnect_peers()
def set_peer_autoconnect(self, peer_id: int, autoconnect: bool):
self.squeak_db.set_peer_autoconnect(peer_id, autoconnect)
def rename_peer(self, peer_id: int, peer_name: str):
self.squeak_db.set_peer_name(peer_id, peer_name)
def delete_peer(self, peer_id: int):
self.squeak_db.delete_peer(peer_id)
def get_received_offers(self, squeak_hash: bytes) -> List[ReceivedOffer]:
return self.squeak_db.get_received_offers(squeak_hash)
# def get_received_offer_for_squeak_and_peer(
# self,
# squeak_hash: bytes,
# peer_addresss: PeerAddress,
# ) -> Optional[ReceivedOffer]:
# return self.squeak_db.get_received_offer_for_squeak_and_peer(
# squeak_hash,
# peer_addresss,
# )
def get_received_offer(self, received_offer_id: int) -> Optional[ReceivedOffer]:
return self.squeak_db.get_received_offer(
received_offer_id)
def pay_offer(self, received_offer_id: int) -> int:
# Get the offer from the database
received_offer = self.squeak_db.get_received_offer(
received_offer_id)
if received_offer is None:
raise Exception("Received offer with id {} not found.".format(
received_offer_id,
))
logger.info("Paying received offer: {}".format(received_offer))
sent_payment = self.squeak_core.pay_offer(received_offer)
sent_payment_id = self.squeak_db.insert_sent_payment(sent_payment)
# # Delete the received offer
# self.squeak_db.delete_offer(sent_payment.payment_hash)
# Mark the received offer as paid
self.squeak_db.set_received_offer_paid(
sent_payment.payment_hash,
paid=True,
)
self.unlock_squeak(
received_offer.squeak_hash,
sent_payment.secret_key,
)
return sent_payment_id
def get_sent_payments(
self,
limit: int,
last_sent_payment: Optional[SentPayment],
) -> List[SentPayment]:
return self.squeak_db.get_sent_payments(
limit,
last_sent_payment,
)
def get_sent_payment(self, sent_payment_id: int) -> Optional[SentPayment]:
return self.squeak_db.get_sent_payment(sent_payment_id)
def get_sent_offers(self):
return self.squeak_db.get_sent_offers()
def get_received_payments(
self,
limit: int,
last_received_payment: Optional[ReceivedPayment],
) -> List[ReceivedPayment]:
return self.squeak_db.get_received_payments(
limit,
last_received_payment,
)
def delete_all_expired_offers(self):
self.delete_all_expired_received_offers()
self.delete_all_expired_sent_offers()
def delete_all_expired_received_offers(self):
received_offer_retention_s = self.config.node.received_offer_retention_s
num_expired_received_offers = self.squeak_db.delete_expired_received_offers(
received_offer_retention_s)
if num_expired_received_offers > 0:
logger.info("Deleted number of expired received offers: {}".format(
num_expired_received_offers))
def delete_all_expired_sent_offers(self):
sent_offer_retention_s = self.config.node.sent_offer_retention_s
num_expired_sent_offers = self.squeak_db.delete_expired_sent_offers(
sent_offer_retention_s,
)
if num_expired_sent_offers > 0:
logger.info(
"Deleted number of expired sent offers: {}".format(
num_expired_sent_offers)
)
def subscribe_received_payments(self, initial_index: int, stopped: threading.Event):
with ReceivedPaymentsSubscriptionClient(
self.squeak_db,
initial_index,
stopped,
).open_subscription() as client:
yield from client.get_received_payments()
def get_block_range(self) -> BlockRange:
max_block = self.squeak_core.get_best_block_height()
block_interval = self.config.node.interest_block_interval
min_block = max(0, max_block - block_interval)
return BlockRange(min_block, max_block)
def get_network(self) -> str:
return self.config.node.network
def get_squeak_entry(self, squeak_hash: bytes) -> Optional[SqueakEntry]:
return self.squeak_db.get_squeak_entry(squeak_hash)
def get_timeline_squeak_entries(
self,
limit: int,
last_entry: Optional[SqueakEntry],
) -> List[SqueakEntry]:
return self.squeak_db.get_timeline_squeak_entries(
limit,
last_entry,
)
def get_liked_squeak_entries(
self,
limit: int,
last_entry: Optional[SqueakEntry],
) -> List[SqueakEntry]:
return self.squeak_db.get_liked_squeak_entries(
limit,
last_entry,
)
def get_squeak_entries_for_address(
self,
address: str,
limit: int,
last_entry: Optional[SqueakEntry],
) -> List[SqueakEntry]:
return self.squeak_db.get_squeak_entries_for_address(
address,
limit,
last_entry,
)
def get_squeak_entries_for_text_search(
self,
search_text: str,
limit: int,
last_entry: Optional[SqueakEntry],
) -> List[SqueakEntry]:
return self.squeak_db.get_squeak_entries_for_text_search(
search_text,
limit,
last_entry,
)
def get_ancestor_squeak_entries(self, squeak_hash: bytes) -> List[SqueakEntry]:
return self.squeak_db.get_thread_ancestor_squeak_entries(
squeak_hash,
)
def get_reply_squeak_entries(
self,
squeak_hash: bytes,
limit: int,
last_entry: Optional[SqueakEntry],
) -> List[SqueakEntry]:
return self.squeak_db.get_thread_reply_squeak_entries(
squeak_hash,
limit,
last_entry,
)
def get_number_of_squeaks(self) -> int:
return self.squeak_db.get_number_of_squeaks()
def save_received_offer(self, offer: Offer, peer_address: PeerAddress) -> None:
squeak = self.get_squeak(offer.squeak_hash)
secret_key = self.get_squeak_secret_key(offer.squeak_hash)
if squeak is None or secret_key is not None:
return
try:
# TODO: Call unpack_offer with check_payment_point=True.
received_offer = self.squeak_core.unpack_offer(
squeak,
offer,
peer_address,
)
except Exception:
logger.exception("Failed to save received offer.")
return
received_offer_id = self.squeak_db.insert_received_offer(
received_offer)
if received_offer_id is None:
return
logger.info("Saved received offer: {}".format(received_offer))
counter = self.active_download_manager.lookup_counter(offer)
if counter is not None:
counter.increment()
received_offer = received_offer._replace(
received_offer_id=received_offer_id)
self.new_received_offer_listener.handle_new_item(received_offer)
def get_followed_addresses(self) -> List[str]:
followed_profiles = self.squeak_db.get_following_profiles()
return [profile.address for profile in followed_profiles]
def get_received_payment_summary(self) -> ReceivedPaymentSummary:
return self.squeak_db.get_received_payment_summary()
def get_sent_payment_summary(self) -> SentPaymentSummary:
return self.squeak_db.get_sent_payment_summary()
def reprocess_received_payments(self) -> None:
self.squeak_db.clear_received_payment_settle_indices()
self.payment_processor.start_processing()
def delete_old_squeaks(self):
squeaks_to_delete = self.squeak_db.get_old_squeaks_to_delete(
self.config.node.squeak_retention_s,
)
for squeak_hash in squeaks_to_delete:
self.squeak_db.delete_squeak(
squeak_hash,
)
logger.info("Deleted squeak: {}".format(
squeak_hash.hex(),
))
def like_squeak(self, squeak_hash: bytes):
logger.info("Liking squeak: {}".format(
squeak_hash.hex(),
))
self.squeak_db.set_squeak_liked(
squeak_hash,
)
def unlike_squeak(self, squeak_hash: bytes):
logger.info("Unliking squeak: {}".format(
squeak_hash.hex(),
))
self.squeak_db.set_squeak_unliked(
squeak_hash,
)
def connect_peer(self, peer_address: PeerAddress) -> None:
logger.info("Connect to peer: {}".format(
peer_address,
))
self.network_manager.connect_peer_sync(peer_address)
def connect_saved_peers(self) -> None:
peers = self.get_autoconnect_peers()
for peer in peers:
self.network_manager.connect_peer_async(
peer.address,
)
def get_connected_peer(self, peer_address: PeerAddress) -> Optional[ConnectedPeer]:
peer = self.network_manager.get_connected_peer(peer_address)
if peer is None:
return None
return ConnectedPeer(
peer=peer,
saved_peer=self.squeak_db.get_peer_by_address(
peer_address,
),
)
def get_connected_peers(self) -> List[ConnectedPeer]:
peers = self.network_manager.get_connected_peers()
return [
ConnectedPeer(
peer=peer,
saved_peer=self.squeak_db.get_peer_by_address(
peer.remote_address,
),
) for peer in peers
]
def lookup_squeaks(
self,
addresses: List[str],
min_block: Optional[int],
max_block: Optional[int],
reply_to_hash: Optional[bytes],
) -> List[bytes]:
return self.squeak_db.lookup_squeaks(
addresses,
min_block,
max_block,
reply_to_hash,
include_locked=True,
)
def lookup_secret_keys(
self,
addresses: List[str],
min_block: Optional[int],
max_block: Optional[int],
reply_to_hash: Optional[bytes],
) -> List[bytes]:
return self.squeak_db.lookup_squeaks(
addresses,
min_block,
max_block,
reply_to_hash,
)
def get_interested_locator(self) -> CSqueakLocator:
block_range = self.get_block_range()
followed_addresses = self.get_followed_addresses()
if len(followed_addresses) == 0:
return CSqueakLocator(
vInterested=[],
)
interests = [
CInterested(
addresses=[CSqueakAddress(address)
for address in followed_addresses],
nMinBlockHeight=block_range.min_block,
nMaxBlockHeight=block_range.max_block,
)
]
return CSqueakLocator(
vInterested=interests,
)
def download_squeaks(
self,
addresses: List[str],
min_block: int,
max_block: int,
replyto_hash: Optional[bytes],
) -> DownloadResult:
interest = CInterested(
addresses=[CSqueakAddress(address)
for address in addresses],
nMinBlockHeight=min_block,
nMaxBlockHeight=max_block,
replyto_squeak_hash=replyto_hash,
) if replyto_hash else CInterested(
addresses=[CSqueakAddress(address)
for address in addresses],
nMinBlockHeight=min_block,
nMaxBlockHeight=max_block,
)
return self.active_download_manager.download_interest(10, interest)
def download_single_squeak(self, squeak_hash: bytes) -> DownloadResult:
logger.info("Downloading single squeak: {}".format(
squeak_hash.hex(),
))
return self.active_download_manager.download_hash(squeak_hash)
def download_offers(self, squeak_hash: bytes) -> DownloadResult:
logger.info("Downloading offers for squeak: {}".format(
squeak_hash.hex(),
))
return self.active_download_manager.download_offers(10, squeak_hash)
def request_offers(self, squeak_hash: bytes):
logger.info("Requesting offers for squeak: {}".format(
squeak_hash.hex(),
))
invs = [
CInv(type=2, hash=squeak_hash)
]
getdata_msg = msg_getdata(inv=invs)
self.broadcast_msg(getdata_msg)
def download_replies(self, squeak_hash: bytes) -> DownloadResult:
logger.info("Downloading replies for squeak: {}".format(
squeak_hash.hex(),
))
interest = CInterested(
hashReplySqk=squeak_hash,
)
return self.active_download_manager.download_interest(10, interest)
def download_address_squeaks(self, squeak_address: str) -> DownloadResult:
logger.info("Downloading address squeaks for address: {}".format(
squeak_address,
))
interest = CInterested(
addresses=[CSqueakAddress(squeak_address)],
)
return self.active_download_manager.download_interest(10, interest)
def broadcast_msg(self, msg: MsgSerializable) -> int:
return self.network_manager.broadcast_msg(msg)
def disconnect_peer(self, peer_address: PeerAddress) -> None:
logger.info("Disconnect to peer: {}".format(
peer_address,
))
self.network_manager.disconnect_peer(peer_address)
def subscribe_connected_peers(self, stopped: threading.Event) -> Iterable[List[ConnectedPeer]]:
for peers in self.network_manager.subscribe_connected_peers(stopped):
yield [
ConnectedPeer(
peer=peer,
saved_peer=self.squeak_db.get_peer_by_address(
peer.remote_address,
),
) for peer in peers
]
def subscribe_connected_peer(self, peer_address: PeerAddress, stopped: threading.Event) -> Iterable[Optional[ConnectedPeer]]:
for peer in self.network_manager.subscribe_connected_peer(peer_address, stopped):
if peer is None:
yield None
else:
yield ConnectedPeer(
peer=peer,
saved_peer=self.squeak_db.get_peer_by_address(
peer.remote_address,
),
)
def subscribe_new_squeaks(self, stopped: threading.Event):
yield from self.new_squeak_listener.yield_items(stopped)
def subscribe_new_secret_keys(self, stopped: threading.Event):
yield from self.new_secret_key_listener.yield_items(stopped)
def subscribe_follows(self, stopped: threading.Event):
yield from self.new_follow_listener.yield_items(stopped)
def update_subscriptions(self):
locator = self.get_interested_locator()
self.network_manager.update_local_subscriptions(locator)
def create_update_subscriptions_event(self):
self.new_follow_listener.handle_new_item(UpdateSubscriptionsEvent())
def subscribe_received_offers_for_squeak(self, squeak_hash: bytes, stopped: threading.Event):
for received_offer in self.new_received_offer_listener.yield_items(stopped):
if received_offer.squeak_hash == squeak_hash:
yield received_offer
def subscribe_squeak_entry(self, squeak_hash: bytes, stopped: threading.Event):
for item in self.new_squeak_listener.yield_items(stopped):
if squeak_hash == get_hash(item):
yield self.get_squeak_entry(squeak_hash)
def subscribe_squeak_reply_entries(self, squeak_hash: bytes, stopped: threading.Event):
for item in self.new_squeak_listener.yield_items(stopped):
if squeak_hash == item.hashReplySqk:
reply_hash = get_hash(item)
yield self.get_squeak_entry(reply_hash)
def subscribe_squeak_address_entries(self, squeak_address: str, stopped: threading.Event):
for item in self.new_squeak_listener.yield_items(stopped):
if squeak_address == str(item.GetAddress()):
squeak_hash = get_hash(item)
yield self.get_squeak_entry(squeak_hash)
def subscribe_squeak_ancestor_entries(self, squeak_hash: bytes, stopped: threading.Event):
for item in self.new_squeak_listener.yield_items(stopped):
if squeak_hash == get_hash(item):
yield self.get_ancestor_squeak_entries(squeak_hash)
def subscribe_squeak_entries(self, stopped: threading.Event):
for item in self.new_squeak_listener.yield_items(stopped):
squeak_hash = get_hash(item)
yield self.get_squeak_entry(squeak_hash)
def subscribe_timeline_squeak_entries(self, stopped: threading.Event):
for item in self.new_squeak_listener.yield_items(stopped):
followed_addresses = self.get_followed_addresses()
if str(item.GetAddress()) in set(followed_addresses):
squeak_hash = get_hash(item)
yield self.get_squeak_entry(squeak_hash)
def get_external_address(self) -> PeerAddress:
return self.network_manager.external_address
def get_default_peer_port(self) -> int:
return squeak.params.params.DEFAULT_PORT
def forward_squeak(self, squeak):
logger.debug("Forward new squeak: {!r}".format(
get_hash(squeak).hex(),
))
for peer in self.network_manager.get_connected_peers():
if peer.is_remote_subscribed(squeak):
logger.debug("Forwarding to peer: {}".format(
peer,
))
squeak_hash = get_hash(squeak)
inv = CInv(type=MSG_SQUEAK, hash=squeak_hash)
inv_msg = msg_inv(inv=[inv])
peer.send_msg(inv_msg)
logger.debug("Finished checking peers to forward.")
def forward_secret_key(self, squeak):
logger.debug("Forward new secret key for hash: {!r}".format(
get_hash(squeak).hex(),
))
for peer in self.network_manager.get_connected_peers():
if peer.is_remote_subscribed(squeak):
logger.debug("Forwarding to peer: {}".format(
peer,
))
squeak_hash = get_hash(squeak)
inv = CInv(type=MSG_SECRET_KEY, hash=squeak_hash)
inv_msg = msg_inv(inv=[inv])
peer.send_msg(inv_msg)
logger.debug("Finished checking peers to forward.")
| <filename>squeaknode/node/squeak_controller.py<gh_stars>0
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import threading
from typing import Iterable
from typing import List
from typing import Optional
from typing import Union
import squeak.params
from squeak.core import CSqueak
from squeak.core.signing import CSqueakAddress
from squeak.messages import msg_getdata
from squeak.messages import msg_inv
from squeak.messages import MSG_SECRET_KEY
from squeak.messages import MSG_SQUEAK
from squeak.messages import MsgSerializable
from squeak.net import CInterested
from squeak.net import CInv
from squeak.net import CSqueakLocator
from squeaknode.core.block_range import BlockRange
from squeaknode.core.connected_peer import ConnectedPeer
from squeaknode.core.download_result import DownloadResult
from squeaknode.core.interests import squeak_matches_interest
from squeaknode.core.lightning_address import LightningAddressHostPort
from squeaknode.core.offer import Offer
from squeaknode.core.peer_address import PeerAddress
from squeaknode.core.peers import create_saved_peer
from squeaknode.core.profiles import create_contact_profile
from squeaknode.core.profiles import create_signing_profile
from squeaknode.core.profiles import get_profile_private_key
from squeaknode.core.received_offer import ReceivedOffer
from squeaknode.core.received_payment import ReceivedPayment
from squeaknode.core.received_payment_summary import ReceivedPaymentSummary
from squeaknode.core.sent_offer import SentOffer
from squeaknode.core.sent_payment import SentPayment
from squeaknode.core.sent_payment_summary import SentPaymentSummary
from squeaknode.core.squeak_entry import SqueakEntry
from squeaknode.core.squeak_peer import SqueakPeer
from squeaknode.core.squeak_profile import SqueakProfile
from squeaknode.core.squeaks import get_hash
from squeaknode.core.update_subscriptions_event import UpdateSubscriptionsEvent
from squeaknode.node.active_download_manager import ActiveDownload
from squeaknode.node.listener_subscription_client import EventListener
from squeaknode.node.received_payments_subscription_client import ReceivedPaymentsSubscriptionClient
logger = logging.getLogger(__name__)
class SqueakController:
def __init__(
self,
squeak_db,
squeak_core,
payment_processor,
network_manager,
download_manager,
config,
):
self.squeak_db = squeak_db
self.squeak_core = squeak_core
self.payment_processor = payment_processor
self.network_manager = network_manager
self.new_squeak_listener = EventListener()
self.new_received_offer_listener = EventListener()
self.new_secret_key_listener = EventListener()
self.new_follow_listener = EventListener()
# self.temporary_interest_manager = TemporaryInterestManager()
self.active_download_manager = download_manager
self.config = config
def save_squeak(self, squeak: CSqueak) -> Optional[bytes]:
# Check if the squeak is valid
self.squeak_core.check_squeak(squeak)
# Get the block header for the squeak.
block_header = self.squeak_core.get_block_header(squeak)
# Check if limit exceeded.
if self.get_number_of_squeaks() >= self.config.node.max_squeaks:
raise Exception("Exceeded max number of squeaks.")
# Insert the squeak in db.
inserted_squeak_hash = self.squeak_db.insert_squeak(
squeak,
block_header,
)
if inserted_squeak_hash is None:
return None
logger.info("Saved squeak: {}".format(
inserted_squeak_hash.hex(),
))
# Notify the listener
self.new_squeak_listener.handle_new_item(squeak)
return inserted_squeak_hash
def unlock_squeak(self, squeak_hash: bytes, secret_key: bytes):
squeak = self.squeak_db.get_squeak(squeak_hash)
decrypted_content = self.squeak_core.get_decrypted_content(
squeak,
secret_key,
)
self.squeak_db.set_squeak_decryption_key(
squeak_hash,
secret_key,
decrypted_content,
)
logger.info("Unlocked squeak: {}".format(
squeak_hash.hex(),
))
# Notify the listener
self.new_secret_key_listener.handle_new_item(squeak)
def make_squeak(self, profile_id: int, content_str: str, replyto_hash: bytes) -> Optional[bytes]:
squeak_profile = self.squeak_db.get_profile(profile_id)
squeak, decryption_key = self.squeak_core.make_squeak(
squeak_profile, content_str, replyto_hash)
inserted_squeak_hash = self.save_squeak(squeak)
if inserted_squeak_hash is None:
return None
self.unlock_squeak(
inserted_squeak_hash,
decryption_key,
)
return inserted_squeak_hash
def get_squeak(self, squeak_hash: bytes) -> Optional[CSqueak]:
return self.squeak_db.get_squeak(squeak_hash)
def get_squeak_secret_key(self, squeak_hash: bytes) -> Optional[bytes]:
return self.squeak_db.get_squeak_secret_key(squeak_hash)
def delete_squeak(self, squeak_hash: bytes) -> None:
self.squeak_db.delete_squeak(squeak_hash)
def save_received_squeak(self, squeak: CSqueak) -> None:
# Try saving squeak as active download
saved_squeak_hash = self.save_active_download_squeak(squeak)
if saved_squeak_hash is None:
saved_squeak_hash = self.save_followed_squeak(squeak)
if saved_squeak_hash is not None:
self.request_offers(saved_squeak_hash)
def save_active_download_squeak(self, squeak: CSqueak) -> Optional[bytes]:
"""Save the given squeak as a result of an active download.
Returns:
bytes: the hash of the saved squeak, or None if the squeak was not saved.
"""
counter = self.get_temporary_interest_counter(squeak)
if counter is None:
return None
saved_squeak_hash = self.save_squeak(squeak)
if saved_squeak_hash is None:
return None
counter.increment()
return saved_squeak_hash
def save_followed_squeak(self, squeak: CSqueak) -> Optional[bytes]:
"""Save the given squeak because it matches the followed
interest criteria.
Returns:
bytes: the hash of the saved squeak, or None if the squeak was not saved.
"""
if not self.squeak_matches_interest(squeak):
return None
return self.save_squeak(squeak)
def squeak_matches_interest(self, squeak: CSqueak) -> bool:
locator = self.get_interested_locator()
for interest in locator.vInterested:
if squeak_matches_interest(squeak, interest) \
and self.squeak_in_limit_of_interest(squeak, interest):
return True
return False
def squeak_in_limit_of_interest(self, squeak: CSqueak, interest: CInterested) -> bool:
return self.squeak_db.number_of_squeaks_with_address_in_block_range(
str(squeak.GetAddress()),
interest.nMinBlockHeight,
interest.nMaxBlockHeight,
) < self.config.node.max_squeaks_per_address_in_block_range
def get_temporary_interest_counter(self, squeak: CSqueak) -> Optional[ActiveDownload]:
# return self.temporary_interest_manager.lookup_counter(squeak)
return self.active_download_manager.lookup_counter(squeak)
def get_offer_or_secret_key(self, squeak_hash: bytes, peer_address: PeerAddress) -> Optional[Union[bytes, Offer]]:
squeak = self.get_squeak(squeak_hash)
if squeak is None:
return None
price = self.get_price_for_squeak(squeak)
if price == 0:
return self.get_squeak_secret_key(squeak_hash)
else:
return self.get_offer(
squeak=squeak,
peer_address=peer_address,
price_msat=price,
)
def get_offer(self, squeak: CSqueak, peer_address: PeerAddress, price_msat: int) -> Optional[Offer]:
sent_offer = self.get_sent_offer_for_peer(
squeak,
peer_address,
price_msat,
)
if sent_offer is None:
return None
lnd_external_address: Optional[LightningAddressHostPort] = None
if self.config.lnd.external_host:
lnd_external_address = LightningAddressHostPort(
host=self.config.lnd.external_host,
port=self.config.lnd.port,
)
try:
return self.squeak_core.package_offer(
sent_offer,
lnd_external_address,
)
except Exception:
return None
def get_sent_offer_for_peer(self, squeak: CSqueak, peer_address: PeerAddress, price_msat: int) -> Optional[SentOffer]:
squeak_hash = get_hash(squeak)
# Check if there is an existing offer for the hash/peer_address combination
sent_offer = self.squeak_db.get_sent_offer_by_squeak_hash_and_peer(
squeak_hash,
peer_address,
)
if sent_offer:
return sent_offer
secret_key = self.get_squeak_secret_key(squeak_hash)
if squeak is None or secret_key is None:
return None
try:
sent_offer = self.squeak_core.create_offer(
squeak,
secret_key,
peer_address,
price_msat,
)
except Exception:
logger.exception("Failed to create offer.")
return None
self.squeak_db.insert_sent_offer(sent_offer)
return sent_offer
def get_price_for_squeak(self, squeak: CSqueak) -> int:
squeak_address = str(squeak.GetAddress())
squeak_profile = self.get_squeak_profile_by_address(squeak_address)
if squeak_profile is not None and squeak_profile.use_custom_price:
return squeak_profile.custom_price_msat
return self.config.node.price_msat
def create_signing_profile(self, profile_name: str) -> int:
squeak_profile = create_signing_profile(
profile_name,
)
profile_id = self.squeak_db.insert_profile(squeak_profile)
self.create_update_subscriptions_event()
return profile_id
def import_signing_profile(self, profile_name: str, private_key: str) -> int:
squeak_profile = create_signing_profile(
profile_name,
private_key,
)
profile_id = self.squeak_db.insert_profile(squeak_profile)
self.create_update_subscriptions_event()
return profile_id
def create_contact_profile(self, profile_name: str, squeak_address: str) -> int:
squeak_profile = create_contact_profile(
profile_name,
squeak_address,
)
profile_id = self.squeak_db.insert_profile(squeak_profile)
self.create_update_subscriptions_event()
return profile_id
def get_profiles(self) -> List[SqueakProfile]:
return self.squeak_db.get_profiles()
def get_signing_profiles(self) -> List[SqueakProfile]:
return self.squeak_db.get_signing_profiles()
def get_contact_profiles(self) -> List[SqueakProfile]:
return self.squeak_db.get_contact_profiles()
def get_squeak_profile(self, profile_id: int) -> Optional[SqueakProfile]:
return self.squeak_db.get_profile(profile_id)
def get_squeak_profile_by_address(self, address: str) -> Optional[SqueakProfile]:
return self.squeak_db.get_profile_by_address(address)
def get_squeak_profile_by_name(self, name: str) -> Optional[SqueakProfile]:
return self.squeak_db.get_profile_by_name(name)
def set_squeak_profile_following(self, profile_id: int, following: bool) -> None:
self.squeak_db.set_profile_following(profile_id, following)
self.create_update_subscriptions_event()
def set_squeak_profile_use_custom_price(self, profile_id: int, use_custom_price: bool) -> None:
self.squeak_db.set_profile_use_custom_price(
profile_id, use_custom_price)
def set_squeak_profile_custom_price(self, profile_id: int, custom_price_msat: int) -> None:
self.squeak_db.set_profile_custom_price_msat(
profile_id, custom_price_msat)
def rename_squeak_profile(self, profile_id: int, profile_name: str) -> None:
self.squeak_db.set_profile_name(profile_id, profile_name)
def delete_squeak_profile(self, profile_id: int) -> None:
self.squeak_db.delete_profile(profile_id)
self.create_update_subscriptions_event()
def set_squeak_profile_image(self, profile_id: int, profile_image: bytes) -> None:
self.squeak_db.set_profile_image(profile_id, profile_image)
def clear_squeak_profile_image(self, profile_id: int) -> None:
self.squeak_db.set_profile_image(profile_id, None)
def get_squeak_profile_private_key(self, profile_id: int) -> bytes:
profile = self.get_squeak_profile(profile_id)
if profile is None:
raise Exception("Profile with id: {} does not exist.".format(
profile_id,
))
return get_profile_private_key(profile)
def create_peer(self, peer_name: str, peer_address: PeerAddress):
squeak_peer = create_saved_peer(
peer_name,
peer_address,
)
return self.squeak_db.insert_peer(squeak_peer)
def get_peer(self, peer_id: int) -> Optional[SqueakPeer]:
return self.squeak_db.get_peer(peer_id)
def get_peer_by_address(self, peer_address: PeerAddress) -> Optional[SqueakPeer]:
return self.squeak_db.get_peer_by_address(peer_address)
def get_peers(self):
return self.squeak_db.get_peers()
def get_autoconnect_peers(self) -> List[SqueakPeer]:
return self.squeak_db.get_autoconnect_peers()
def set_peer_autoconnect(self, peer_id: int, autoconnect: bool):
self.squeak_db.set_peer_autoconnect(peer_id, autoconnect)
def rename_peer(self, peer_id: int, peer_name: str):
self.squeak_db.set_peer_name(peer_id, peer_name)
def delete_peer(self, peer_id: int):
self.squeak_db.delete_peer(peer_id)
def get_received_offers(self, squeak_hash: bytes) -> List[ReceivedOffer]:
return self.squeak_db.get_received_offers(squeak_hash)
# def get_received_offer_for_squeak_and_peer(
# self,
# squeak_hash: bytes,
# peer_addresss: PeerAddress,
# ) -> Optional[ReceivedOffer]:
# return self.squeak_db.get_received_offer_for_squeak_and_peer(
# squeak_hash,
# peer_addresss,
# )
def get_received_offer(self, received_offer_id: int) -> Optional[ReceivedOffer]:
return self.squeak_db.get_received_offer(
received_offer_id)
def pay_offer(self, received_offer_id: int) -> int:
# Get the offer from the database
received_offer = self.squeak_db.get_received_offer(
received_offer_id)
if received_offer is None:
raise Exception("Received offer with id {} not found.".format(
received_offer_id,
))
logger.info("Paying received offer: {}".format(received_offer))
sent_payment = self.squeak_core.pay_offer(received_offer)
sent_payment_id = self.squeak_db.insert_sent_payment(sent_payment)
# # Delete the received offer
# self.squeak_db.delete_offer(sent_payment.payment_hash)
# Mark the received offer as paid
self.squeak_db.set_received_offer_paid(
sent_payment.payment_hash,
paid=True,
)
self.unlock_squeak(
received_offer.squeak_hash,
sent_payment.secret_key,
)
return sent_payment_id
def get_sent_payments(
self,
limit: int,
last_sent_payment: Optional[SentPayment],
) -> List[SentPayment]:
return self.squeak_db.get_sent_payments(
limit,
last_sent_payment,
)
def get_sent_payment(self, sent_payment_id: int) -> Optional[SentPayment]:
return self.squeak_db.get_sent_payment(sent_payment_id)
def get_sent_offers(self):
return self.squeak_db.get_sent_offers()
def get_received_payments(
self,
limit: int,
last_received_payment: Optional[ReceivedPayment],
) -> List[ReceivedPayment]:
return self.squeak_db.get_received_payments(
limit,
last_received_payment,
)
def delete_all_expired_offers(self):
self.delete_all_expired_received_offers()
self.delete_all_expired_sent_offers()
def delete_all_expired_received_offers(self):
received_offer_retention_s = self.config.node.received_offer_retention_s
num_expired_received_offers = self.squeak_db.delete_expired_received_offers(
received_offer_retention_s)
if num_expired_received_offers > 0:
logger.info("Deleted number of expired received offers: {}".format(
num_expired_received_offers))
def delete_all_expired_sent_offers(self):
sent_offer_retention_s = self.config.node.sent_offer_retention_s
num_expired_sent_offers = self.squeak_db.delete_expired_sent_offers(
sent_offer_retention_s,
)
if num_expired_sent_offers > 0:
logger.info(
"Deleted number of expired sent offers: {}".format(
num_expired_sent_offers)
)
def subscribe_received_payments(self, initial_index: int, stopped: threading.Event):
with ReceivedPaymentsSubscriptionClient(
self.squeak_db,
initial_index,
stopped,
).open_subscription() as client:
yield from client.get_received_payments()
def get_block_range(self) -> BlockRange:
max_block = self.squeak_core.get_best_block_height()
block_interval = self.config.node.interest_block_interval
min_block = max(0, max_block - block_interval)
return BlockRange(min_block, max_block)
def get_network(self) -> str:
return self.config.node.network
def get_squeak_entry(self, squeak_hash: bytes) -> Optional[SqueakEntry]:
return self.squeak_db.get_squeak_entry(squeak_hash)
def get_timeline_squeak_entries(
self,
limit: int,
last_entry: Optional[SqueakEntry],
) -> List[SqueakEntry]:
return self.squeak_db.get_timeline_squeak_entries(
limit,
last_entry,
)
def get_liked_squeak_entries(
self,
limit: int,
last_entry: Optional[SqueakEntry],
) -> List[SqueakEntry]:
return self.squeak_db.get_liked_squeak_entries(
limit,
last_entry,
)
def get_squeak_entries_for_address(
self,
address: str,
limit: int,
last_entry: Optional[SqueakEntry],
) -> List[SqueakEntry]:
return self.squeak_db.get_squeak_entries_for_address(
address,
limit,
last_entry,
)
def get_squeak_entries_for_text_search(
self,
search_text: str,
limit: int,
last_entry: Optional[SqueakEntry],
) -> List[SqueakEntry]:
return self.squeak_db.get_squeak_entries_for_text_search(
search_text,
limit,
last_entry,
)
def get_ancestor_squeak_entries(self, squeak_hash: bytes) -> List[SqueakEntry]:
return self.squeak_db.get_thread_ancestor_squeak_entries(
squeak_hash,
)
def get_reply_squeak_entries(
self,
squeak_hash: bytes,
limit: int,
last_entry: Optional[SqueakEntry],
) -> List[SqueakEntry]:
return self.squeak_db.get_thread_reply_squeak_entries(
squeak_hash,
limit,
last_entry,
)
def get_number_of_squeaks(self) -> int:
return self.squeak_db.get_number_of_squeaks()
def save_received_offer(self, offer: Offer, peer_address: PeerAddress) -> None:
squeak = self.get_squeak(offer.squeak_hash)
secret_key = self.get_squeak_secret_key(offer.squeak_hash)
if squeak is None or secret_key is not None:
return
try:
# TODO: Call unpack_offer with check_payment_point=True.
received_offer = self.squeak_core.unpack_offer(
squeak,
offer,
peer_address,
)
except Exception:
logger.exception("Failed to save received offer.")
return
received_offer_id = self.squeak_db.insert_received_offer(
received_offer)
if received_offer_id is None:
return
logger.info("Saved received offer: {}".format(received_offer))
counter = self.active_download_manager.lookup_counter(offer)
if counter is not None:
counter.increment()
received_offer = received_offer._replace(
received_offer_id=received_offer_id)
self.new_received_offer_listener.handle_new_item(received_offer)
def get_followed_addresses(self) -> List[str]:
followed_profiles = self.squeak_db.get_following_profiles()
return [profile.address for profile in followed_profiles]
def get_received_payment_summary(self) -> ReceivedPaymentSummary:
return self.squeak_db.get_received_payment_summary()
def get_sent_payment_summary(self) -> SentPaymentSummary:
return self.squeak_db.get_sent_payment_summary()
def reprocess_received_payments(self) -> None:
self.squeak_db.clear_received_payment_settle_indices()
self.payment_processor.start_processing()
def delete_old_squeaks(self):
squeaks_to_delete = self.squeak_db.get_old_squeaks_to_delete(
self.config.node.squeak_retention_s,
)
for squeak_hash in squeaks_to_delete:
self.squeak_db.delete_squeak(
squeak_hash,
)
logger.info("Deleted squeak: {}".format(
squeak_hash.hex(),
))
def like_squeak(self, squeak_hash: bytes):
logger.info("Liking squeak: {}".format(
squeak_hash.hex(),
))
self.squeak_db.set_squeak_liked(
squeak_hash,
)
def unlike_squeak(self, squeak_hash: bytes):
logger.info("Unliking squeak: {}".format(
squeak_hash.hex(),
))
self.squeak_db.set_squeak_unliked(
squeak_hash,
)
def connect_peer(self, peer_address: PeerAddress) -> None:
logger.info("Connect to peer: {}".format(
peer_address,
))
self.network_manager.connect_peer_sync(peer_address)
def connect_saved_peers(self) -> None:
peers = self.get_autoconnect_peers()
for peer in peers:
self.network_manager.connect_peer_async(
peer.address,
)
def get_connected_peer(self, peer_address: PeerAddress) -> Optional[ConnectedPeer]:
peer = self.network_manager.get_connected_peer(peer_address)
if peer is None:
return None
return ConnectedPeer(
peer=peer,
saved_peer=self.squeak_db.get_peer_by_address(
peer_address,
),
)
def get_connected_peers(self) -> List[ConnectedPeer]:
peers = self.network_manager.get_connected_peers()
return [
ConnectedPeer(
peer=peer,
saved_peer=self.squeak_db.get_peer_by_address(
peer.remote_address,
),
) for peer in peers
]
def lookup_squeaks(
self,
addresses: List[str],
min_block: Optional[int],
max_block: Optional[int],
reply_to_hash: Optional[bytes],
) -> List[bytes]:
return self.squeak_db.lookup_squeaks(
addresses,
min_block,
max_block,
reply_to_hash,
include_locked=True,
)
def lookup_secret_keys(
self,
addresses: List[str],
min_block: Optional[int],
max_block: Optional[int],
reply_to_hash: Optional[bytes],
) -> List[bytes]:
return self.squeak_db.lookup_squeaks(
addresses,
min_block,
max_block,
reply_to_hash,
)
def get_interested_locator(self) -> CSqueakLocator:
block_range = self.get_block_range()
followed_addresses = self.get_followed_addresses()
if len(followed_addresses) == 0:
return CSqueakLocator(
vInterested=[],
)
interests = [
CInterested(
addresses=[CSqueakAddress(address)
for address in followed_addresses],
nMinBlockHeight=block_range.min_block,
nMaxBlockHeight=block_range.max_block,
)
]
return CSqueakLocator(
vInterested=interests,
)
def download_squeaks(
self,
addresses: List[str],
min_block: int,
max_block: int,
replyto_hash: Optional[bytes],
) -> DownloadResult:
interest = CInterested(
addresses=[CSqueakAddress(address)
for address in addresses],
nMinBlockHeight=min_block,
nMaxBlockHeight=max_block,
replyto_squeak_hash=replyto_hash,
) if replyto_hash else CInterested(
addresses=[CSqueakAddress(address)
for address in addresses],
nMinBlockHeight=min_block,
nMaxBlockHeight=max_block,
)
return self.active_download_manager.download_interest(10, interest)
def download_single_squeak(self, squeak_hash: bytes) -> DownloadResult:
logger.info("Downloading single squeak: {}".format(
squeak_hash.hex(),
))
return self.active_download_manager.download_hash(squeak_hash)
def download_offers(self, squeak_hash: bytes) -> DownloadResult:
logger.info("Downloading offers for squeak: {}".format(
squeak_hash.hex(),
))
return self.active_download_manager.download_offers(10, squeak_hash)
def request_offers(self, squeak_hash: bytes):
logger.info("Requesting offers for squeak: {}".format(
squeak_hash.hex(),
))
invs = [
CInv(type=2, hash=squeak_hash)
]
getdata_msg = msg_getdata(inv=invs)
self.broadcast_msg(getdata_msg)
def download_replies(self, squeak_hash: bytes) -> DownloadResult:
logger.info("Downloading replies for squeak: {}".format(
squeak_hash.hex(),
))
interest = CInterested(
hashReplySqk=squeak_hash,
)
return self.active_download_manager.download_interest(10, interest)
def download_address_squeaks(self, squeak_address: str) -> DownloadResult:
logger.info("Downloading address squeaks for address: {}".format(
squeak_address,
))
interest = CInterested(
addresses=[CSqueakAddress(squeak_address)],
)
return self.active_download_manager.download_interest(10, interest)
def broadcast_msg(self, msg: MsgSerializable) -> int:
return self.network_manager.broadcast_msg(msg)
def disconnect_peer(self, peer_address: PeerAddress) -> None:
logger.info("Disconnect to peer: {}".format(
peer_address,
))
self.network_manager.disconnect_peer(peer_address)
def subscribe_connected_peers(self, stopped: threading.Event) -> Iterable[List[ConnectedPeer]]:
for peers in self.network_manager.subscribe_connected_peers(stopped):
yield [
ConnectedPeer(
peer=peer,
saved_peer=self.squeak_db.get_peer_by_address(
peer.remote_address,
),
) for peer in peers
]
def subscribe_connected_peer(self, peer_address: PeerAddress, stopped: threading.Event) -> Iterable[Optional[ConnectedPeer]]:
for peer in self.network_manager.subscribe_connected_peer(peer_address, stopped):
if peer is None:
yield None
else:
yield ConnectedPeer(
peer=peer,
saved_peer=self.squeak_db.get_peer_by_address(
peer.remote_address,
),
)
def subscribe_new_squeaks(self, stopped: threading.Event):
yield from self.new_squeak_listener.yield_items(stopped)
def subscribe_new_secret_keys(self, stopped: threading.Event):
yield from self.new_secret_key_listener.yield_items(stopped)
def subscribe_follows(self, stopped: threading.Event):
yield from self.new_follow_listener.yield_items(stopped)
def update_subscriptions(self):
locator = self.get_interested_locator()
self.network_manager.update_local_subscriptions(locator)
def create_update_subscriptions_event(self):
self.new_follow_listener.handle_new_item(UpdateSubscriptionsEvent())
def subscribe_received_offers_for_squeak(self, squeak_hash: bytes, stopped: threading.Event):
for received_offer in self.new_received_offer_listener.yield_items(stopped):
if received_offer.squeak_hash == squeak_hash:
yield received_offer
def subscribe_squeak_entry(self, squeak_hash: bytes, stopped: threading.Event):
for item in self.new_squeak_listener.yield_items(stopped):
if squeak_hash == get_hash(item):
yield self.get_squeak_entry(squeak_hash)
def subscribe_squeak_reply_entries(self, squeak_hash: bytes, stopped: threading.Event):
for item in self.new_squeak_listener.yield_items(stopped):
if squeak_hash == item.hashReplySqk:
reply_hash = get_hash(item)
yield self.get_squeak_entry(reply_hash)
def subscribe_squeak_address_entries(self, squeak_address: str, stopped: threading.Event):
for item in self.new_squeak_listener.yield_items(stopped):
if squeak_address == str(item.GetAddress()):
squeak_hash = get_hash(item)
yield self.get_squeak_entry(squeak_hash)
def subscribe_squeak_ancestor_entries(self, squeak_hash: bytes, stopped: threading.Event):
for item in self.new_squeak_listener.yield_items(stopped):
if squeak_hash == get_hash(item):
yield self.get_ancestor_squeak_entries(squeak_hash)
def subscribe_squeak_entries(self, stopped: threading.Event):
for item in self.new_squeak_listener.yield_items(stopped):
squeak_hash = get_hash(item)
yield self.get_squeak_entry(squeak_hash)
def subscribe_timeline_squeak_entries(self, stopped: threading.Event):
for item in self.new_squeak_listener.yield_items(stopped):
followed_addresses = self.get_followed_addresses()
if str(item.GetAddress()) in set(followed_addresses):
squeak_hash = get_hash(item)
yield self.get_squeak_entry(squeak_hash)
def get_external_address(self) -> PeerAddress:
return self.network_manager.external_address
def get_default_peer_port(self) -> int:
return squeak.params.params.DEFAULT_PORT
def forward_squeak(self, squeak):
logger.debug("Forward new squeak: {!r}".format(
get_hash(squeak).hex(),
))
for peer in self.network_manager.get_connected_peers():
if peer.is_remote_subscribed(squeak):
logger.debug("Forwarding to peer: {}".format(
peer,
))
squeak_hash = get_hash(squeak)
inv = CInv(type=MSG_SQUEAK, hash=squeak_hash)
inv_msg = msg_inv(inv=[inv])
peer.send_msg(inv_msg)
logger.debug("Finished checking peers to forward.")
def forward_secret_key(self, squeak):
logger.debug("Forward new secret key for hash: {!r}".format(
get_hash(squeak).hex(),
))
for peer in self.network_manager.get_connected_peers():
if peer.is_remote_subscribed(squeak):
logger.debug("Forwarding to peer: {}".format(
peer,
))
squeak_hash = get_hash(squeak)
inv = CInv(type=MSG_SECRET_KEY, hash=squeak_hash)
inv_msg = msg_inv(inv=[inv])
peer.send_msg(inv_msg)
logger.debug("Finished checking peers to forward.")
| en | 0.745344 | # MIT License # # Copyright (c) 2020 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # self.temporary_interest_manager = TemporaryInterestManager() # Check if the squeak is valid # Get the block header for the squeak. # Check if limit exceeded. # Insert the squeak in db. # Notify the listener # Notify the listener # Try saving squeak as active download Save the given squeak as a result of an active download. Returns: bytes: the hash of the saved squeak. Save the given squeak because it matches the followed interest criteria. Returns: bytes: the hash of the saved squeak. # return self.temporary_interest_manager.lookup_counter(squeak) # Check if there is an existing offer for the hash/peer_address combination # def get_received_offer_for_squeak_and_peer( # self, # squeak_hash: bytes, # peer_addresss: PeerAddress, # ) -> Optional[ReceivedOffer]: # return self.squeak_db.get_received_offer_for_squeak_and_peer( # squeak_hash, # peer_addresss, # ) # Get the offer from the database # # Delete the received offer # self.squeak_db.delete_offer(sent_payment.payment_hash) # Mark the received offer as paid # TODO: Call unpack_offer with check_payment_point=True. | 1.410707 | 1 |
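The controller above publishes events through `EventListener` objects (`handle_new_item` / `yield_items`), whose implementation lives in `squeaknode.node.listener_subscription_client` and is not included in this record. The queue-based sketch below only illustrates a compatible listener shape; it is an assumption, not the project's actual implementation:

```python
# Minimal fan-out listener sketch: every active subscriber gets each new item.
import queue
import threading
from typing import Iterator, Set


class SimpleEventListener:
    def __init__(self) -> None:
        self._queues: Set[queue.Queue] = set()
        self._lock = threading.Lock()

    def handle_new_item(self, item) -> None:
        # Deliver the item to every currently subscribed queue.
        with self._lock:
            for q in list(self._queues):
                q.put(item)

    def yield_items(self, stopped: threading.Event) -> Iterator:
        # Register a private queue and stream from it until stopped is set.
        q: queue.Queue = queue.Queue()
        with self._lock:
            self._queues.add(q)
        try:
            while not stopped.is_set():
                try:
                    yield q.get(timeout=1)
                except queue.Empty:
                    continue
        finally:
            with self._lock:
                self._queues.discard(q)
```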
weatherfood/application.py | leeb24/CS411-s18 | 0 | 6632941 | <reponame>leeb24/CS411-s18
import flask
import flask_bootstrap
import flask_oauth2_login
app = flask.Flask(__name__)
app.config.from_object('config')
flask_bootstrap.Bootstrap(app)
google_login = flask_oauth2_login.GoogleLogin(app)
| import flask
import flask_bootstrap
import flask_oauth2_login
app = flask.Flask(__name__)
app.config.from_object('config')
flask_bootstrap.Bootstrap(app)
google_login = flask_oauth2_login.GoogleLogin(app) | none | 1 | 1.926366 | 2 |
|
dials/models.py | daonb/Open-Knesset | 69 | 6632942 | from django.db import models
class Dial(models.Model):
precent = models.IntegerField(default=0)
slug = models.SlugField(max_length=1000)
description = models.TextField(null=True, blank=True)
updated = models.DateTimeField(auto_now=True)
| from django.db import models
class Dial(models.Model):
precent = models.IntegerField(default=0)
slug = models.SlugField(max_length=1000)
description = models.TextField(null=True, blank=True)
updated = models.DateTimeField(auto_now=True)
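For illustration, one typical way this model could be used through the Django ORM; the slug and values below are invented and this is not code from the original repository:

```python
# Hypothetical usage of the Dial model via the Django ORM.
dial, created = Dial.objects.get_or_create(slug="budget-vote")
dial.precent = 42          # field name kept exactly as spelled in the model
dial.description = "Support for the budget bill"
dial.save()                # `updated` refreshes automatically (auto_now=True)
```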
| none | 1 | 2.120676 | 2 |
|
SystemTests/bin/das_logfile_parser.py | vkuznet/DBS | 8 | 6632943 | <filename>SystemTests/bin/das_logfile_parser.py
#!/usr/bin/env python
from __future__ import print_function
from optparse import OptionParser
import json, os, re, sys
def get_command_line_options(executable_name, arguments):
parser = OptionParser(usage="%s options" % executable_name)
parser.add_option("-i", "--in", type="string", dest="input", help="Input DAS Logfile")
parser.add_option("-o", "--out", type="string", dest="output", help="Output JSON")
(options, args) = parser.parse_args()
error_msg = """You need to provide following options, --in=input.txt (mandatory), --out=output.json (optional)\n"""
if not options.input:
parser.print_help()
parser.error(error_msg)
return options
def replace_values(log_entry):
if log_entry["user"] == "-":
log_entry["user"] = None
log_entry["status"] = int(log_entry["status"])
log_entry["size"] = 0 if log_entry["size"] == "-" else int(log_entry["size"])
if log_entry["referer"] == "-":
log_entry["referer"] = None
return log_entry
### using regular expressions from http://www.seehuhn.de/blog/52 by <NAME> (Creative Common License)
log_parts = [r'(?P<host>\S+)', # host %h
r'\S+', # indent %l (unused)
r'(?P<user>\S+)', # user %u
r'\[(?P<time>.+)\]', # time %t
r'"(?P<request>.+)"', # request "%r"
r'(?P<status>[0-9]+)', # status %>s
r'(?P<size>\S+)', # size %b (careful, can be '-')
r'"(?P<referer>.*)"', # referer "%{Referer}i"
r'"(?P<agent>.*)"', # user agent "%{User-agent}i"
]
log_pattern = re.compile(r'\s+'.join(log_parts)+r'\s*\Z')
options = get_command_line_options(os.path.basename(__file__), sys.argv)
log_entries = []
with open(options.input, 'r') as f:
for line in f:
match_obj = log_pattern.match(line)
log_entries.append(replace_values(match_obj.groupdict()))
if options.output:
with open(options.output, 'w') as f:
json.dump(log_entries, f)
else:
print(log_entries)
| <filename>SystemTests/bin/das_logfile_parser.py
#!/usr/bin/env python
from __future__ import print_function
from optparse import OptionParser
import json, os, re, sys
def get_command_line_options(executable_name, arguments):
parser = OptionParser(usage="%s options" % executable_name)
parser.add_option("-i", "--in", type="string", dest="input", help="Input DAS Logfile")
parser.add_option("-o", "--out", type="string", dest="output", help="Output JSON")
(options, args) = parser.parse_args()
error_msg = """You need to provide following options, --in=input.txt (mandatory), --out=output.json (optional)\n"""
if not options.input:
parser.print_help()
parser.error(error_msg)
return options
def replace_values(log_entry):
if log_entry["user"] == "-":
log_entry["user"] = None
log_entry["status"] = int(log_entry["status"])
log_entry["size"] = 0 if log_entry["size"] == "-" else int(log_entry["size"])
if log_entry["referer"] == "-":
log_entry["referer"] = None
return log_entry
### using regular expressions from http://www.seehuhn.de/blog/52 by <NAME> (Creative Common License)
log_parts = [r'(?P<host>\S+)', # host %h
r'\S+', # indent %l (unused)
r'(?P<user>\S+)', # user %u
r'\[(?P<time>.+)\]', # time %t
r'"(?P<request>.+)"', # request "%r"
r'(?P<status>[0-9]+)', # status %>s
r'(?P<size>\S+)', # size %b (careful, can be '-')
r'"(?P<referer>.*)"', # referer "%{Referer}i"
r'"(?P<agent>.*)"', # user agent "%{User-agent}i"
]
log_pattern = re.compile(r'\s+'.join(log_parts)+r'\s*\Z')
options = get_command_line_options(os.path.basename(__file__), sys.argv)
log_entries = []
with open(options.input, 'r') as f:
for line in f:
match_obj = log_pattern.match(line)
log_entries.append(replace_values(match_obj.groupdict()))
if options.output:
with open(options.output, 'w') as f:
json.dump(log_entries, f)
else:
print(log_entries)
| en | 0.570474 | #!/usr/bin/env python You need to provide following options, --in=input.txt (mandatory), --out=output.json (optional)\n ### using regular expressions from http://www.seehuhn.de/blog/52 by <NAME> (Creative Common License) # host %h # indent %l (unused) # user %u # time %t # request "%r" # status %>s # size %b (careful, can be '-') # referer "%{Referer}i" # user agent "%{User-agent}i" | 2.618254 | 3 |
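The heart of the parser above is the combined-log-format regular expression. A self-contained check of that same pattern against one Apache-style line (the sample line is invented for illustration):

```python
# Small standalone check of the same combined-log-format regex.
import re

log_parts = [r'(?P<host>\S+)', r'\S+', r'(?P<user>\S+)', r'\[(?P<time>.+)\]',
             r'"(?P<request>.+)"', r'(?P<status>[0-9]+)', r'(?P<size>\S+)',
             r'"(?P<referer>.*)"', r'"(?P<agent>.*)"']
log_pattern = re.compile(r'\s+'.join(log_parts) + r'\s*\Z')

sample = ('127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] '
          '"GET /apache_pb.gif HTTP/1.0" 200 2326 '
          '"http://www.example.com/start.html" "Mozilla/4.08"')

fields = log_pattern.match(sample).groupdict()
print(fields["status"])   # '200'
print(fields["request"])  # 'GET /apache_pb.gif HTTP/1.0'
```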
Homework_4/D_Keyboard/D_Keyboard.py | dimk00z/summer_yandex_algorithmic_course | 8 | 6632944 | with open('input.txt') as file:
lines = file.readlines()
keyboard_buttons = int(lines[0])
buttons = {str(button_position+1): int(presses)
for button_position, presses in enumerate(lines[1].split())}
presses_number = int(lines[2])
for press in lines[3].split():
buttons[press] -= 1
result = []
for button_position in range(keyboard_buttons):
is_broken = 'NO' if buttons[str(button_position+1)] >= 0 else "YES"
result.append(is_broken)
with open('output.txt', 'w') as file:
file.write('\n'.join(result))
| with open('input.txt') as file:
lines = file.readlines()
keyboard_buttons = int(lines[0])
buttons = {str(button_position+1): int(presses)
for button_position, presses in enumerate(lines[1].split())}
presses_number = int(lines[2])
for press in lines[3].split():
buttons[press] -= 1
result = []
for button_position in range(keyboard_buttons):
is_broken = 'NO' if buttons[str(button_position+1)] >= 0 else "YES"
result.append(is_broken)
with open('output.txt', 'w') as file:
file.write('\n'.join(result))
| none | 1 | 3.33559 | 3 |
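The same counting logic as the solution above, condensed into a function so the expected behaviour is easy to see; the capacities and presses in the example are invented:

```python
# A button is reported broken ("YES") when it was pressed more times than
# its guaranteed capacity allows.
def broken_buttons(capacities, presses):
    remaining = list(capacities)
    for p in presses:
        remaining[p - 1] -= 1
    return ["YES" if r < 0 else "NO" for r in remaining]


print(broken_buttons([2, 1, 1], [1, 1, 1, 2]))  # ['YES', 'NO', 'NO']
```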
|
edl/records.py | jmeppley/py-metagenomics | 7 | 6632945 | from edl.util import parse_list_to_set
def recordIterator(stream, separatorRE, idRE=None):
"""
Given:
a file-like object (any iterator over strings)
1 or 2 regular expressions that define record boundaries
and identifiers
Return:
an iterator over records that returns a tuple of (id, [recordLines])
If only a separator given, it is assumed to match the record id
"""
recordId = None
recordLines = []
for line in stream:
m = separatorRE.search(line)
if m:
# is there a previous record?
if recordId is not None:
yield (recordId, recordLines)
recordId = None
recordLines = [line, ]
if idRE is None:
recordId = m.group(1)
continue
recordLines.append(line)
if idRE is not None:
m = idRE.search(line)
if m:
recordId = m.group(1)
if recordId is not None:
yield (recordId, recordLines)
def screenRecords(
stream,
separatorRE,
idRE=None,
keep=False,
screen_set=None,
screenFile=None):
"""
uses recordIterator(stream, separatorRE, idRE) to parse input into records
uses screen_set (can be read from screenFile) to identify records
identified records are kept or skipped based on the value of keep
"""
if screen_set is None:
if screenFile is None:
raise Exception(
"Please supply a hash(Python map) or file of record keys")
else:
screen_set = parse_list_to_set(screenFile)
for (
recordId,
recordLines) in recordIterator(
stream,
separatorRE,
idRE=idRE):
screened = recordId in screen_set
if screened == keep:
for line in recordLines:
yield line
| from edl.util import parse_list_to_set
def recordIterator(stream, separatorRE, idRE=None):
"""
Given:
a file-like object (any iterator over strings)
1 or 2 regular expressions that define record boundaries
and identifiers
Return:
an iterator over records that returns a tuple of (id, [recordLines])
If only a separator given, it is assumed to match the record id
"""
recordId = None
recordLines = []
for line in stream:
m = separatorRE.search(line)
if m:
# is there a previous record?
if recordId is not None:
yield (recordId, recordLines)
recordId = None
recordLines = [line, ]
if idRE is None:
recordId = m.group(1)
continue
recordLines.append(line)
if idRE is not None:
m = idRE.search(line)
if m:
recordId = m.group(1)
if recordId is not None:
yield (recordId, recordLines)
def screenRecords(
stream,
separatorRE,
idRE=None,
keep=False,
screen_set=None,
screenFile=None):
"""
uses recordIterator(stream, separatorRE, idRE) to parse input into records
uses screen_set (can be read from screenFile) to identify records
identified records are kept or skipped based on the value of keep
"""
if screen_set is None:
if screenFile is None:
raise Exception(
"Please supply a hash(Python map) or file of record keys")
else:
screen_set = parse_list_to_set(screenFile)
for (
recordId,
recordLines) in recordIterator(
stream,
separatorRE,
idRE=idRE):
screened = recordId in screen_set
if screened == keep:
for line in recordLines:
yield line
| en | 0.789396 | Given: na file-like object (any iterator over strings) 1 or 2 regular expressions that define record boundaries and identifiers Return: an iterator over records that returns a tuple of (id, [recordLines]) If only a separator given, it is assumed to match the record id # is there a previous record? uses recordIterator(strean, separatorRE, idRE) to parse input into records uses screen_set (can be read from screenFile) to identify records identified records are kept or skipped based on the value of keep | 3.559791 | 4 |
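A short demonstration of `recordIterator` on FASTA-style input; the separator regex and sample records are invented, and the import assumes the module path `edl/records.py` shown above:

```python
# Each record starts at a line matching the separator; group(1) is the id.
import re
from io import StringIO

from edl.records import recordIterator

fasta = StringIO(">seq1 first\nACGT\nTTGA\n>seq2 second\nGGCC\n")
separator = re.compile(r'^>(\S+)')

for record_id, lines in recordIterator(fasta, separator):
    print(record_id, len(lines))
# seq1 3
# seq2 2
```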
strawberry/schema/types/scalar.py | matEhickey/strawberry | 0 | 6632946 | import datetime
import decimal
from typing import Dict, Type, cast
from uuid import UUID
from graphql import (
GraphQLBoolean,
GraphQLFloat,
GraphQLID,
GraphQLInt,
GraphQLScalarType,
GraphQLString,
)
from strawberry.custom_scalar import SCALAR_REGISTRY, ScalarDefinition
from strawberry.file_uploads.scalars import Upload
from strawberry.scalars import ID
from .base_scalars import Date, DateTime, Decimal, Time
from .types import ConcreteType, TypeMap
def _make_scalar_type(definition: ScalarDefinition) -> GraphQLScalarType:
return GraphQLScalarType(
name=definition.name,
description=definition.description,
serialize=definition.serialize,
parse_value=definition.parse_value,
parse_literal=definition.parse_literal,
)
DEFAULT_SCALAR_REGISTRY: Dict[Type, GraphQLScalarType] = {
str: GraphQLString,
int: GraphQLInt,
float: GraphQLFloat,
bool: GraphQLBoolean,
ID: GraphQLID,
UUID: GraphQLID,
Upload: _make_scalar_type(Upload._scalar_definition),
datetime.date: _make_scalar_type(Date._scalar_definition),
datetime.datetime: _make_scalar_type(DateTime._scalar_definition),
datetime.time: _make_scalar_type(Time._scalar_definition),
decimal.Decimal: _make_scalar_type(Decimal._scalar_definition),
}
def get_scalar_type(annotation: Type, type_map: TypeMap) -> GraphQLScalarType:
if annotation in DEFAULT_SCALAR_REGISTRY:
return DEFAULT_SCALAR_REGISTRY[annotation]
if annotation in SCALAR_REGISTRY:
scalar_definition = SCALAR_REGISTRY[annotation]
else:
scalar_definition = annotation._scalar_definition
if scalar_definition.name not in type_map:
type_map[scalar_definition.name] = ConcreteType(
definition=scalar_definition,
implementation=_make_scalar_type(scalar_definition),
)
return cast(GraphQLScalarType, type_map[scalar_definition.name].implementation)
| import datetime
import decimal
from typing import Dict, Type, cast
from uuid import UUID
from graphql import (
GraphQLBoolean,
GraphQLFloat,
GraphQLID,
GraphQLInt,
GraphQLScalarType,
GraphQLString,
)
from strawberry.custom_scalar import SCALAR_REGISTRY, ScalarDefinition
from strawberry.file_uploads.scalars import Upload
from strawberry.scalars import ID
from .base_scalars import Date, DateTime, Decimal, Time
from .types import ConcreteType, TypeMap
def _make_scalar_type(definition: ScalarDefinition) -> GraphQLScalarType:
return GraphQLScalarType(
name=definition.name,
description=definition.description,
serialize=definition.serialize,
parse_value=definition.parse_value,
parse_literal=definition.parse_literal,
)
DEFAULT_SCALAR_REGISTRY: Dict[Type, GraphQLScalarType] = {
str: GraphQLString,
int: GraphQLInt,
float: GraphQLFloat,
bool: GraphQLBoolean,
ID: GraphQLID,
UUID: GraphQLID,
Upload: _make_scalar_type(Upload._scalar_definition),
datetime.date: _make_scalar_type(Date._scalar_definition),
datetime.datetime: _make_scalar_type(DateTime._scalar_definition),
datetime.time: _make_scalar_type(Time._scalar_definition),
decimal.Decimal: _make_scalar_type(Decimal._scalar_definition),
}
def get_scalar_type(annotation: Type, type_map: TypeMap) -> GraphQLScalarType:
if annotation in DEFAULT_SCALAR_REGISTRY:
return DEFAULT_SCALAR_REGISTRY[annotation]
if annotation in SCALAR_REGISTRY:
scalar_definition = SCALAR_REGISTRY[annotation]
else:
scalar_definition = annotation._scalar_definition
if scalar_definition.name not in type_map:
type_map[scalar_definition.name] = ConcreteType(
definition=scalar_definition,
implementation=_make_scalar_type(scalar_definition),
)
return cast(GraphQLScalarType, type_map[scalar_definition.name].implementation)
| none | 1 | 2.141537 | 2 |
|
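get_scalar_type in the record above resolves a Python annotation to a GraphQL scalar: built-ins are answered from DEFAULT_SCALAR_REGISTRY, while custom scalars are built once from their _scalar_definition and cached in the type map under their name. A hedged usage sketch, assuming this version of the module is importable at the path shown; the SimpleNamespace object is a stand-in for a custom scalar, not Strawberry's real ScalarDefinition:
from types import SimpleNamespace
from graphql import GraphQLString
from strawberry.schema.types.scalar import get_scalar_type  # path taken from the record above
type_map = {}  # the TypeMap is only used as a dict of name -> ConcreteType here
assert get_scalar_type(str, type_map) is GraphQLString  # built-ins bypass the cache entirely
# A hypothetical annotation carrying a _scalar_definition, as custom scalars do.
fake = SimpleNamespace(_scalar_definition=SimpleNamespace(
    name="JSON", description=None, serialize=str, parse_value=None, parse_literal=None))
first = get_scalar_type(fake, type_map)
assert get_scalar_type(fake, type_map) is first  # the second lookup is served from type_map["JSON"]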
bin/analysis/ipa/model/object.py | ncbray/pystream | 6 | 6632947 | <gh_stars>1-10
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from language.python import program
from .. constraints import node, qualifiers
from ..escape import objectescape
class Object(object):
__slots__ = 'context', 'name', 'fields', 'flags', 'dirty'
def __init__(self, context, name):
self.context = context
self.name = name
self.fields = {}
self.flags = 0
self.dirty = False
if name.qualifier is qualifiers.DN:
self.flags |= objectescape.escapeParam
elif name.qualifier is qualifiers.GLBL:
self.flags |= objectescape.escapeGlobal
def updateFlags(self, context, flags):
diff = ~self.flags & flags
if diff:
self.flags |= diff
if not self.dirty:
self.dirty = True
context.dirtyObject(self)
def initDownwardField(self, slot, fieldtype, name):
for invoke in self.context.invokeIn.itervalues():
invoke.copyFieldFromSources(slot, self.name, fieldtype, name)
def initExistingField(self, slot, fieldtype, fieldname):
analysis = self.context.analysis
if fieldtype == 'LowLevel' and fieldname.pyobj == 'type':
ao = analysis.existingPolicy.typeObject(analysis, self.name)
values, null = [ao], False
elif self.name.xtype.isExternal():
values, null = analysis.externalPolicy.fieldValues(analysis, slot, self.name, fieldtype, fieldname)
else:
values, null = analysis.existingPolicy.fieldValues(analysis, slot, self.name, fieldtype, fieldname)
if values: slot.updateValues(frozenset(values))
if null: slot.markNull()
def field(self, fieldType, name):
assert isinstance(fieldType, str), fieldType
assert isinstance(name, program.AbstractObject), name
key = (fieldType, name)
if key not in self.fields:
result = node.ConstraintNode(self.context, (self.name, fieldType, name))
self.fields[key] = result
if self.context.external:
self.initExistingField(result, fieldType, name)
elif self.name.qualifier is qualifiers.GLBL:
self.initExistingField(result, fieldType, name) # HACK unsound
elif self.name.qualifier is qualifiers.DN:
self.initDownwardField(result, fieldType, name)
else:
result.markNull()
else:
result = self.fields[key]
return result
| # Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from language.python import program
from .. constraints import node, qualifiers
from ..escape import objectescape
class Object(object):
__slots__ = 'context', 'name', 'fields', 'flags', 'dirty'
def __init__(self, context, name):
self.context = context
self.name = name
self.fields = {}
self.flags = 0
self.dirty = False
if name.qualifier is qualifiers.DN:
self.flags |= objectescape.escapeParam
elif name.qualifier is qualifiers.GLBL:
self.flags |= objectescape.escapeGlobal
def updateFlags(self, context, flags):
diff = ~self.flags & flags
if diff:
self.flags |= diff
if not self.dirty:
self.dirty = True
context.dirtyObject(self)
def initDownwardField(self, slot, fieldtype, name):
for invoke in self.context.invokeIn.itervalues():
invoke.copyFieldFromSources(slot, self.name, fieldtype, name)
def initExistingField(self, slot, fieldtype, fieldname):
analysis = self.context.analysis
if fieldtype == 'LowLevel' and fieldname.pyobj == 'type':
ao = analysis.existingPolicy.typeObject(analysis, self.name)
values, null = [ao], False
elif self.name.xtype.isExternal():
values, null = analysis.externalPolicy.fieldValues(analysis, slot, self.name, fieldtype, fieldname)
else:
values, null = analysis.existingPolicy.fieldValues(analysis, slot, self.name, fieldtype, fieldname)
if values: slot.updateValues(frozenset(values))
if null: slot.markNull()
def field(self, fieldType, name):
assert isinstance(fieldType, str), fieldType
assert isinstance(name, program.AbstractObject), name
key = (fieldType, name)
if key not in self.fields:
result = node.ConstraintNode(self.context, (self.name, fieldType, name))
self.fields[key] = result
if self.context.external:
self.initExistingField(result, fieldType, name)
elif self.name.qualifier is qualifiers.GLBL:
self.initExistingField(result, fieldType, name) # HACK unsound
elif self.name.qualifier is qualifiers.DN:
self.initDownwardField(result, fieldType, name)
else:
result.markNull()
else:
result = self.fields[key]
return result | en | 0.846806 | # Copyright 2011 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # HACK unsound | 1.805449 | 2 |
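Object.field() above is a memoised factory: each (fieldType, fieldname) pair maps to exactly one ConstraintNode, and a freshly created node is initialised differently depending on whether the object is external, global, a downward parameter, or purely local. A framework-free sketch of that cache-then-initialise-once structure; every name below is invented and only mirrors the pattern:
class FieldCache:
    """Toy analogue of Object.field(): one node per (field_type, name) key."""
    def __init__(self):
        self._fields = {}
    def field(self, field_type, name, init=None):
        key = (field_type, name)
        if key not in self._fields:
            node = {"key": key, "values": set()}  # stand-in for a ConstraintNode
            self._fields[key] = node
            if init is not None:
                init(node)  # origin-specific initialisation runs exactly once per key
        return self._fields[key]
cache = FieldCache()
assert cache.field("Attribute", "x") is cache.field("Attribute", "x")  # later calls reuse the same node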
python/ray/util/collective/collective_group/nccl_collective_group.py | coreylowman/ray | 1 | 6632948 | import logging
import datetime
import time
import ray
import cupy
from ray.util.collective.collective_group import nccl_util
from ray.util.collective.collective_group.base_collective_group \
import BaseGroup
from ray.util.collective.types import AllReduceOptions, \
BarrierOptions, Backend, ReduceOptions, BroadcastOptions, \
AllGatherOptions, ReduceScatterOptions
from ray.util.collective.const import get_nccl_store_name
logger = logging.getLogger(__name__)
# TODO(Hao):
# (1) stream management, instead of using the default stream,
# using a dedicated stream
# (2) communicator management and support num_gpus > 2 per actor.
class Rendezvous:
"""A rendezvous class for different actor/task processes to meet.
To initialize an NCCL collective communication group, different
actors/tasks spawned in Ray in a collective group need to meet
each other to synchronize the NCCLUniqueID. This class guarantees
they meet via the NCCLUniqueIDStore, initialized on the rank=0
process.
Args:
group_name (str): the unique user-specified group name.
"""
def __init__(self, group_name):
if not group_name:
raise ValueError("Invalid group name.")
self._group_name = group_name
self._store_name = None
self._store = None
def meet(self, timeout_s=180):
"""Meet at the named actor store.
Args:
timeout_s: timeout in seconds.
Return:
None
"""
if timeout_s <= 0:
raise ValueError("The 'timeout' argument must be positive. "
"Got '{}'.".format(timeout_s))
self._store_name = get_nccl_store_name(self._group_name)
timeout_delta = datetime.timedelta(seconds=timeout_s)
elapsed = datetime.timedelta(seconds=0)
start_time = datetime.datetime.now()
while elapsed < timeout_delta:
try:
logger.debug("Trying to meet at the store '{}'".format(
self._store_name))
self._store = ray.get_actor(self._store_name)
except ValueError:
logger.debug("Failed to meet at the store '{}'."
"Trying again...".format(self._store_name))
time.sleep(1)
elapsed = datetime.datetime.now() - start_time
continue
logger.debug("Successful rendezvous!")
break
if not self._store:
raise RuntimeError("Unable to meet other processes "
"at the rendezvous store.")
@property
def store(self):
return self._store
def get_nccl_id(self, timeout_s=180):
"""Get the NCCLUniqueID from the store through Ray.
Args:
timeout_s: timeout in seconds.
Return:
str: the NCCLUniqueID if successful.
"""
if not self._store:
raise ValueError("Rendezvous store is not setup.")
uid = None
timeout_delta = datetime.timedelta(seconds=timeout_s)
elapsed = datetime.timedelta(seconds=0)
start_time = datetime.datetime.now()
while elapsed < timeout_delta:
uid = ray.get(self._store.get_id.remote())
if not uid:
time.sleep(1)
elapsed = datetime.datetime.now() - start_time
continue
break
if not uid:
raise RuntimeError(
"Unable to get the NCCLUniqueID from the store.")
return uid
class NCCLGroup(BaseGroup):
def __init__(self, world_size, rank, group_name):
"""Init an NCCL collective group."""
super(NCCLGroup, self).__init__(world_size, rank, group_name)
self._nccl_uid = None
# TODO(Hao): change this to a be a cache
self._nccl_comm = None
if nccl_util.get_nccl_build_version() < 2000:
raise RuntimeError("NCCL in Ray requires NCCL >= 2.0.")
# TODO(Hao): check version here
if nccl_util.get_nccl_runtime_version() < 2704:
logger.warning("NCCL send/recv calls requires NCCL>=2.7.4")
self._rendezvous = Rendezvous(self.group_name)
self._rendezvous.meet()
# Setup the nccl uid using the store
self._init_nccl_unique_id()
# Setup a tensor for barrier calls
self._barrier_tensor = cupy.array([1])
def _init_nccl_unique_id(self):
"""Init the NCCLUniqueID required for creating NCCL communicators."""
self._nccl_uid = self._rendezvous.get_nccl_id()
@property
def nccl_uid(self):
return self._nccl_uid
def destroy_group(self):
"""Destroy the group and release the NCCL communicators safely."""
if self._nccl_comm is not None:
self.barrier()
# We also need a barrier call here.
stream = self._get_cuda_stream()
stream.synchronize()
# destroy the communicator
self._nccl_comm.destroy()
self._nccl_comm = None
super(NCCLGroup, self).destroy_group()
@classmethod
def backend(cls):
return Backend.NCCL
def allreduce(self, tensor, allreduce_options=AllReduceOptions()):
"""AllReduce the tensor across the collective group following options.
Args:
tensor: the tensor to be reduced, each tensor locates on a GPU
allreduce_options:
Returns:
"""
# obtain the communicator
comm = self._get_nccl_communicator()
# obtain the stream: using default stream by now
# TODO(Hao): implement a simple stream manager here
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
ptr = nccl_util.get_tensor_ptr(tensor)
n_elems = nccl_util.get_tensor_n_elements(tensor)
reduce_op = nccl_util.get_nccl_reduce_op(allreduce_options.reduceOp)
# in-place allreduce
comm.allReduce(ptr, ptr, n_elems, dtype, reduce_op, stream.ptr)
def barrier(self, barrier_options=BarrierOptions()):
"""Blocks until all processes reach this barrier.
Args:
barrier_options:
Returns:
"""
self.allreduce(self._barrier_tensor)
def reduce(self, tensor, reduce_options=ReduceOptions()):
"""Reduce tensor to a destination process following options.
Args:
tensor: the tensor to be reduced.
reduce_options: reduce options
Returns:
None
"""
comm = self._get_nccl_communicator()
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
ptr = nccl_util.get_tensor_ptr(tensor)
n_elems = nccl_util.get_tensor_n_elements(tensor)
reduce_op = nccl_util.get_nccl_reduce_op(reduce_options.reduceOp)
# in-place reduce
comm.reduce(ptr, ptr, n_elems, dtype, reduce_op,
reduce_options.root_rank, stream.ptr)
def broadcast(self, tensor, broadcast_options=BroadcastOptions()):
"""Broadcast tensor to all other processes following options.
Args:
tensor: the tensor to be broadcasted.
broadcast_options: broadcast options.
Returns:
None
"""
comm = self._get_nccl_communicator()
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
ptr = nccl_util.get_tensor_ptr(tensor)
n_elems = nccl_util.get_tensor_n_elements(tensor)
# in-place broadcast
comm.broadcast(ptr, ptr, n_elems, dtype, broadcast_options.root_rank,
stream.ptr)
def allgather(self,
tensor_list,
tensor,
allgather_options=AllGatherOptions()):
"""Allgather tensors across the group into a list of tensors.
Args:
tensor_list: the tensor list to store the results.
tensor: the tensor to be allgather-ed across the group.
allgather_options: allgather options.
Returns:
None
"""
_check_inputs_compatibility_for_scatter_gather(tensor, tensor_list)
comm = self._get_nccl_communicator()
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
send_ptr = nccl_util.get_tensor_ptr(tensor)
n_elems = nccl_util.get_tensor_n_elements(tensor)
flattened = _flatten_for_scatter_gather(tensor_list, copy=False)
recv_ptr = nccl_util.get_tensor_ptr(flattened)
comm.allGather(send_ptr, recv_ptr, n_elems, dtype, stream.ptr)
for i, t in enumerate(tensor_list):
nccl_util.copy_tensor(t, flattened[i])
def reducescatter(self,
tensor,
tensor_list,
reducescatter_options=ReduceScatterOptions()):
"""Reducescatter a list of tensors across the group.
Args:
tensor: the output after reducescatter (could be unspecified).
tensor_list: the list of tensor to be reduce and scattered.
reducescatter_options: reducescatter options.
Returns:
None
"""
_check_inputs_compatibility_for_scatter_gather(tensor, tensor_list)
comm = self._get_nccl_communicator()
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor_list[0])
n_elems = nccl_util.get_tensor_n_elements(tensor_list[0])
reduce_op = nccl_util.get_nccl_reduce_op(
reducescatter_options.reduceOp)
# get the send_ptr
flattened = _flatten_for_scatter_gather(tensor_list, copy=True)
send_ptr = nccl_util.get_tensor_ptr(flattened)
recv_ptr = nccl_util.get_tensor_ptr(tensor)
comm.reduceScatter(send_ptr, recv_ptr, n_elems, dtype, reduce_op,
stream.ptr)
def _get_nccl_communicator(self):
"""Create or use a cached NCCL communicator for the collective task.
"""
# TODO(Hao): later change this to use device keys and query from cache.
# TODO(Hao): implement a thin wrapper
if not self._nccl_comm:
self._nccl_comm = nccl_util.create_nccl_communicator(
self.world_size, self.nccl_uid, self.rank)
return self._nccl_comm
@staticmethod
def _get_cuda_stream():
"""Obtain an idle stream from a stream pool for the collective task."""
# TODO: implement a simple stream manager.
return cupy.cuda.Stream.null
# def _collective_call(self, *args):
# """Private method to encapsulate all collective calls"""
# pass
def _flatten_for_scatter_gather(tensor_list, copy=False):
"""Flatten the tensor for gather/scatter operations.
Args:
tensor_list: the list of tensors to be scattered/gathered.
copy: whether to copy the tensors in tensor_list into the buffer.
Returns:
The flattened tensor buffer.
"""
if not tensor_list:
raise RuntimeError("Received an empty list.")
t = tensor_list[0]
# note we need a cupy dtype here.
dtype = nccl_util.get_cupy_tensor_dtype(t)
buffer_shape = [len(tensor_list)] + nccl_util.get_tensor_shape(t)
buffer = cupy.empty(buffer_shape, dtype=dtype)
if copy:
for i, tensor in enumerate(tensor_list):
nccl_util.copy_tensor(buffer[i], tensor)
return buffer
def _check_inputs_compatibility_for_scatter_gather(tensor, tensor_list):
"""Check the compatibility between tensor input and tensor list inputs."""
if not tensor_list:
raise RuntimeError("Got empty list of tensors.")
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
shape = nccl_util.get_tensor_shape(tensor)
for t in tensor_list:
# check dtype
dt = nccl_util.get_nccl_tensor_dtype(t)
if dt != dtype:
raise RuntimeError("All tensor operands to scatter/gather must "
"have the same dtype. Got '{}' and '{}'"
"".format(dt, dtype))
# Note: typically CCL libraries only require that they have the same
# number of elements;
# Here we make it more strict -- we require exact shape match.
if nccl_util.get_tensor_shape(t) != shape:
raise RuntimeError("All tensor operands to scatter/gather must "
"have the same shape.")
| import logging
import datetime
import time
import ray
import cupy
from ray.util.collective.collective_group import nccl_util
from ray.util.collective.collective_group.base_collective_group \
import BaseGroup
from ray.util.collective.types import AllReduceOptions, \
BarrierOptions, Backend, ReduceOptions, BroadcastOptions, \
AllGatherOptions, ReduceScatterOptions
from ray.util.collective.const import get_nccl_store_name
logger = logging.getLogger(__name__)
# TODO(Hao):
# (1) stream management, instead of using the default stream,
# using a dedicated stream
# (2) communicator management and support num_gpus > 2 per actor.
class Rendezvous:
"""A rendezvous class for different actor/task processes to meet.
To initialize an NCCL collective communication group, different
actors/tasks spawned in Ray in a collective group need to meet
each other to synchronize the NCCLUniqueID. This class guarantees
they meet via the NCCLUniqueIDStore, initialized on the rank=0
process.
Args:
group_name (str): the unique user-specified group name.
"""
def __init__(self, group_name):
if not group_name:
raise ValueError("Invalid group name.")
self._group_name = group_name
self._store_name = None
self._store = None
def meet(self, timeout_s=180):
"""Meet at the named actor store.
Args:
timeout_s: timeout in seconds.
Return:
None
"""
if timeout_s <= 0:
raise ValueError("The 'timeout' argument must be positive. "
"Got '{}'.".format(timeout_s))
self._store_name = get_nccl_store_name(self._group_name)
timeout_delta = datetime.timedelta(seconds=timeout_s)
elapsed = datetime.timedelta(seconds=0)
start_time = datetime.datetime.now()
while elapsed < timeout_delta:
try:
logger.debug("Trying to meet at the store '{}'".format(
self._store_name))
self._store = ray.get_actor(self._store_name)
except ValueError:
logger.debug("Failed to meet at the store '{}'."
"Trying again...".format(self._store_name))
time.sleep(1)
elapsed = datetime.datetime.now() - start_time
continue
logger.debug("Successful rendezvous!")
break
if not self._store:
raise RuntimeError("Unable to meet other processes "
"at the rendezvous store.")
@property
def store(self):
return self._store
def get_nccl_id(self, timeout_s=180):
"""Get the NCCLUniqueID from the store through Ray.
Args:
timeout_s: timeout in seconds.
Return:
str: the NCCLUniqueID if successful.
"""
if not self._store:
raise ValueError("Rendezvous store is not setup.")
uid = None
timeout_delta = datetime.timedelta(seconds=timeout_s)
elapsed = datetime.timedelta(seconds=0)
start_time = datetime.datetime.now()
while elapsed < timeout_delta:
uid = ray.get(self._store.get_id.remote())
if not uid:
time.sleep(1)
elapsed = datetime.datetime.now() - start_time
continue
break
if not uid:
raise RuntimeError(
"Unable to get the NCCLUniqueID from the store.")
return uid
class NCCLGroup(BaseGroup):
def __init__(self, world_size, rank, group_name):
"""Init an NCCL collective group."""
super(NCCLGroup, self).__init__(world_size, rank, group_name)
self._nccl_uid = None
# TODO(Hao): change this to a be a cache
self._nccl_comm = None
if nccl_util.get_nccl_build_version() < 2000:
raise RuntimeError("NCCL in Ray requires NCCL >= 2.0.")
# TODO(Hao): check version here
if nccl_util.get_nccl_runtime_version() < 2704:
logger.warning("NCCL send/recv calls requires NCCL>=2.7.4")
self._rendezvous = Rendezvous(self.group_name)
self._rendezvous.meet()
# Setup the nccl uid using the store
self._init_nccl_unique_id()
# Setup a tensor for barrier calls
self._barrier_tensor = cupy.array([1])
def _init_nccl_unique_id(self):
"""Init the NCCLUniqueID required for creating NCCL communicators."""
self._nccl_uid = self._rendezvous.get_nccl_id()
@property
def nccl_uid(self):
return self._nccl_uid
def destroy_group(self):
"""Destroy the group and release the NCCL communicators safely."""
if self._nccl_comm is not None:
self.barrier()
# We also need a barrier call here.
stream = self._get_cuda_stream()
stream.synchronize()
# destroy the communicator
self._nccl_comm.destroy()
self._nccl_comm = None
super(NCCLGroup, self).destroy_group()
@classmethod
def backend(cls):
return Backend.NCCL
def allreduce(self, tensor, allreduce_options=AllReduceOptions()):
"""AllReduce the tensor across the collective group following options.
Args:
tensor: the tensor to be reduced, each tensor locates on a GPU
allreduce_options:
Returns:
"""
# obtain the communicator
comm = self._get_nccl_communicator()
# obtain the stream: using default stream by now
# TODO(Hao): implement a simple stream manager here
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
ptr = nccl_util.get_tensor_ptr(tensor)
n_elems = nccl_util.get_tensor_n_elements(tensor)
reduce_op = nccl_util.get_nccl_reduce_op(allreduce_options.reduceOp)
# in-place allreduce
comm.allReduce(ptr, ptr, n_elems, dtype, reduce_op, stream.ptr)
def barrier(self, barrier_options=BarrierOptions()):
"""Blocks until all processes reach this barrier.
Args:
barrier_options:
Returns:
"""
self.allreduce(self._barrier_tensor)
def reduce(self, tensor, reduce_options=ReduceOptions()):
"""Reduce tensor to a destination process following options.
Args:
tensor: the tensor to be reduced.
reduce_options: reduce options
Returns:
None
"""
comm = self._get_nccl_communicator()
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
ptr = nccl_util.get_tensor_ptr(tensor)
n_elems = nccl_util.get_tensor_n_elements(tensor)
reduce_op = nccl_util.get_nccl_reduce_op(reduce_options.reduceOp)
# in-place reduce
comm.reduce(ptr, ptr, n_elems, dtype, reduce_op,
reduce_options.root_rank, stream.ptr)
def broadcast(self, tensor, broadcast_options=BroadcastOptions()):
"""Broadcast tensor to all other processes following options.
Args:
tensor: the tensor to be broadcasted.
broadcast_options: broadcast options.
Returns:
None
"""
comm = self._get_nccl_communicator()
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
ptr = nccl_util.get_tensor_ptr(tensor)
n_elems = nccl_util.get_tensor_n_elements(tensor)
# in-place broadcast
comm.broadcast(ptr, ptr, n_elems, dtype, broadcast_options.root_rank,
stream.ptr)
def allgather(self,
tensor_list,
tensor,
allgather_options=AllGatherOptions()):
"""Allgather tensors across the group into a list of tensors.
Args:
tensor_list: the tensor list to store the results.
tensor: the tensor to be allgather-ed across the group.
allgather_options: allgather options.
Returns:
None
"""
_check_inputs_compatibility_for_scatter_gather(tensor, tensor_list)
comm = self._get_nccl_communicator()
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
send_ptr = nccl_util.get_tensor_ptr(tensor)
n_elems = nccl_util.get_tensor_n_elements(tensor)
flattened = _flatten_for_scatter_gather(tensor_list, copy=False)
recv_ptr = nccl_util.get_tensor_ptr(flattened)
comm.allGather(send_ptr, recv_ptr, n_elems, dtype, stream.ptr)
for i, t in enumerate(tensor_list):
nccl_util.copy_tensor(t, flattened[i])
def reducescatter(self,
tensor,
tensor_list,
reducescatter_options=ReduceScatterOptions()):
"""Reducescatter a list of tensors across the group.
Args:
tensor: the output after reducescatter (could be unspecified).
tensor_list: the list of tensor to be reduce and scattered.
reducescatter_options: reducescatter options.
Returns:
None
"""
_check_inputs_compatibility_for_scatter_gather(tensor, tensor_list)
comm = self._get_nccl_communicator()
stream = self._get_cuda_stream()
dtype = nccl_util.get_nccl_tensor_dtype(tensor_list[0])
n_elems = nccl_util.get_tensor_n_elements(tensor_list[0])
reduce_op = nccl_util.get_nccl_reduce_op(
reducescatter_options.reduceOp)
# get the send_ptr
flattened = _flatten_for_scatter_gather(tensor_list, copy=True)
send_ptr = nccl_util.get_tensor_ptr(flattened)
recv_ptr = nccl_util.get_tensor_ptr(tensor)
comm.reduceScatter(send_ptr, recv_ptr, n_elems, dtype, reduce_op,
stream.ptr)
def _get_nccl_communicator(self):
"""Create or use a cached NCCL communicator for the collective task.
"""
# TODO(Hao): later change this to use device keys and query from cache.
# TODO(Hao): implement a thin wrapper
if not self._nccl_comm:
self._nccl_comm = nccl_util.create_nccl_communicator(
self.world_size, self.nccl_uid, self.rank)
return self._nccl_comm
@staticmethod
def _get_cuda_stream():
"""Obtain an idle stream from a stream pool for the collective task."""
# TODO: implement a simple stream manager.
return cupy.cuda.Stream.null
# def _collective_call(self, *args):
# """Private method to encapsulate all collective calls"""
# pass
def _flatten_for_scatter_gather(tensor_list, copy=False):
"""Flatten the tensor for gather/scatter operations.
Args:
tensor_list: the list of tensors to be scattered/gathered.
copy: whether to copy the tensors in tensor_list into the buffer.
Returns:
The flattened tensor buffer.
"""
if not tensor_list:
raise RuntimeError("Received an empty list.")
t = tensor_list[0]
# note we need a cupy dtype here.
dtype = nccl_util.get_cupy_tensor_dtype(t)
buffer_shape = [len(tensor_list)] + nccl_util.get_tensor_shape(t)
buffer = cupy.empty(buffer_shape, dtype=dtype)
if copy:
for i, tensor in enumerate(tensor_list):
nccl_util.copy_tensor(buffer[i], tensor)
return buffer
def _check_inputs_compatibility_for_scatter_gather(tensor, tensor_list):
"""Check the compatibility between tensor input and tensor list inputs."""
if not tensor_list:
raise RuntimeError("Got empty list of tensors.")
dtype = nccl_util.get_nccl_tensor_dtype(tensor)
shape = nccl_util.get_tensor_shape(tensor)
for t in tensor_list:
# check dtype
dt = nccl_util.get_nccl_tensor_dtype(t)
if dt != dtype:
raise RuntimeError("All tensor operands to scatter/gather must "
"have the same dtype. Got '{}' and '{}'"
"".format(dt, dtype))
# Note: typically CCL libraries only require that they have the same
# number of elements;
# Here we make it more strict -- we require exact shape match.
if nccl_util.get_tensor_shape(t) != shape:
raise RuntimeError("All tensor operands to scatter/gather must "
"have the same shape.")
| en | 0.787372 | # TODO(Hao): # (1) stream management, instead of using the default stream, # using a dedicate stream # (2) communicator management and support num_gpus > 2 per actor. A rendezvous class for different actor/task processes to meet. To initialize an NCCL collective communication group, different actors/tasks spawned in Ray in a collective group needs to meet each other to synchronize the NCCLUniqueID. This class guarantees they meet via the NCCLUniqueIDStore, initialized on the rank=0 process. Args: group_name (str): the unique user-specified group name. Meet at the named actor store. Args: timeout_s: timeout in seconds. Return: None Get the NCCLUniqueID from the store through Ray. Args: timeout_s: timeout in seconds. Return: str: the NCCLUniqueID if successful. Init an NCCL collective group. # TODO(Hao): change this to a be a cache # TODO(Hao): check version here # Setup the nccl uid using the store # Setup a tensor for barrier calls Init the NCCLUniqueID required for creating NCCL communicators. Destroy the group and release the NCCL communicators safely. # We also need a barrier call here. # destroy the communicator AllReduce the tensor across the collective group following options. Args: tensor: the tensor to be reduced, each tensor locates on a GPU allreduce_options: Returns: # obtain the communicator # obtain the stream: using default stream by now # TODO(Hao): implement a simple stream manager here # in-place allreduce Blocks until all processes reach this barrier. Args: barrier_options: Returns: Reduce tensor to a destination process following options. Args: tensor: the tensor to be reduced. reduce_options: reduce options Returns: None # in-place reduce Broadcast tensor to all other processes following options. Args: tensor: the tensor to be broadcasted. broadcast_options: broadcast options. Returns: None # in-place broadcast Allgather tensors across the group into a list of tensors. Args: tensor_list: the tensor list to store the results. tensor: the tensor to be allgather-ed across the group. allgather_options: allgather options. Returns: None Reducescatter a list of tensors across the group. Args: tensor: the output after reducescatter (could be unspecified). tensor_list: the list of tensor to be reduce and scattered. reducescatter_options: reducescatter options. Returns: None # get the send_ptr Create or use a cached NCCL communicator for the collective task. # TODO(Hao): later change this to use device keys and query from cache. # TODO(Hao): implement a thin wrapper Obtain an idle stream from a stream pool for the collective task. # TODO: implement a simple stream manager. # def _collective_call(self, *args): # """Private method to encapsulate all collective calls""" # pass Flatten the tensor for gather/scatter operations. Args: tensor_list: the list of tensors to be scattered/gathered. copy: whether the copy the tensors in tensor_list into the buffer. Returns: The flattened tensor buffer. # note we need a cupy dtype here. Check the compatibility between tensor input and tensor list inputs. # check dtype # Note: typically CCL libraries only requires they have the same # number of elements; # Here we make it more strict -- we require exact shape match. | 2.350681 | 2 |
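Both Rendezvous.meet() and get_nccl_id() in the record above implement the same poll-until-available-or-timeout loop around the named-actor store. A generic, framework-free sketch of that loop; the function name and the store referenced in the trailing comment are invented:
import datetime
import time
def poll_until(fetch, timeout_s=180, interval_s=1):
    """Call fetch() until it returns a truthy value or the deadline passes."""
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout_s)
    while datetime.datetime.now() < deadline:
        value = fetch()
        if value:
            return value
        time.sleep(interval_s)  # back off before retrying, as meet()/get_nccl_id() do
    raise RuntimeError("Timed out waiting for the rendezvous value.")
# e.g. uid = poll_until(lambda: some_store.get("nccl_uid"), timeout_s=30)  # some_store is hypothetical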
235.py | nanahyunpark/python_study | 0 | 6632949 | <gh_stars>0
def div_ans(num, level):
if(num == 1):
print(level)
return
if(num%2 == 0):
num = num//2
div_ans(num, level+1)
elif(num%2 == 1):
num = num//3
div_ans(num, level+1)
def main():
num = int(input())
div_ans(num, 0)
if(__name__ == '__main__'):
main()
| def div_ans(num, level):
if(num == 1):
print(level)
return
if(num%2 == 0):
num = num//2
div_ans(num, level+1)
elif(num%2 == 1):
num = num//3
div_ans(num, level+1)
def main():
num = int(input())
div_ans(num, 0)
if(__name__ == '__main__'):
main() | none | 1 | 3.850352 | 4 |
|
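div_ans above counts recursion depth until the number reaches 1, halving even values and integer-dividing odd values by 3, but it prints the depth instead of returning it. A small sketch that returns the count so it can be asserted against, assuming that halve-or-//3 rule is the intended behaviour:
def division_depth(num, level=0):
    if num == 1:
        return level
    if num % 2 == 0:
        return division_depth(num // 2, level + 1)
    return division_depth(num // 3, level + 1)
assert division_depth(8) == 3   # 8 -> 4 -> 2 -> 1
assert division_depth(10) == 2  # 10 -> 5 -> 5//3 == 1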
fatiando/mesher/tests/__init__.py | XuesongDing/fatiando | 179 | 6632950 | <reponame>XuesongDing/fatiando<filename>fatiando/mesher/tests/__init__.py<gh_stars>100-1000
"""
Tests for the fatiando.mesher package.
"""
| """
Tests for the fatiando.mesher package.
""" | en | 0.585076 | Tests for the fatiando.mesher package. | 1.001592 | 1 |