"""Functional tests using pytest-flask."""
from backend import models
from pytest import mark
from tests import asserts
@mark.parametrize('endpoint', ['reports.list_submits'], indirect=True)
class TestListSubmits:
@mark.usefixtures('grant_admin')
@mark.parametrize('query', indirect=True, argvalues=[
{'resource_type': "benchmark"},
{'resource_type': "site"},
{'resource_type': "flavor"},
{'upload_before': "3000-01-01"},
{'upload_after': "2000-01-01"},
{}, # Multiple reports
{'sort_by': "+upload_datetime"},
])
def test_200(self, response_GET, url):
"""GET method succeeded 200."""
assert response_GET.status_code == 200
asserts.match_pagination(response_GET.json, url)
assert response_GET.json['items'] != []
for item in response_GET.json['items']:
asserts.match_submit(item)
asserts.match_query(item, url)
@mark.parametrize('query', indirect=True, argvalues=[
{'upload_before': "3000-01-01"},
])
def test_401(self, response_GET):
"""GET method fails 401 if not logged in."""
assert response_GET.status_code == 401
@mark.usefixtures('grant_logged')
@mark.parametrize('query', indirect=True, argvalues=[
{'upload_before': "3000-01-01"},
])
def test_403(self, response_GET):
"""GET method fails 403 if forbidden."""
assert response_GET.status_code == 403
@mark.usefixtures('grant_admin')
@mark.parametrize('query', indirect=True, argvalues=[
{'bad_key': "This is a non expected query key"},
{'sort_by': "Bad sort command"}
])
def test_422(self, response_GET):
"""GET method fails 422 if bad request body."""
assert response_GET.status_code == 422
@mark.parametrize('endpoint', ['reports.list_claims'], indirect=True)
class TestListClaims:
@mark.usefixtures('grant_admin')
@mark.parametrize('query', indirect=True, argvalues=[
{'upload_before': "3000-01-01"},
{'upload_after': "1000-01-01"},
{}, # Empty query
{'sort_by': "+upload_datetime"},
])
def test_200(self, response_GET, url):
"""GET method succeeded 200."""
assert response_GET.status_code == 200
asserts.match_pagination(response_GET.json, url)
assert response_GET.json['items'] != []
for item in response_GET.json['items']:
asserts.match_query(item, url)
item.pop('uploader')
item.pop('resource_type')
assert models.Result._claim_report_class.query.\
filter_by(**item).first()
@mark.parametrize('query', indirect=True, argvalues=[
{'upload_before': "3000-01-01"},
])
def test_401(self, response_GET):
"""GET method fails 401 if not logged in."""
assert response_GET.status_code == 401
@mark.usefixtures('grant_logged')
@mark.parametrize('query', indirect=True, argvalues=[
{'upload_before': "3000-01-01"},
])
def test_403(self, response_GET):
"""GET method fails 403 if forbidden."""
assert response_GET.status_code == 403
@mark.usefixtures('grant_admin')
@mark.parametrize('query', indirect=True, argvalues=[
{'bad_key': "This is a non expected query key"},
{'sort_by': "Bad sort command"}
])
def test_422(self, response_GET):
"""GET method fails 422 if bad request body."""
assert response_GET.status_code == 422
# @mark.parametrize('endpoint', ['reports.approve_claim'], indirect=True)
# class TestApproveClaim:
# @fixture(scope='function')
# def url(request_id, claim, query):
# request_id = request_id if request_id else claim.id
# return url_for('reports.approve_claim', id=request_id, **query)
# @mark.usefixtures('grant_admin')
# def test_204(self, response_POST, claim):
# """POST method succeeded 200."""
# assert response_POST.status_code == 204
# assert claim.status.name == "approved"
# def test_401(self, response_POST, claim):
# """POST method fails 401 if not authorized."""
# assert response_POST.status_code == 401
# assert claim.status.name == "on_review"
# @mark.usefixtures('grant_logged')
# def test_403(self, response_POST, claim):
# """POST method fails 403 if method forbidden."""
# assert response_POST.status_code == 403
# assert claim.status.name == "on_review"
# @mark.usefixtures('grant_admin')
# @mark.parametrize('request_id', [uuid4()], indirect=True)
# def test_404(self, response_POST, claim):
# """POST method fails 404 if no id found."""
# assert response_POST.status_code == 404
# assert claim.status.name == "on_review"
# @mark.parametrize('endpoint', ['reports.reject_claim'], indirect=True)
# @mark.parametrize('claim_id', indirect=True, argvalues=[
# uuid4() # Random uuid4 generation
# ])
# class TestRejectClaim:
# @mark.usefixtures('grant_admin')
# def test_204(self, response_POST, claim):
# """POST method succeeded 200."""
# assert response_POST.status_code == 204
# assert None == models.Claim.query.get(claim.id)
# def test_401(self, response_POST, claim):
# """POST method fails 401 if not authorized."""
# assert response_POST.status_code == 401
# assert claim.status.name == "on_review"
# @mark.usefixtures('grant_logged')
# def test_403(self, response_POST, claim):
# """POST method fails 403 if method forbidden."""
# assert response_POST.status_code == 403
# assert claim.status.name == "on_review"
# @mark.usefixtures('grant_admin')
# @mark.parametrize('request_id', [uuid4()], indirect=True)
# def test_404(self, response_POST, claim):
# """POST method fails 404 if no id found."""
# assert response_POST.status_code == 404
# assert claim.status.name == "on_review"
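# The tests above rely on indirect parametrization: 'endpoint' and 'query' values
# are consumed by fixtures (typically defined in conftest.py) that build the URL
# and perform the request. A minimal illustrative sketch of such fixtures follows,
# assuming pytest-flask's 'client' fixture; the project's actual fixture and
# grant_* implementations may differ.
#
# from flask import url_for
# from pytest import fixture
#
# @fixture(scope='function')
# def endpoint(request):
#     return request.param
#
# @fixture(scope='function')
# def query(request):
#     return request.param if hasattr(request, 'param') else {}
#
# @fixture(scope='function')
# def url(endpoint, query):
#     return url_for(endpoint, **query)
#
# @fixture(scope='function')
# def response_GET(client, url):
#     return client.get(url)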
#
# caloStage2Params_2017_v1_10
# change w.r.t. v1_8_4: 92X Layer 1 SF
#
import FWCore.ParameterSet.Config as cms
from L1Trigger.L1TCalorimeter.caloParams_cfi import caloParamsSource
import L1Trigger.L1TCalorimeter.caloParams_cfi
caloStage2Params = L1Trigger.L1TCalorimeter.caloParams_cfi.caloParams.clone()
# towers
caloStage2Params.towerLsbH = cms.double(0.5)
caloStage2Params.towerLsbE = cms.double(0.5)
caloStage2Params.towerLsbSum = cms.double(0.5)
caloStage2Params.towerNBitsH = cms.int32(8)
caloStage2Params.towerNBitsE = cms.int32(8)
caloStage2Params.towerNBitsSum = cms.int32(9)
caloStage2Params.towerNBitsRatio = cms.int32(3)
caloStage2Params.towerEncoding = cms.bool(True)
# regions
caloStage2Params.regionLsb = cms.double(0.5)
caloStage2Params.regionPUSType = cms.string("None")
caloStage2Params.regionPUSParams = cms.vdouble()
# EG
caloStage2Params.egLsb = cms.double(0.5)
caloStage2Params.egSeedThreshold = cms.double(2.)
caloStage2Params.egNeighbourThreshold = cms.double(1.)
caloStage2Params.egHcalThreshold = cms.double(0.)
caloStage2Params.egTrimmingLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/egTrimmingLUT_10_v16.01.19.txt")
caloStage2Params.egMaxHcalEt = cms.double(0.)
caloStage2Params.egMaxPtHOverE = cms.double(128.)
caloStage2Params.egMaxHOverELUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/HoverEIdentification_0.995_v15.12.23.txt")
caloStage2Params.egBypassExtHOverE = cms.uint32(0)
caloStage2Params.egCompressShapesLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/egCompressLUT_v4.txt")
caloStage2Params.egShapeIdType = cms.string("compressed")
caloStage2Params.egShapeIdVersion = cms.uint32(0)
caloStage2Params.egShapeIdLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/shapeIdentification_adapt0.99_compressedieta_compressedE_compressedshape_v15.12.08.txt")#Not used any more in the current emulator version, merged with calibration LUT
caloStage2Params.egPUSType = cms.string("None")
caloStage2Params.egIsolationType = cms.string("compressed")
caloStage2Params.egIsoLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/EG_Iso_LUT_04_04_2017.txt")
caloStage2Params.egIsoLUTFile2 = cms.FileInPath("L1Trigger/L1TCalorimeter/data/EG_LooseIsoIdentification_adapt_extrap_FW_v16.08.08.2.txt")
caloStage2Params.egIsoAreaNrTowersEta = cms.uint32(2)
caloStage2Params.egIsoAreaNrTowersPhi = cms.uint32(4)
caloStage2Params.egIsoVetoNrTowersPhi = cms.uint32(2)
#caloStage2Params.egIsoPUEstTowerGranularity = cms.uint32(1)
#caloStage2Params.egIsoMaxEtaAbsForTowerSum = cms.uint32(4)
#caloStage2Params.egIsoMaxEtaAbsForIsoSum = cms.uint32(27)
caloStage2Params.egPUSParams = cms.vdouble(1,4,32) #Isolation window in firmware goes up to abs(ieta)=32 for now
caloStage2Params.egCalibrationType = cms.string("compressed")
caloStage2Params.egCalibrationVersion = cms.uint32(0)
caloStage2Params.egCalibrationLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/EG_Calibration_LUT_FW_v17.04.04_shapeIdentification_adapt0.99_compressedieta_compressedE_compressedshape_v15.12.08_correct.txt")
# Tau
caloStage2Params.tauLsb = cms.double(0.5)
caloStage2Params.tauSeedThreshold = cms.double(0.)
caloStage2Params.tauNeighbourThreshold = cms.double(0.)
caloStage2Params.tauIsoAreaNrTowersEta = cms.uint32(2)
caloStage2Params.tauIsoAreaNrTowersPhi = cms.uint32(4)
caloStage2Params.tauIsoVetoNrTowersPhi = cms.uint32(2)
caloStage2Params.tauPUSType = cms.string("None")
caloStage2Params.tauIsoLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/Tau_Iso_LUT_Option_22_2017_FW_v10_261017.0.0.txt")
caloStage2Params.tauIsoLUTFile2 = cms.FileInPath("L1Trigger/L1TCalorimeter/data/Tau_Iso_LUT_Option_22_2017_FW_v10_261017.0.0.txt")
caloStage2Params.tauCalibrationLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/Tau_Calibration_LUT_261017.0.0.txt")
caloStage2Params.tauCompressLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/tauCompressAllLUT_12bit_v3.txt")
caloStage2Params.tauPUSParams = cms.vdouble(1,4,32)
# jets
caloStage2Params.jetLsb = cms.double(0.5)
caloStage2Params.jetSeedThreshold = cms.double(4.0)
caloStage2Params.jetNeighbourThreshold = cms.double(0.)
caloStage2Params.jetPUSType = cms.string("ChunkyDonut")
caloStage2Params.jetBypassPUS = cms.uint32(0)
# Calibration options
# function6PtParams22EtaBins or None
#caloStage2Params.jetCalibrationType = cms.string("None")
#caloStage2Params.jetCalibrationType = cms.string("function8PtParams22EtaBins")
caloStage2Params.jetCalibrationType = cms.string("LUT")
# Vector with 6 parameters per eta bin, from low eta to high
# 1,0,1,0,1,1 gives no correction
# must be in this form as it may require > 255 arguments
# Or a vector with 8 parameters per eta bin, which caps the correction value below a given pT:
# the first 6 parameters are as above, the last two are the maximum correction and the L1 pT below which the cap is applied, respectively
# (an illustrative slicing sketch follows the assignment below)
jetCalibParamsVector = cms.vdouble()
jetCalibParamsVector.extend([
1,0,1,0,1,1,1.36123039014,1024,
1,0,1,0,1,1,1.37830172245,1024,
1,0,1,0,1,1,1.37157036457,1024,
1,0,1,0,1,1,1.42460009989,1024,
10.1179757811,-697.422255848,55.9767511168,599.040770412,0.00930772659892,-21.9921521313,1.77585386314,24.1202894336,
12.2578170485,-736.96846599,45.3225355911,848.976802835,0.00946235693865,-21.7970133915,2.04623980351,19.6049149791,
14.0198255047,-769.175319944,38.687351315,1072.9785137,0.00951954709279,-21.6277409602,2.08021511285,22.265051562,
14.119589176,-766.199501821,38.7767169666,1059.63374337,0.00952979125289,-21.6477483043,2.05901166216,23.8125466978,
13.7594864391,-761.860391454,39.9060363401,1019.30588542,0.00952105483129,-21.6814176696,2.03808638982,22.2127275989,
10.2635352836,-466.890522023,32.5408463829,2429.03382746,0.0111274121697,-22.0890253377,2.04880080215,22.5083699943,
5.46086027683,-150.888778124,18.3292242153,16968.6469599,0.0147496053457,-22.4089831889,2.08107691501,22.4129703515,
5.46086027683,-150.888778124,18.3292242153,16968.6469599,0.0147496053457,-22.4089831889,2.08107691501,22.4129703515,
10.2635352836,-466.890522023,32.5408463829,2429.03382746,0.0111274121697,-22.0890253377,2.04880080215,22.5083699943,
13.7594864391,-761.860391454,39.9060363401,1019.30588542,0.00952105483129,-21.6814176696,2.03808638982,22.2127275989,
14.119589176,-766.199501821,38.7767169666,1059.63374337,0.00952979125289,-21.6477483043,2.05901166216,23.8125466978,
14.0198255047,-769.175319944,38.687351315,1072.9785137,0.00951954709279,-21.6277409602,2.08021511285,22.265051562,
12.2578170485,-736.96846599,45.3225355911,848.976802835,0.00946235693865,-21.7970133915,2.04623980351,19.6049149791,
10.1179757811,-697.422255848,55.9767511168,599.040770412,0.00930772659892,-21.9921521313,1.77585386314,24.1202894336,
1,0,1,0,1,1,1.42460009989,1024,
1,0,1,0,1,1,1.37157036457,1024,
1,0,1,0,1,1,1.37830172245,1024,
1,0,1,0,1,1,1.36123039014,1024
])
caloStage2Params.jetCalibrationParams = jetCalibParamsVector
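# Illustrative sketch (not part of the configuration): the flat vector above holds
# 8 parameters per eta bin for 22 eta bins, ordered from low eta to high. Grouping
# it per bin, e.g. for inspection, could look like the following; the helper names
# are assumptions for illustration only.
#
# params_per_bin = 8
# flat = list(jetCalibParamsVector)
# per_eta_bin = [flat[i:i + params_per_bin] for i in range(0, len(flat), params_per_bin)]
# assert len(per_eta_bin) == 22   # one 8-parameter block per eta bin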
caloStage2Params.jetCompressPtLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/lut_pt_compress_2017v1.txt")
caloStage2Params.jetCompressEtaLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/lut_eta_compress_2017v1.txt")
caloStage2Params.jetCalibrationLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/lut_calib_2017v3_mean.txt")
# sums: 0=ET, 1=HT, 2=MET, 3=MHT
caloStage2Params.etSumLsb = cms.double(0.5)
caloStage2Params.etSumEtaMin = cms.vint32(1, 1, 1, 1, 1)
caloStage2Params.etSumEtaMax = cms.vint32(28, 26, 28, 26, 28)
caloStage2Params.etSumEtThreshold = cms.vdouble(0., 30., 0., 30., 0.) # only 2nd (HT) and 4th (MHT) values applied
caloStage2Params.etSumMetPUSType = cms.string("LUT") # et threshold from this LUT supersedes et threshold in line above
caloStage2Params.etSumEttPUSType = cms.string("None")
caloStage2Params.etSumEcalSumPUSType = cms.string("None")
caloStage2Params.etSumBypassMetPUS = cms.uint32(0)
caloStage2Params.etSumBypassEttPUS = cms.uint32(1)
caloStage2Params.etSumBypassEcalSumPUS = cms.uint32(1)
caloStage2Params.etSumXCalibrationType = cms.string("None")
caloStage2Params.etSumYCalibrationType = cms.string("None")
caloStage2Params.etSumEttCalibrationType = cms.string("None")
caloStage2Params.etSumEcalSumCalibrationType = cms.string("None")
caloStage2Params.etSumMetPUSLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/lut_towEtThresh_2017v7.txt")
caloStage2Params.etSumEttPUSLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/lut_towEtThresh_dummy.txt")
caloStage2Params.etSumEcalSumPUSLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/lut_towEtThresh_dummy.txt")
caloStage2Params.etSumXCalibrationLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/lut_etSumPUS_dummy.txt")
caloStage2Params.etSumYCalibrationLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/lut_etSumPUS_dummy.txt")
caloStage2Params.etSumEttCalibrationLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/lut_etSumPUS_dummy.txt")
caloStage2Params.etSumEcalSumCalibrationLUTFile = cms.FileInPath("L1Trigger/L1TCalorimeter/data/lut_etSumPUS_dummy.txt")
# Layer 1 LUT specification
#
# Et-dependent scale factors
# ECAL/HCAL scale factors will be a 13*28 array:
#   28 eta scale factors (1-28)
#   in 13 ET bins (6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, Max)
#   So, index = etBin*28+ieta
#   (an illustrative lookup sketch follows the HF table below)
# ECAL and HCAL calibrations using mean
caloStage2Params.layer1ECalScaleETBins = cms.vint32([6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, 256])
caloStage2Params.layer1ECalScaleFactors = cms.vdouble([
1.197340, 1.198549, 1.168785, 1.173931, 1.191020, 1.209413, 1.196497, 1.209573, 1.195505, 1.231375, 1.235413, 1.244471, 1.283982, 1.325228, 1.334809, 1.353722, 1.428926, 2.126767,
1.450591, 1.589677, 1.580657, 1.629203, 1.564859, 1.577755, 1.625670, 1.594695, 1.424415, 1.321468, 1.135290, 1.151154, 1.125139, 1.130923, 1.135517, 1.148669, 1.147089, 1.154148,
1.183942, 1.187542, 1.191086, 1.190894, 1.249920, 1.258438, 1.273714, 1.287786, 1.342814, 2.053505, 1.313293, 1.461993, 1.451037, 1.465911, 1.438294, 1.455272, 1.501573, 1.477581,
1.339441, 1.245791, 1.129958, 1.107732, 1.102933, 1.100946, 1.120345, 1.124828, 1.126518, 1.136332, 1.145752, 1.175010, 1.179295, 1.188173, 1.211749, 1.224195, 1.234790, 1.239917,
1.353503, 2.008072, 1.252317, 1.365824, 1.378117, 1.403996, 1.356526, 1.385768, 1.434346, 1.415377, 1.298908, 1.216760, 1.102309, 1.097450, 1.090676, 1.084893, 1.091920, 1.109602,
1.103849, 1.112758, 1.126005, 1.137318, 1.120697, 1.142343, 1.150537, 1.201907, 1.168302, 1.188819, 1.228637, 1.936608, 1.224452, 1.326251, 1.342814, 1.353976, 1.325363, 1.359490,
1.399696, 1.364164, 1.276219, 1.195622, 1.076251, 1.069282, 1.066564, 1.074088, 1.070074, 1.084258, 1.086150, 1.076595, 1.092879, 1.114732, 1.101672, 1.105921, 1.119918, 1.145530,
1.167513, 1.147558, 1.191129, 1.809826, 1.202365, 1.287467, 1.304235, 1.317980, 1.291666, 1.317809, 1.374505, 1.342310, 1.254258, 1.175981, 1.061569, 1.053739, 1.050862, 1.052114,
1.057964, 1.073229, 1.058238, 1.066881, 1.063274, 1.090312, 1.075247, 1.088771, 1.097769, 1.135655, 1.119135, 1.123404, 1.172366, 1.741823, 1.173261, 1.258103, 1.279940, 1.279914,
1.276035, 1.291460, 1.347826, 1.321888, 1.237275, 1.159756, 1.058557, 1.043179, 1.038852, 1.040351, 1.047275, 1.056788, 1.051126, 1.058392, 1.051716, 1.085330, 1.061614, 1.073405,
1.081882, 1.109701, 1.103221, 1.100014, 1.149658, 1.650972, 1.163525, 1.237588, 1.259934, 1.268718, 1.254323, 1.276469, 1.335477, 1.298039, 1.226921, 1.151347, 1.046273, 1.035069,
1.033646, 1.034902, 1.037039, 1.055578, 1.043272, 1.044873, 1.045536, 1.067714, 1.058866, 1.060444, 1.067633, 1.101122, 1.083575, 1.089725, 1.133219, 1.530750, 1.150335, 1.220118,
1.237836, 1.251671, 1.239206, 1.262410, 1.317311, 1.279968, 1.221607, 1.145441, 1.039182, 1.033807, 1.026964, 1.030851, 1.035037, 1.046218, 1.034010, 1.038878, 1.038807, 1.061946,
1.047964, 1.052194, 1.061816, 1.089591, 1.077566, 1.075823, 1.118349, 1.441061, 1.144726, 1.205469, 1.228561, 1.240078, 1.224216, 1.249805, 1.307356, 1.275350, 1.210373, 1.139566,
1.033242, 1.027776, 1.025388, 1.025144, 1.029551, 1.045796, 1.031684, 1.032839, 1.032635, 1.060448, 1.040870, 1.047611, 1.060231, 1.075297, 1.066971, 1.073752, 1.113008, 1.383509,
1.129704, 1.198243, 1.222456, 1.234389, 1.224164, 1.243444, 1.294541, 1.265006, 1.178805, 1.135663, 1.029008, 1.023628, 1.019729, 1.022226, 1.024997, 1.036473, 1.027582, 1.028378,
1.029302, 1.047454, 1.035725, 1.038674, 1.047384, 1.068694, 1.060923, 1.063771, 1.100034, 1.333569, 1.126848, 1.185826, 1.209725, 1.224937, 1.212785, 1.236321, 1.284212, 1.256900,
1.115347, 1.114443, 1.023628, 1.017810, 1.014326, 1.015847, 1.018518, 1.028086, 1.020245, 1.020984, 1.022730, 1.038105, 1.027760, 1.028804, 1.041350, 1.059088, 1.051748, 1.053073,
1.087165, 1.252114, 1.119432, 1.174365, 1.196021, 1.210201, 1.200302, 1.226177, 1.270829, 1.244451, 1.048434, 1.049180, 1.018333, 1.014078, 1.010072, 1.010963, 1.013350, 1.020835,
1.014829, 1.016063, 1.016330, 1.026939, 1.021395, 1.022569, 1.033490, 1.047872, 1.042920, 1.044526, 1.072217, 1.185529, 1.108676, 1.161552, 1.183706, 1.197698, 1.189131, 1.212932,
1.255325, 1.225494, 1.048434, 1.049180
])
caloStage2Params.layer1HCalScaleETBins = cms.vint32([6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, 256])
caloStage2Params.layer1HCalScaleFactors = cms.vdouble([
1.488772, 1.486679, 1.482133, 1.479425, 1.485548, 1.493674, 1.492273, 1.493985, 1.492969, 1.509587, 1.506320, 1.515023, 1.536133, 1.531514, 1.526063, 1.523588, 1.484326, 1.331186,
1.355782, 1.387601, 1.342361, 1.360238, 1.360894, 1.357810, 1.361534, 1.375109, 1.424183, 1.501489, 1.399359, 1.404418, 1.398637, 1.399352, 1.396019, 1.410175, 1.410339, 1.406340,
1.407406, 1.418949, 1.419240, 1.429573, 1.439777, 1.439575, 1.437873, 1.429671, 1.386724, 1.231026, 1.273743, 1.302278, 1.247894, 1.253293, 1.255920, 1.251581, 1.251463, 1.265636,
1.304193, 1.359426, 1.344773, 1.350364, 1.344524, 1.345861, 1.341056, 1.353025, 1.354453, 1.351831, 1.347695, 1.364280, 1.359560, 1.372041, 1.381087, 1.385518, 1.382776, 1.370359,
1.327976, 1.177840, 1.228646, 1.249099, 1.186989, 1.193231, 1.197696, 1.195938, 1.196179, 1.206994, 1.244052, 1.290444, 1.312420, 1.314244, 1.309209, 1.307359, 1.307022, 1.316532,
1.318803, 1.313482, 1.308246, 1.323321, 1.325338, 1.330967, 1.337016, 1.338398, 1.339131, 1.327637, 1.286923, 1.141686, 1.190420, 1.213207, 1.149381, 1.160818, 1.159674, 1.159706,
1.158536, 1.169460, 1.207328, 1.248669, 1.276808, 1.278511, 1.274205, 1.271484, 1.270841, 1.278961, 1.282849, 1.277440, 1.273669, 1.284206, 1.284441, 1.290392, 1.294976, 1.296487,
1.298681, 1.286720, 1.244613, 1.110049, 1.157259, 1.176192, 1.112071, 1.119705, 1.123068, 1.121734, 1.123006, 1.132017, 1.169278, 1.213867, 1.242737, 1.243424, 1.240171, 1.239669,
1.236894, 1.241291, 1.244473, 1.241839, 1.234634, 1.244791, 1.243586, 1.250908, 1.250071, 1.254379, 1.257426, 1.244129, 1.200212, 1.077383, 1.122736, 1.139789, 1.076388, 1.083750,
1.085063, 1.085238, 1.086152, 1.095831, 1.131103, 1.174074, 1.215358, 1.216519, 1.212013, 1.211151, 1.210772, 1.213001, 1.216205, 1.212945, 1.203300, 1.212112, 1.212353, 1.216219,
1.216911, 1.220303, 1.222827, 1.209306, 1.164908, 1.053285, 1.098127, 1.112139, 1.046242, 1.053812, 1.054951, 1.055403, 1.056634, 1.065248, 1.100811, 1.146619, 1.189579, 1.190152,
1.186635, 1.187759, 1.184085, 1.184657, 1.188523, 1.186424, 1.177457, 1.183637, 1.182490, 1.187512, 1.187172, 1.190456, 1.192421, 1.180374, 1.138839, 1.034745, 1.078450, 1.089012,
1.021600, 1.028598, 1.029529, 1.030437, 1.033001, 1.039217, 1.075602, 1.118267, 1.171107, 1.168946, 1.166512, 1.166769, 1.161480, 1.165436, 1.165121, 1.162166, 1.153355, 1.158267,
1.159683, 1.162556, 1.161758, 1.164033, 1.169004, 1.154110, 1.114707, 1.016696, 1.060155, 1.070569, 1.000000, 1.005364, 1.007959, 1.009434, 1.009694, 1.015478, 1.051155, 1.095691,
1.147927, 1.150166, 1.146134, 1.147374, 1.142142, 1.143955, 1.144191, 1.141270, 1.134016, 1.138813, 1.136992, 1.142244, 1.139741, 1.140879, 1.146482, 1.132095, 1.091087, 1.003826,
1.042366, 1.053090, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.023234, 1.031095, 1.075333, 1.124485, 1.126611, 1.122901, 1.121996, 1.119331, 1.121150, 1.122024, 1.116685,
1.110000, 1.112285, 1.113655, 1.114063, 1.112371, 1.111978, 1.116022, 1.101930, 1.061707, 1.000000, 1.024583, 1.031882, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.055449,
1.003816, 1.046213, 1.092452, 1.092536, 1.091150, 1.090404, 1.085964, 1.085791, 1.086160, 1.082304, 1.075379, 1.074526, 1.072966, 1.073412, 1.070047, 1.069312, 1.070556, 1.054325,
1.019816, 1.000000, 1.000000, 1.001951, 1.000000, 1.000000, 1.000000, 1.000000, 1.000301, 1.032098, 1.000000, 1.005659, 1.051117, 1.050717, 1.049425, 1.047891, 1.044951, 1.044487,
1.042311, 1.036290, 1.030471, 1.028289, 1.022935, 1.020965, 1.017667, 1.013806, 1.014022, 1.004382, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000,
1.000000, 1.000000, 1.000000, 1.000000
])
# HF 1x1 scale factors will be a 13*12 array:
# 12 eta scale factors (30-41)
# in 13 ET bins (6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, Max)
# So, index = etBin*12+ietaHF
# HF energies were formerly multiplied by 0.7; this has been removed
caloStage2Params.layer1HFScaleETBins = cms.vint32([6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, 256])
caloStage2Params.layer1HFScaleFactors = cms.vdouble([
2.378339, 1.502094, 1.558828, 1.468909, 1.388092, 1.444754, 1.493556, 1.541491, 1.647650, 1.812072, 2.791145, 2.844066,
2.111653, 1.312496, 1.351124, 1.291042, 1.239054, 1.278956, 1.315620, 1.361558, 1.449292, 1.571425, 2.709180, 2.717564,
1.963179, 1.217324, 1.256356, 1.202818, 1.162660, 1.204208, 1.231526, 1.276481, 1.351362, 1.457253, 2.613049, 2.644112,
1.864273, 1.162345, 1.199680, 1.153738, 1.119396, 1.152063, 1.182551, 1.225995, 1.291988, 1.390649, 2.529912, 2.581591,
1.752451, 1.117623, 1.147027, 1.110546, 1.079779, 1.114737, 1.142444, 1.178901, 1.242175, 1.336171, 2.407025, 2.526142,
1.663160, 1.074331, 1.106646, 1.072905, 1.049034, 1.080200, 1.108287, 1.143216, 1.199594, 1.291001, 2.232567, 2.450402,
1.573166, 1.048392, 1.078650, 1.048091, 1.024573, 1.055920, 1.081953, 1.115248, 1.170655, 1.256432, 2.070575, 2.389922,
1.489765, 1.024323, 1.055465, 1.029036, 1.007379, 1.036369, 1.061089, 1.092431, 1.145947, 1.227190, 1.925361, 2.348549,
1.404872, 1.006701, 1.035613, 1.009332, 1.007379, 1.017418, 1.040979, 1.071060, 1.120826, 1.197973, 1.791211, 2.243741,
1.339055, 1.006701, 1.019214, 1.009332, 1.007379, 1.003242, 1.026977, 1.054007, 1.099699, 1.168445, 1.688074, 2.103020,
1.272889, 1.006701, 1.006044, 1.009332, 1.007379, 1.003242, 1.009030, 1.033555, 1.074019, 1.135660, 1.573541, 1.918549,
1.188140, 1.006701, 1.006044, 1.009332, 1.007379, 1.003242, 1.009030, 1.001923, 1.039081, 1.094883, 1.434509, 1.705331,
1.108268, 1.006701, 1.006044, 1.009332, 1.007379, 1.003242, 1.009030, 1.001923, 1.010006, 1.057960, 1.301315, 1.523940
])
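# Illustrative lookup sketch (not part of the configuration): with the layouts
# described above, a scale factor is found by locating the ET bin and then offsetting
# by the eta index. The helper below is an assumption for illustration only; the
# Layer 1 emulator's own indexing (in particular the exact 0-/1-based ieta
# convention) is authoritative.
#
# def layer1_scale_factor(et, ieta, et_bins, factors, n_eta=28, first_ieta=1):
#     et_bin = next((i for i, edge in enumerate(et_bins) if et < edge), len(et_bins) - 1)
#     return factors[et_bin * n_eta + (ieta - first_ieta)]
#
# ecal_sf = layer1_scale_factor(18., 5,
#                               list(caloStage2Params.layer1ECalScaleETBins),
#                               list(caloStage2Params.layer1ECalScaleFactors))
# hf_sf = layer1_scale_factor(18., 33,
#                             list(caloStage2Params.layer1HFScaleETBins),
#                             list(caloStage2Params.layer1HFScaleFactors),
#                             n_eta=12, first_ieta=30)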
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""Devices for the Refsans VSD."""
import struct
from nicos.core import ADMIN, SIMULATION, Attach, Override, Param, Readable, \
dictof, oneof, requires, status, usermethod
from nicos.devices.tango import PyTangoDevice
class VSDIO(PyTangoDevice, Readable):
"""
Basic IO Device object for devices in refsans' vsd rack
contains common things for all devices.
"""
hardware_access = True
parameters = {
'address': Param('Starting offset (words) of IO area',
type=oneof(12288),
mandatory=True, settable=False,
userparam=False, default=12288),
'firmware': Param('Firmware Version', settable=False,
type=str, mandatory=False, volatile=True),
}
def doInit(self, mode):
# switch off watchdog, important before doing any write access
if mode != SIMULATION:
self._dev.WriteOutputWord((0x1120, 0))
#
# access-helpers for accessing the fields inside the IO area
#
def _readU16(self, addr):
# reads a uint16 from self.address + addr
value = self._dev.ReadOutputWords((self.address + addr, 1))[0]
self.log.debug('_readU16(%d): -> %d', addr, value)
return value
def _readI16(self, addr):
# reads a int16 from self.address + addr
value = self._dev.ReadOutputWords((self.address + addr, 1))[0]
if value > 32767:
value = value - 65536
self.log.info('_readI16(%d): -> %d', addr, value)
return value
def _writeU16(self, addr, value):
# writes a uint16 to self.address+addr
value = int(value)
self.log.debug('_writeU16(%d, %d)', addr, value)
self._dev.WriteOutputWord((self.address + addr, value))
def _readU32(self, addr):
# reads a uint32 from self.address + addr .. self.address + addr + 1
value = self._dev.ReadOutputWords((self.address + addr, 2))
value = struct.unpack('=I', struct.pack('<2H', *value))[0]
self.log.debug('_readU32(%d): -> %d', addr, value)
return value
def _writeU32(self, addr, value):
# writes a uint32 to self.address + addr
value = int(value)
self.log.debug('_writeU32(%d, %d)', addr, value)
value = struct.unpack('<2H', struct.pack('=I', value))
self._dev.WriteOutputWords(tuple([self.address + addr]) + value)
@requires(level=ADMIN)
def _load_factory(self):
# QAD should be
# pwd([0xFFFF,2])
# write('kommando',[0xFFFF,2])
# see Nicos old
self._dev.WriteOutputWords((12338, 25759, 245))
self._dev.WriteOutputWords((12340, 65535, 2))
ACK = self._dev.ReadOutputWord(12363)
NAK = self._dev.ReadOutputWord(12364)
self.log.info('_load_factory %d %d', ACK, NAK)
# mapping of user selectable channel name to BYTE_OFFSET, scaling and unit
_HW_AnalogChannels = dict(
User1Voltage=(200, 0.01, 'V-foo'),
User1Current=(202, 0.01, 'mA-foo'),
User2Voltage=(204, 0.01, 'V-foo'),
User2Current=(206, 0.01, 'mA-foo'),
Temperature1=(208, 0.01, 'degC'),
Temperature2=(210, 0.01, 'degC'),
Temperature3=(212, 0.01, 'degC'),
Temperature4=(214, 0.01, 'degC'),
Temperature5=(344, 0.01, 'degC'),
Temperature6=(346, 0.01, 'degC'),
Temperature7=(348, 0.01, 'degC'),
Temperature8=(350, 0.01, 'degC'),
Media1Voltage=(300, 0.01, 'V'),
Media1Current=(302, 0.01, 'mA'),
Media2Voltage=(304, 0.01, 'V'),
Media2Current=(306, 0.01, 'mA'),
Media3Voltage=(308, 0.01, 'V'),
Media3Current=(310, 0.01, 'mA'),
Media4Voltage=(312, 0.01, 'V'),
Media4Current=(314, 0.01, 'mA'),
Air1Pressure=(316, 0.01, 'bar'),
Air2Pressure=(318, 0.01, 'bar'),
Water1Temp=(320, 0.01, 'degC'),
Water1Flow=(322, 0.01, 'l/min'),
Water2Temp=(324, 0.01, 'degC'),
Water2Flow=(326, 0.01, 'l/min'),
Water3Temp=(328, 0.01, 'degC'),
Water3Flow=(330, 0.01, 'l/min'),
Water4Temp=(332, 0.01, 'degC'),
Water4Flow=(334, 0.01, 'l/min'),
Water5Temp=(336, 0.01, 'degC'),
Water5Flow=(338, 0.01, 'l/min'),
Water1Pressure=(340, 0.01, 'bar'),
Water2Pressure=(342, 0.01, 'bar'),
X16Voltage1=(352, 0.01, 'V'),
X16Voltage2=(354, 0.01, 'V'),
X16Voltage3=(356, 0.01, 'V'),
X16Voltage4=(358, 0.01, 'V'),
)
# mapping of user selectable channel name to BYTE_OFFSET, bit number
_HW_DigitalChannels = dict(
(('Merker%d' % i, (160 + 2 * (i // 16), i % 16))
for i in range(128, 255)),
ControllerStatus=(148, 0),
TempVibration=(148, 1),
ChopperEnable1=(148, 2),
ChopperEnable2=(148, 3),
AkkuPower=(148, 4),
PowerBreakdown=(148, 5),
SolenoidValve=(148, 6),
PowerSupplyUSV=(148, 7),
PowerSupplyNormal=(148, 8),
VSD_User1DigitalInput=(154, 0),
VSD_User2DigitalInput=(154, 1),
VSD_User3DigitalInput1=(154, 2),
VSD_User3DigitalInput2=(154, 3),
VSD_User3DigitalInput3=(154, 4),
VSD_User4DigitalInput1=(154, 5),
VSD_User4DigitalInput2=(154, 6),
VSD_User4DigitalInput3=(156, 7),
VSD_User1DigitalOutput1=(156, 0),
VSD_User1DigitalOutput2=(156, 1),
VSD_User2DigitalOutput1=(156, 2),
VSD_User2DigitalOutput2=(156, 3),
VSD_User3DigitalOutput1=(156, 4),
VSD_User3DigitalOutput2=(156, 5),
VSD_User3DigitalOutput3=(156, 6),
VSD_User4DigitalOutput1=(156, 7),
VSD_User4DigitalOutput2=(156, 8),
VSD_User4DigitalOutput3=(156, 9),
Media_DigitalOutput1=(158, 0),
Media_DigitalOutput2=(158, 1),
Media_DigitalOutput3=(158, 2),
Media_DigitalOutput4=(158, 3),
)
#
# Hardware abstraction: which actions do we want to do...
#
def _HW_readVersion(self):
return 'V%.1f' % (self._readU32(120 // 2) * 0.1)
def _HW_parallel_pumping_pressure(self):
return 0
#
# Nicos Methods
#
def doRead(self, maxage=0):
return self._HW_parallel_pumping_pressure()
def doReadFirmware(self):
return self._HW_readVersion()
def doStatus(self, maxage=0):
return status.OK, ''
@usermethod
def diag(self):
"""Display all available diagnostic values."""
self.log.info("Analog Values:")
for k, v in sorted(self._HW_AnalogChannels.items()):
self.log.info("%s: %.2f %s", k, v[1] * self._readI16(v[0] // 2),
v[2])
self.log.info("Digital Values:")
for k, v in sorted(self._HW_DigitalChannels.items()):
if k.startswith('Merker'):
continue
self.log.info("%s: %s", k,
'SET' if self._readU16(v[0] // 2) & (1 << v[1])
else 'clear')
self.log.info("Merkerwords:")
for i in range(16):
self.log.info("Merker%d..%d : 0x%04x", 128 + 15 + 16 * i,
128 + 16 * i, self._readU16(160 // 2 + i))
class AnalogValue(Readable):
attached_devices = {
'iodev': Attach('IO Device', VSDIO),
}
parameters = {
'channel': Param('Channel for readout',
type=oneof(*VSDIO._HW_AnalogChannels),
settable=True, preinit=True),
}
parameter_overrides = {
'unit': Override(mandatory=False, volatile=True, settable=False),
}
def doReadUnit(self):
_ofs, _scale, unit = \
self._attached_iodev._HW_AnalogChannels[self.channel]
if unit == 'mA-foo':
unit = 'mA'
elif unit == 'V-foo':
unit = 'V'
return unit
def doRead(self, maxage=0):
ofs, scale, _unit = \
self._attached_iodev._HW_AnalogChannels[self.channel]
# ofs is in Bytes, we need it in words! => /2
if _unit == 'mA-foo':
raw = scale * self._attached_iodev._readU16(ofs // 2)
self.log.debug('mA-foo %.2f', raw)
# Work around bug in firmware
if raw > 20.0:
raw -= 615.37
self.log.debug('mA-foo %.2f', raw)
# Tested against Multimeter (2018-08-07)
raw /= 2.0
self.log.debug('mA-foo %.2f', raw)
elif _unit == 'V-foo':
raw = self._attached_iodev._readU16(ofs // 2)
self.log.debug('V-foo %d', raw)
# Work around bug in firmware
if raw > 0x8000:
raw -= 63536
self.log.debug('V-foo %d sign1', raw)
self.log.debug('V-foo %d sign', raw)
# Tested against Multimeter (2018-08-07)
raw /= 2.0
self.log.debug('v-foo %.2f /2.0', raw)
raw *= scale
self.log.debug('v-foo %.2f scale', raw)
else:
raw = scale * self._attached_iodev._readU16(ofs // 2)
return raw
def doStatus(self, maxage=0):
return status.OK, ''
class DigitalValue(Readable):
attached_devices = {
'iodev': Attach('IO Device', VSDIO),
}
parameters = {
'channel': Param('Channel for readout',
type=oneof(*VSDIO._HW_DigitalChannels),
settable=True, preinit=True),
'mapping': Param('Mapping of 0/1 to sensible strings',
type=dictof(str, oneof(0, 1)), mandatory=True),
}
parameter_overrides = {
'unit': Override(mandatory=False, settable=False, default=''),
}
def doInit(self, mode):
self._revmapping = {v: k for k, v in self.mapping.items()}
def doRead(self, maxage=0):
ofs, bit = self._attached_iodev._HW_DigitalChannels[self.channel]
# ofs is in Bytes, we need it in words! => /2
if self._attached_iodev._readU16(ofs // 2) & (1 << bit):
return self._revmapping[1]
return self._revmapping[0]
def doStatus(self, maxage=0):
return status.OK, ''
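# Illustrative sketch only: how AnalogValue / DigitalValue devices might be wired
# up in a NICOS setup file. The module path, Tango device URI and device names are
# assumptions for illustration; the instrument's real setup files are authoritative.
#
# devices = dict(
#     vsdio = device('nicos_mlz.refsans.devices.vsd.VSDIO',
#                    description = 'VSD IO device',
#                    tangodevice = 'tango://host:10000/test/modbus/vsd',
#                    address = 12288,
#                   ),
#     water1_flow = device('nicos_mlz.refsans.devices.vsd.AnalogValue',
#                          description = 'Cooling water 1 flow',
#                          iodev = 'vsdio',
#                          channel = 'Water1Flow',
#                         ),
#     chopper_enable1 = device('nicos_mlz.refsans.devices.vsd.DigitalValue',
#                              description = 'Chopper enable 1',
#                              iodev = 'vsdio',
#                              channel = 'ChopperEnable1',
#                              mapping = dict(On=1, Off=0),
#                             ),
# )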
# repository: leelige/mindspore
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.nn as nn
from mindspore.ops import operations as P
def conv_bn_relu(in_channel, out_channel, kernel_size, stride, depthwise, activation='relu6'):
output = []
output.append(nn.Conv2d(in_channel, out_channel, kernel_size, stride, pad_mode="same",
group=1 if not depthwise else in_channel))
output.append(nn.BatchNorm2d(out_channel))
if activation:
output.append(nn.get_activation(activation))
return nn.SequentialCell(output)
class MobileNetV1(nn.Cell):
"""
MobileNet V1 backbone
"""
def __init__(self, class_num=1001, features_only=False):
super(MobileNetV1, self).__init__()
self.features_only = features_only
cnn = [
conv_bn_relu(3, 32, 3, 2, False), # Conv0
conv_bn_relu(32, 32, 3, 1, True), # Conv1_depthwise
conv_bn_relu(32, 64, 1, 1, False), # Conv1_pointwise
conv_bn_relu(64, 64, 3, 2, True), # Conv2_depthwise
conv_bn_relu(64, 128, 1, 1, False), # Conv2_pointwise
conv_bn_relu(128, 128, 3, 1, True), # Conv3_depthwise
conv_bn_relu(128, 128, 1, 1, False), # Conv3_pointwise
conv_bn_relu(128, 128, 3, 2, True), # Conv4_depthwise
conv_bn_relu(128, 256, 1, 1, False), # Conv4_pointwise
conv_bn_relu(256, 256, 3, 1, True), # Conv5_depthwise
conv_bn_relu(256, 256, 1, 1, False), # Conv5_pointwise
conv_bn_relu(256, 256, 3, 2, True), # Conv6_depthwise
conv_bn_relu(256, 512, 1, 1, False), # Conv6_pointwise
conv_bn_relu(512, 512, 3, 1, True), # Conv7_depthwise
conv_bn_relu(512, 512, 1, 1, False), # Conv7_pointwise
conv_bn_relu(512, 512, 3, 1, True), # Conv8_depthwise
conv_bn_relu(512, 512, 1, 1, False), # Conv8_pointwise
conv_bn_relu(512, 512, 3, 1, True), # Conv9_depthwise
conv_bn_relu(512, 512, 1, 1, False), # Conv9_pointwise
conv_bn_relu(512, 512, 3, 1, True), # Conv10_depthwise
conv_bn_relu(512, 512, 1, 1, False), # Conv10_pointwise
conv_bn_relu(512, 512, 3, 1, True), # Conv11_depthwise
conv_bn_relu(512, 512, 1, 1, False), # Conv11_pointwise
conv_bn_relu(512, 512, 3, 2, True), # Conv12_depthwise
conv_bn_relu(512, 1024, 1, 1, False), # Conv12_pointwise
conv_bn_relu(1024, 1024, 3, 1, True), # Conv13_depthwise
conv_bn_relu(1024, 1024, 1, 1, False), # Conv13_pointwise
]
if self.features_only:
self.network = nn.CellList(cnn)
else:
self.network = nn.SequentialCell(cnn)
self.fc = nn.Dense(1024, class_num)
def construct(self, x):
output = x
if self.features_only:
features = ()
for block in self.network:
output = block(output)
features = features + (output,)
return features
output = self.network(x)
output = P.ReduceMean()(output, (2, 3))
output = self.fc(output)
return output
def mobilenet_v1(class_num=1001):
return MobileNetV1(class_num)
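# Minimal usage sketch: build the backbone with a classifier head and run a
# forward pass on a dummy batch (the 224x224 input size is an assumption).
if __name__ == '__main__':
    import numpy as np
    from mindspore import Tensor

    net = mobilenet_v1(class_num=1001)
    dummy = Tensor(np.random.randn(1, 3, 224, 224).astype(np.float32))
    logits = net(dummy)
    print(logits.shape)  # expected: (1, 1001)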
# search/irank.py
import numpy as np
from structures import Graph
def normalize(vec):
"""
    Performs first-order (L1) normalization of a given n-dimensional vector.
:param vec: vector to be normalized
:return: normalized vector
"""
if np.count_nonzero(vec) == 0:
return vec
return np.array(vec) / np.linalg.norm(vec, ord=1)
def IRank1(hard_result_set: dict, ordered_list: list, positive_query_len: int, word_count: list = None):
"""
IR1 is normalized.
IR1 = appearance penal factor * appearance dividend
    Appearance penal factor - takes values from 0 to 1; all sites not matching the user's query get 0, which also
    propagates to RANK and ensures that only sites matching the given criteria appear in the results.
    Otherwise it holds a number following the formula:
    number of words from the positive query appearing on the site / number of words in the positive query,
    so the maximum value is 1 (all words are present).
    Appearance dividend - holds the count of positive search-query words on the site divided by the maximal such
    count across all sites.
"""
# word count is not used, but it could be, to utilize percentage of searched words per document
appearance_penal_factor = np.zeros(len(ordered_list))
appearance_dividend = np.ones(len(ordered_list))
for file in ordered_list:
if positive_query_len == 0 and file in hard_result_set:
appearance_penal_factor[ordered_list.index(file)] = 1
elif file in hard_result_set:
appearance_penal_factor[ordered_list.index(file)] = \
np.count_nonzero(hard_result_set[file]) / positive_query_len
appearance_dividend[ordered_list.index(file)] = np.sum(hard_result_set[file])
# / word_count[ordered_list.index(file)]
return normalize(np.multiply(appearance_penal_factor, appearance_dividend / np.max(appearance_dividend)))
def IRank2(graph: Graph, hard_result_set: dict, broad_positive_res_set: dict, ordered_list: list,
positive_query_len: int, word_count: list = None):
"""
    Calculates the IR2 factor by the formula:
    IR2 = 1 + appearance word count / appearance file count
    Appearance word count - for each site linking to the matched site, the sum of the counts of appearances of
    words from the positive query
    Appearance file count - the number of linking sites in which words from the positive search query appear
"""
# word count is not used, but it could be, to utilize percentage of searched words per document
appearance_count = np.zeros(len(ordered_list))
appearance_files = np.ones(len(ordered_list))
for file in ordered_list:
if positive_query_len == 0:
break
elif file in hard_result_set:
appearance_files[ordered_list.index(file)] = 0
for edge in graph.incident_edges(file, outgoing=False):
inlink = edge.opposite(file)
if inlink in broad_positive_res_set:
appearance_count[ordered_list.index(file)] += np.sum(broad_positive_res_set[inlink])
appearance_files[ordered_list.index(file)] += 1
elif inlink in hard_result_set:
appearance_count[ordered_list.index(file)] += np.sum(hard_result_set[inlink])
appearance_files[ordered_list.index(file)] += 1
if appearance_files[ordered_list.index(file)] == 0:
appearance_files[ordered_list.index(file)] = 1
return 1 + np.divide(appearance_count, appearance_files)
def IRank(graph: Graph, hard_result_set: dict, broad_positive_res_set: dict,
ordered_list: list, positive_query_len: int):
"""
IR is normalized.
IR = IR1 * IR2
For further details on algorithm see IRank1 and IRank2 functions in this module.
"""
IR1 = IRank1(hard_result_set, ordered_list, positive_query_len)
IR2 = IRank2(graph, hard_result_set, broad_positive_res_set, ordered_list, positive_query_len)
return normalize(np.multiply(IR1, IR2)), IR1, IR2
def get_ranks(pagerank: np.ndarray, graph: Graph, hard_result_set: dict, broad_positive_res_set: dict,
ordered_list: list, positive_query_len: int):
"""
Rank calculation algorithm:
Formula influenced by:
number of appearances of the searched words on site - IR1 (included in IR)
number of sites linking to site - PR (PageRank - US6285999B1)
number of searched words on linking sites to site - IR2 (included in IR)
Normalized version of PageRank is used (values 0-1) - PR
IR is also normalized.
RANK = PR * IR
RANK is normalized.
For details on the algorithm see function IRank in this module and pagerank.py module.
:param pagerank: PR
:param graph: PopulateStructures attribute
:param hard_result_set: result set of the search query, see execute_query method in query.py module or
eval_query method in advanced.eval_query.py module
:param broad_positive_res_set: result set of broad set of sites influencing ranking algorithm, see execute_query
method in query.py module or eval_query method in advanced.eval_query.py module
    :param ordered_list: order of sites from the PS object
:param positive_query_len: number of parameters influencing ranking process (all 'positive' words)
:return: rank matrix (with additional details)
"""
pagerank = pagerank.reshape((len(ordered_list),))
IR, IR1, IR2 = IRank(graph, hard_result_set, broad_positive_res_set, ordered_list, positive_query_len)
return np.concatenate((normalize(np.multiply(pagerank, IR)).reshape((len(ordered_list), 1)),
pagerank.reshape((len(ordered_list), 1)),
IR.reshape((len(ordered_list), 1)),
IR1.reshape((len(ordered_list), 1)),
IR2.reshape((len(ordered_list), 1))),
axis=1).tolist()
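# Minimal usage sketch of IRank1 on synthetic data (the shapes are assumptions:
# each hard_result_set entry maps a file name to a per-query-word count vector).
if __name__ == '__main__':
    files = ['a.html', 'b.html', 'c.html']
    hits = {
        'a.html': np.array([2, 1]),  # both positive-query words appear
        'b.html': np.array([3, 0]),  # only the first word appears
    }
    # expected ordering: a.html ranked highest, c.html (no match) gets 0
    print(IRank1(hits, files, positive_query_len=2))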
import re
import pandas as pd
import urllib.parse
from collections import namedtuple
text_doc_head_pattern_str = "<doc id=\"(.*?)\" url=\"(.*?)\" title=\"(.*?)\""
anchor_pattern_str = "<a href=\"(.*?)\">(.*?)</a>"
WikiTextInfo = namedtuple('WikiTextInfo', ['title', 'wid', 'text'])
no_comma_util_title_prefixs = ['List of', 'Lists of', 'Index of']
comma_util_title_prefixs = ['Wikipedia:', 'File:', 'Draft:', 'Template:', 'MediaWiki:', 'Category:',
'Help:', 'wikt:', 'Portal:']
# the code below judges special intro titles based on 'of' and 'in'
special_intro_title_prefixs = ['Geography of', 'Monarchy of', 'History of', 'Politics of', 'Economy of',
'Transport in', 'Foreign relations of', 'Foreign Relations of',
'Demographics of', 'Transportation in', 'Telecommunications in', 'Culture of']
def next_xml_page(f):
page_xml = ''
for line in f:
if line == ' <page>\n':
page_xml += line
break
if not page_xml:
return None
for line in f:
page_xml += line
if line == ' </page>\n':
break
return page_xml
def next_text_page(f):
wid, title = None, None
for line in f:
if line.startswith('<doc id="'):
m = re.match(text_doc_head_pattern_str, line)
if m:
title = m.group(3)
wid = int(m.group(1))
break
else:
print('fine "<doc id=" but not matched')
print(line.strip())
if wid is None:
return None
text = ''
for i, line in enumerate(f):
if line.strip() == '</doc>':
break
if i == 0 and line.strip() == title:
continue
text += line
return WikiTextInfo(title, wid, text.strip())
def find_anchors(text_with_anchors):
miter = re.finditer(anchor_pattern_str, text_with_anchors)
anchors = list()
for m in miter:
target = urllib.parse.unquote(m.group(1))
mention_str = m.group(2)
anchors.append((mention_str, target))
return anchors
def load_redirects_file(filename):
with open(filename, encoding='utf-8') as f:
df = pd.read_csv(f, na_filter=False)
redirects_dict = {title_from: title_to for title_from, title_to in df.itertuples(False, None)}
return redirects_dict
def is_not_util_page_title(page_title: str):
if ':' in page_title:
for s in comma_util_title_prefixs:
if page_title.startswith(s):
return False
for s in no_comma_util_title_prefixs:
if page_title.startswith(s):
return False
return True
def __num_starting_digits(title: str):
cnt = 0
while cnt < len(title) and title[cnt].isdigit():
cnt += 1
return cnt
def is_special_intro_title(page_title: str):
n = __num_starting_digits(page_title)
if n > 0:
if len(page_title) == n:
return True
tmp_str = page_title[n:]
if tmp_str == ' BC' or tmp_str == 's BC' or tmp_str == 's':
return True
if tmp_str.startswith(' in') or tmp_str.startswith('s in'):
return True
if tmp_str.startswith('BC in') or tmp_str.startswith('s BC in'):
return True
if ' of' not in page_title and ' in' not in page_title:
return False
for s in special_intro_title_prefixs:
if page_title.startswith(s):
return True
return False
def load_linked_cnts_file(filename):
with open(filename, encoding='utf-8') as f:
df = pd.read_csv(f, na_filter=False)
return {title: cnt for title, cnt in df.itertuples(False, None)}
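# Minimal usage sketch of the helpers above; the sample strings are made up for
# illustration.
if __name__ == '__main__':
    sample = 'Guido created <a href="Python%20%28programming%20language%29">Python</a>.'
    print(find_anchors(sample))  # [('Python', 'Python (programming language)')]
    print(is_special_intro_title('History of France'))   # True
    print(is_not_util_page_title('Category:Physics'))    # False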
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseServerError
from django.shortcuts import redirect
from sampleAppOAuth2.services import *
from sampleAppOAuth2 import getDiscoveryDocument
import urllib
# from django.template import Context, Template
# from django.apps import apps
# Create your views here.
def index(request):
return render(request, 'index.html')
def connectToQuickbooks(request):
url = getDiscoveryDocument.auth_endpoint
scope = ' '.join(settings.PAYMENTS_SCOPE)
params = {'scope': scope, 'redirect_uri': settings.REDIRECT_URI,
'response_type': 'code', 'state': get_CSRF_token(request), 'client_id': settings.CLIENT_ID}
url += '?' + urllib.parse.urlencode(params)
return redirect(url)
def signInWithIntuit(request):
url = getDiscoveryDocument.auth_endpoint
scope = ' '.join(settings.OPENID_SCOPES) # Scopes are required to be sent delimited by a space
params = {'scope': scope, 'redirect_uri': settings.REDIRECT_URI,
'response_type': 'code', 'state': get_CSRF_token(request), 'client_id': settings.CLIENT_ID}
url += '?' + urllib.parse.urlencode(params)
return redirect(url)
def getAppNow(request):
url = getDiscoveryDocument.auth_endpoint
scope = ' '.join(settings.GET_APP_SCOPES) # Scopes are required to be sent delimited by a space
params = {'scope': scope, 'redirect_uri': settings.REDIRECT_URI,
'response_type': 'code', 'state': get_CSRF_token(request), 'client_id': settings.CLIENT_ID}
url += '?' + urllib.parse.urlencode(params)
return redirect(url)
def authCodeHandler(request):
print("here!")
state = request.GET.get('state', None)
error = request.GET.get('error', None)
if error == 'access_denied':
return redirect('index')
if state is None:
return HttpResponseBadRequest()
# elif state != get_CSRF_token(request): # validate against CSRF attacks
# print("here 2 ......!")
# return HttpResponse('unauthorized', status=401)
auth_code = request.GET.get('code', None)
if auth_code is None:
return HttpResponseBadRequest()
bearer = getBearerToken(auth_code)
realmId = request.GET.get('realmId', None)
updateSession(request, bearer.accessToken, bearer.refreshToken, realmId)
# Validate JWT tokens only for OpenID scope
if bearer.idToken is not None:
if not validateJWTToken(bearer.idToken):
return HttpResponse('JWT Validation failed. Please try signing in again.')
else:
return redirect('connected')
else:
return redirect('connected')
def connected(request):
access_token = request.session.get('accessToken', None)
if access_token is None:
return HttpResponse('Your Bearer token has expired, please initiate Sign In With Intuit flow again')
refresh_token = request.session.get('refreshToken', None)
realmId = request.session['realmId']
if realmId is None:
user_profile_response, status_code = getUserProfile(access_token)
if status_code >= 400:
# if call to User Profile Service doesn't succeed then get a new bearer token from
# refresh token and try again
bearer = getBearerTokenFromRefreshToken(refresh_token)
user_profile_response, status_code = getUserProfile(bearer.accessToken)
updateSession(request, bearer.accessToken, bearer.refreshToken, request.session.get('realmId', None),
name=user_profile_response.get('givenName', None))
if status_code >= 400:
return HttpResponseServerError()
c = {
'first_name': user_profile_response.get('givenName', ' '),
}
else:
if request.session.get('name') is None:
name = ''
else:
name = request.session.get('name')
c = {
'first_name': name,
}
return render(request, 'connected.html', context=c)
def disconnect(request):
access_token = request.session.get('accessToken', None)
refresh_token = request.session.get('refreshToken', None)
revoke_response = ''
if access_token is not None:
revoke_response = revokeToken(access_token)
elif refresh_token is not None:
revoke_response = revokeToken(refresh_token)
else:
return HttpResponse('No accessToken or refreshToken found, Please connect again')
request.session.flush()
return HttpResponse(revoke_response)
def refreshTokenCall(request):
refresh_token = request.session.get('refreshToken', None)
if refresh_token is None:
return HttpResponse('Not authorized')
first_name = request.session.get('name', None)
bearer = getBearerTokenFromRefreshToken(refresh_token)
if isinstance(bearer, str):
return HttpResponse(bearer)
else:
return HttpResponse('Access Token: ' + bearer.accessToken + ', Refresh Token: ' + bearer.refreshToken)
def apiCall(request):
access_token = request.session.get('accessToken', None)
if access_token is None:
return HttpResponse('Your Bearer token has expired, please initiate C2QB flow again')
realmId = request.session['realmId']
if realmId is None:
return HttpResponse('No realm ID. QBO calls only work if the payment scope was passed!')
refresh_token = request.session['refreshToken']
create_charge_response, status_code = createCharge(access_token)
print(create_charge_response)
print(status_code)
if status_code >= 400:
# if call to QBO doesn't succeed then get a new bearer token from refresh token and try again
bearer = getBearerTokenFromRefreshToken(refresh_token)
updateSession(request, bearer.accessToken, bearer.refreshToken, realmId)
create_charge_response, status_code = createCharge(bearer.accessToken)
if status_code >= 400:
return HttpResponseServerError()
return HttpResponse('Charge create response: ' + str(create_charge_response))
# Invoice CRUD
def newInvoice(request):
access_token = request.session.get('accessToken', None)
if access_token is None:
return HttpResponse('Your Bearer token has expired, please initiate C2QB flow again')
realmId = request.session['realmId']
if realmId is None:
return HttpResponse('No realm ID. QBO calls only work if the payment scope was passed!')
refresh_token = request.session['refreshToken']
create_invoice_response, status_code = createInvoice(access_token, realmId)
print(create_invoice_response)
print(status_code)
if status_code >= 400:
# if call to QBO doesn't succeed then get a new bearer token from refresh token and try again
bearer = getBearerTokenFromRefreshToken(refresh_token)
updateSession(request, bearer.accessToken, bearer.refreshToken, realmId)
create_invoice_response, status_code = createInvoice(bearer.accessToken, realmId)
if status_code >= 400:
return HttpResponseServerError()
return HttpResponse('Invoice create response: ' + str(create_invoice_response))
def oneInvoice(request):
access_token = request.session.get('accessToken', None)
if access_token is None:
return HttpResponse('Your Bearer token has expired, please initiate C2QB flow again')
realmId = request.session['realmId']
if realmId is None:
return HttpResponse('No realm ID. QBO calls only work if the payment scope was passed!')
invoiceid = 60
refresh_token = request.session['refreshToken']
show_invoice_response, status_code = showInvoice(access_token, realmId, invoiceid)
print(show_invoice_response)
print(status_code)
if status_code >= 400:
# if call to QBO doesn't succeed then get a new bearer token from refresh token and try again
bearer = getBearerTokenFromRefreshToken(refresh_token)
updateSession(request, bearer.accessToken, bearer.refreshToken, realmId)
        show_invoice_response, status_code = showInvoice(bearer.accessToken, realmId, invoiceid)
if status_code >= 400:
return HttpResponseServerError()
    return HttpResponse('Query Invoice response: ' + str(show_invoice_response))
# Customer CRUD
def newCustomer(request):
access_token = request.session.get('accessToken', None)
if access_token is None:
return HttpResponse('Your Bearer token has expired, please initiate C2QB flow again')
realmId = request.session['realmId']
if realmId is None:
return HttpResponse('No realm ID. QBO calls only work if the payment scope was passed!')
refresh_token = request.session['refreshToken']
create_customer_response, status_code = createCustomer(access_token, realmId)
print(create_customer_response)
print(status_code)
if status_code >= 400:
# if call to QBO doesn't succeed then get a new bearer token from refresh token and try again
bearer = getBearerTokenFromRefreshToken(refresh_token)
updateSession(request, bearer.accessToken, bearer.refreshToken, realmId)
create_customer_response, status_code = createCustomer(bearer.accessToken, realmId)
if status_code >= 400:
return HttpResponseServerError()
    return HttpResponse('Customer create response: ' + str(create_customer_response))
def oneCustomer(request):
access_token = request.session.get('accessToken', None)
if access_token is None:
return HttpResponse('Your Bearer token has expired, please initiate C2QB flow again')
realmId = request.session['realmId']
if realmId is None:
return HttpResponse('No realm ID. QBO calls only work if the payment scope was passed!')
customerid = "60"
refresh_token = request.session['refreshToken']
show_customer_response, status_code = showCustomer(access_token, realmId, customerid)
print(show_customer_response)
print(status_code)
if status_code >= 400:
# if call to QBO doesn't succeed then get a new bearer token from refresh token and try again
bearer = getBearerTokenFromRefreshToken(refresh_token)
updateSession(request, bearer.accessToken, bearer.refreshToken, realmId)
        show_customer_response, status_code = showCustomer(bearer.accessToken, realmId, customerid)
if status_code >= 400:
return HttpResponseServerError()
    return HttpResponse('Query Customer response: ' + str(show_customer_response))
def allCustomer(request):
access_token = request.session.get('accessToken', None)
if access_token is None:
return HttpResponse('Your Bearer token has expired, please initiate C2QB flow again')
realmId = request.session['realmId']
if realmId is None:
return HttpResponse('No realm ID. QBO calls only work if the payment scope was passed!')
refresh_token = request.session['refreshToken']
show_all_customer_response, status_code = showAllCustomer(access_token, realmId)
print(show_all_customer_response)
print(status_code)
if status_code >= 400:
# if call to QBO doesn't succeed then get a new bearer token from refresh token and try again
bearer = getBearerTokenFromRefreshToken(refresh_token)
updateSession(request, bearer.accessToken, bearer.refreshToken, realmId)
        show_all_customer_response, status_code = showAllCustomer(bearer.accessToken, realmId)
if status_code >= 400:
return HttpResponseServerError()
return HttpResponse('Query Customer response: ' + str(show_all_customer_response))
# Service Items CRUD
def newItem(request):
access_token = request.session.get('accessToken', None)
if access_token is None:
return HttpResponse('Your Bearer token has expired, please initiate C2QB flow again')
realmId = request.session['realmId']
if realmId is None:
return HttpResponse('No realm ID. QBO calls only work if the payment scope was passed!')
refresh_token = request.session['refreshToken']
create_item_response, status_code = createItem(access_token, realmId)
print(create_item_response)
print(status_code)
if status_code >= 400:
# if call to QBO doesn't succeed then get a new bearer token from refresh token and try again
bearer = getBearerTokenFromRefreshToken(refresh_token)
updateSession(request, bearer.accessToken, bearer.refreshToken, realmId)
        create_item_response, status_code = createItem(bearer.accessToken, realmId)
if status_code >= 400:
return HttpResponseServerError()
return HttpResponse('Item Create response: ' + str(create_item_response))
def allItem(request):
access_token = request.session.get('accessToken', None)
if access_token is None:
return HttpResponse('Your Bearer token has expired, please initiate C2QB flow again')
realmId = request.session['realmId']
if realmId is None:
return HttpResponse('No realm ID. QBO calls only work if the payment scope was passed!')
refresh_token = request.session['refreshToken']
show_all_item_response, status_code = showAllItem(access_token, realmId)
print(show_all_item_response)
print(status_code)
if status_code >= 400:
# if call to QBO doesn't succeed then get a new bearer token from refresh token and try again
bearer = getBearerTokenFromRefreshToken(refresh_token)
updateSession(request, bearer.accessToken, bearer.refreshToken, realmId)
        show_all_item_response, status_code = showAllItem(bearer.accessToken, realmId)
if status_code >= 400:
return HttpResponseServerError()
return HttpResponse('Query Item response: ' + str(show_all_item_response))
def oneItem(request):
access_token = request.session.get('accessToken', None)
if access_token is None:
return HttpResponse('Your Bearer token has expired, please initiate C2QB flow again')
realmId = request.session['realmId']
if realmId is None:
return HttpResponse('No realm ID. QBO calls only work if the payment scope was passed!')
itemid = "1"
refresh_token = request.session['refreshToken']
show_item_response, status_code = showItem(access_token, realmId, itemid)
print(show_item_response)
print(status_code)
if status_code >= 400:
# if call to QBO doesn't succeed then get a new bearer token from refresh token and try again
bearer = getBearerTokenFromRefreshToken(refresh_token)
updateSession(request, bearer.accessToken, bearer.refreshToken, realmId)
        show_item_response, status_code = showItem(bearer.accessToken, realmId, itemid)
if status_code >= 400:
return HttpResponseServerError()
return HttpResponse('Query Item response: ' + str(show_item_response))
def get_CSRF_token(request):
token = request.session.get('csrfToken', None)
if token is None:
token = getSecretKey()
request.session['csrfToken'] = token
return token
def updateSession(request, access_token, refresh_token, realmId, name=None):
request.session['accessToken'] = access_token
request.session['refreshToken'] = refresh_token
request.session['realmId'] = realmId
request.session['name'] = name
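# Hedged sketch, not part of the original sample app: every view above repeats the same
# "call QBO, refresh the bearer token on a failed response, retry once" pattern. A helper
# along these lines could factor that out; `qbo_call` stands for any of the existing call
# functions (showCustomer, createItem, ...) and is an illustrative name, not an API.
def callQboWithRefresh(request, qbo_call, *args):
    realmId = request.session['realmId']
    response, status_code = qbo_call(request.session['accessToken'], realmId, *args)
    if status_code >= 400:
        # refresh the bearer token and retry the call once
        bearer = getBearerTokenFromRefreshToken(request.session['refreshToken'])
        updateSession(request, bearer.accessToken, bearer.refreshToken, realmId)
        response, status_code = qbo_call(bearer.accessToken, realmId, *args)
    return response, status_code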
|
<filename>landscape/vault.py
import hvac
import os
import sys
import yaml
import base64
import logging
def kubeconfig_context_entry(context_name):
"""
Generates a kubeconfig context entry
Args:
context_name (str): The Kubernetes context
Returns:
context entry for kubeconfig file (dict)
"""
context_entry = {
'name': context_name,
'context': {
'cluster': context_name + '-cluster',
'user': context_name + '-user',
}
}
return context_entry
def kubeconfig_cluster_entry(context_name, k8s_server, ca_cert):
"""
Generates a kubeconfig cluster entry
Args:
context_name (str): The Kubernetes context
k8s_server (str): The URL of the Kubernetes API server
        ca_cert (str): The PEM-encoded CA certificate to verify against
Returns: cluster entry for kubeconfig file (dict)
"""
base64_ca_cert = base64.b64encode(bytes(ca_cert, 'utf-8')).decode('ascii')
cluster_entry = {
'name': context_name + '-cluster',
'cluster': {
'server': k8s_server,
'certificate-authority-data': base64_ca_cert
}
}
return cluster_entry
def kubeconfig_user_entry(context_name, client_cert, client_key):
"""
Generates a kubeconfig user entry
Args:
context_name (str): The Kubernetes context
client_cert (str): The PEM-encoded client cert
client_key (str): The PEM-encoded client key
Returns: user entry for kubeconfig file (dict)
"""
base64_cert = base64.b64encode(bytes(client_cert, 'utf-8')).decode('ascii')
base64_key = base64.b64encode(bytes(client_key, 'utf-8')).decode('ascii')
user_entry = {
'name': context_name + '-user',
'user': {
'client-certificate-data': base64_cert,
'client-key-data': base64_key
}
}
return user_entry
def write_kubeconfig(cfg_path):
"""
Writes a kubernetes client configuration file with values from Vault
Expects Vault to be pre-populated like so:
vault write /secret/k8s_contexts/minikube \
ca_cert='ca_cert_value' \
client_cert='client_cert_value' \
client_key='client_key_value' \
api_server='https://kubernetes.default.svc.cluster.local'
Args:
cfg_path (str): Path to the kubeconfig file being written
Returns:
None
"""
vault_root = '/secret/k8s_contexts'
vault_addr = os.environ.get('VAULT_ADDR')
vault_cacert = os.environ.get('VAULT_CACERT')
vault_token = os.environ.get('VAULT_TOKEN')
    vault_client = hvac.Client(url=vault_addr,
                               token=vault_token,
                               verify=vault_cacert)
k8sconfig_contents = {}
for context in vault_client.list(vault_root)['data']['keys']:
clustercfg_root = vault_root + '/' + context
print("Reading kubeconfig settings from {0}".format(clustercfg_root))
try:
vault_clustercfg = vault_client.read(clustercfg_root)
except hvac.exceptions.InvalidRequest:
sys.exit("Failed to read from Vault. Check VAULT_ vars")
if not vault_clustercfg:
sys.exit("No entry {0} found in Vault path {1}".format(context,
vault_root))
vault_data = vault_clustercfg['data']
server_addr = vault_data['api_server']
server_cacert = vault_data['ca_cert']
client_cert = vault_data['client_cert']
client_key = vault_data['client_key']
context_contents = gen_k8sconf(k8s_context=context,
api_server=server_addr,
ca_cert=server_cacert,
client_auth_cert=client_cert,
client_auth_key=client_key)
k8sconfig_contents.update(context_contents)
expanded_cfg_path = os.path.expanduser(cfg_path)
cfg_dir = '/'.join(expanded_cfg_path.split('/')[0:-1])
if not os.path.exists(cfg_dir):
print("Creating directory {0}".format(cfg_dir))
os.makedirs(cfg_dir)
with open(expanded_cfg_path, 'w') as kubeconfig:
kubeconfig.write(yaml.dump(k8sconfig_contents,default_flow_style=False))
print("Wrote kubeconfig to {0}".format(expanded_cfg_path))
def gen_k8sconf(k8s_context=None, api_server=None, ca_cert=None,
client_auth_cert=None,
client_auth_key=None):
"""
Generate a kubeconfig object
Args:
k8s_context (str):
api_server (str):
ca_cert (str):
client_auth_cert (str):
client_auth_key (str):
Returns: kubeconfig data (dict)
"""
contents = {}
contents['apiVersion'] = 'v1'
contents['kind'] = 'Config'
contents['preferences'] = {}
contents['clusters'] = []
contents['contexts'] = []
contents['users'] = []
contents['current-context'] = k8s_context
vault_context_entry = kubeconfig_context_entry(k8s_context)
vault_cluster_entry = kubeconfig_cluster_entry(k8s_context,
api_server,
ca_cert)
vault_user_entry = kubeconfig_user_entry(k8s_context,
client_auth_cert,
client_auth_key)
contents['contexts'].append(vault_context_entry)
contents['clusters'].append(vault_cluster_entry)
contents['users'].append(vault_user_entry)
return contents
def read_kubeconfig(cfg_path):
"""
Reads the current kubeconfig file and places it into Vault
"""
k8sconfig_contents = {}
with open(cfg_path, 'r') as stream:
try:
            k8sconfig_contents = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
contexts = k8sconfig_contents['contexts']
clusters = k8sconfig_contents['clusters']
users = k8sconfig_contents['users']
for context in contexts:
# kubeconfig context entries
context_name = context['name']
# gke clusters are set with GOOGLE_CREDENTIALS, not here
if context_name.startswith('gke_'):
continue
context_cluster = context['context']['cluster']
context_user = context['context']['user']
# kubeconfig cluster entries
cluster_cacert = ''
client_auth_cert = ''
client_auth_key = ''
cluster_cfg = [d for d in clusters if d['name'] == context_cluster][0]
cluster_server = cluster_cfg['cluster']['server']
if 'certificate-authority-data' in cluster_cfg['cluster']:
ca_cert_data = cluster_cfg['cluster']['certificate-authority-data']
cluster_cacert = base64.b64encode(bytes(ca_cert_data, 'utf-8')).decode('ascii')
elif 'certificate-authority' in cluster_cfg['cluster']:
cacert_file = cluster_cfg['cluster']['certificate-authority']
if cacert_file.startswith('/'):
cacert_path = cacert_file
else:
cacert_path = os.path.expanduser('~/.kube/' + cacert_file)
with open(cacert_path, 'r') as stream:
try:
                    cluster_cacert = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
else:
raise "no user certificate-authority(-data) entry in kubeconfig"
# kubeconfig user entries
user_cfg = [d for d in users if d['name'] == context_user][0]
print("user_cfg={0}".format(user_cfg))
if 'client-certificate-data' in user_cfg['user']:
client_cert_data = user_cfg['user']['client-certificate-data']
client_key_data = user_cfg['user']['client-key-data']
client_auth_cert = base64.b64encode(bytes(client_cert_data, 'utf-8')).decode('ascii')
client_auth_key = base64.b64encode(bytes(client_key_data, 'utf-8')).decode('ascii')
elif 'client-certificate' in user_cfg['user']:
client_cert_file = user_cfg['user']['client-certificate']
client_key_file = user_cfg['user']['client-key']
# client cert
if client_cert_file.startswith('/'):
client_cert_path = client_cert_file
else:
client_cert_path = os.path.expanduser('~/.kube/' + client_cert_file)
with open(client_cert_path, 'r') as stream:
try:
                    client_auth_cert = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
# client key
if client_key_file.startswith('/'):
client_key_path = client_key_file
else:
client_key_path = os.path.expanduser('~/.kube/' + client_key_file)
with open(client_key_path, 'r') as stream:
try:
                    client_auth_key = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
raise "read_kubeconfig not implemented"
class VaultClient(object):
"""Connects to and authenticates with Vault
Attributes:
__vault_client (hvac.Client): Client connected to Vault
"""
def __init__(self):
vault_addr = os.environ.get('VAULT_ADDR')
vault_cacert = os.environ.get('VAULT_CACERT')
vault_token = os.environ.get('VAULT_TOKEN')
self.logger = logging.getLogger(__name__)
logging.debug(" - VAULT_ADDR is {0}".format(vault_addr))
logging.debug(" - VAULT_CACERT is {0}".format(vault_cacert))
        # Raise an error if the required VAULT_ environment variables are not set
missing_fmt_string = '{0} missing in environment'
if not vault_addr:
raise ValueError(missing_fmt_string.format('VAULT_ADDR'))
if not vault_token:
raise ValueError(missing_fmt_string.format('VAULT_TOKEN'))
if vault_addr.startswith('https://') and not vault_cacert:
raise ValueError(missing_fmt_string.format('VAULT_CACERT'))
self.__vault_client = hvac.Client(url=vault_addr,
token=vault_token,
verify=vault_cacert)
def dump_vault_from_prefix(self, path_prefix, strip_root_key=False):
"""
Dump Vault data at prefix into dict.
strip_root_key argument used for recursive path-stripping.
Set to True when you call the method outside of itself (non-recursively)
Args:
path_prefix (str): The prefix which to dump
strip_root_key (bool): Strip the root key from return value
Returns:
Data from Vault at prefix (dict)
"""
all_values_at_prefix = {}
logging.debug(" - reading vault subkeys at {0}".format(path_prefix))
subkeys_at_prefix = self.__vault_client.list(path_prefix)
logging.debug(" - subkeys are {0}".format(subkeys_at_prefix))
# use last vault key (delimited by '/') as dict index
prefix_keyname = path_prefix.split('/')[-1]
        if prefix_keyname not in all_values_at_prefix:
all_values_at_prefix[prefix_keyname] = {}
# look in Vault path for subkeys. If they exist, recurse.
if subkeys_at_prefix:
for subkey in subkeys_at_prefix['data']['keys']:
prefixed_key = path_prefix + '/' + subkey
sub_vault_key = self.dump_vault_from_prefix(prefixed_key)
all_values_at_prefix[prefix_keyname].update(sub_vault_key)
else:
vault_item_data = self.get_vault_data(path_prefix)
all_values_at_prefix[prefix_keyname].update(vault_item_data)
        # strip_root_key=True drops the root key from the returned dict; pass it as
        # True only on the initial (non-recursive) call and the recursion handles the rest.
        if strip_root_key:
            retval = all_values_at_prefix[prefix_keyname]
        else:
            retval = all_values_at_prefix
return retval
def get_vault_data(self, vault_path):
"""
Get Vault data for a specific path
Args:
vault_path (str): path to Vault item
Returns:
Vault secret contents (dict)
"""
vault_error_read_str = 'Vault read at path: {0} error: {1}'
vault_error_data_str = 'Vault data missing at path: {0}'
try:
vault_item_contents = self.__vault_client.read(vault_path)
except ValueError as e:
raise ValueError(vault_error_read_str.format(vault_path, e))
if vault_item_contents and 'data' in vault_item_contents:
return vault_item_contents['data']
else:
raise ValueError(vault_error_data_str.format(vault_path))
def list_vault_prefix(self, vault_path):
"""
Get Vault data for a specific path
Args:
vault_path (str): path to Vault item
Returns:
Vault secret contents (dict)
"""
vault_error_read_str = 'Vault read at path: {0} error: {1}'
vault_error_data_str = 'Vault data missing at path: {0}'
try:
vault_item_list = self.__vault_client.list(vault_path)
except ValueError as e:
raise ValueError(vault_error_read_str.format(vault_path, e))
if vault_item_list and 'data' in vault_item_list:
return vault_item_list['data']
else:
raise ValueError(vault_error_data_str.format(vault_path))
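# Hedged usage sketch (not part of the module): dump every secret stored under a prefix
# into a plain dict. Requires VAULT_ADDR and VAULT_TOKEN (plus VAULT_CACERT for HTTPS)
# in the environment; the prefix below mirrors the one used by write_kubeconfig above.
def _vault_dump_example():
    client = VaultClient()
    return client.dump_vault_from_prefix('/secret/k8s_contexts', strip_root_key=True)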
|
<reponame>seqRep/dgl-lifesci
# -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# SchNet
# pylint: disable=C0103, C0111, W0621, W0221, E1102, E1101
import numpy as np
import torch
import torch.nn as nn
from dgl.nn.pytorch import CFConv
__all__ = ['SchNetGNN']
class RBFExpansion(nn.Module):
r"""Expand distances between nodes by radial basis functions.
.. math::
\exp(- \gamma * ||d - \mu||^2)
where :math:`d` is the distance between two nodes and :math:`\mu` helps centralizes
the distances. We use multiple centers evenly distributed in the range of
:math:`[\text{low}, \text{high}]` with the difference between two adjacent centers
being :math:`gap`.
The number of centers is decided by :math:`(\text{high} - \text{low}) / \text{gap}`.
Choosing fewer centers corresponds to reducing the resolution of the filter.
Parameters
----------
low : float
Smallest center. Default to 0.
high : float
Largest center. Default to 30.
gap : float
Difference between two adjacent centers. :math:`\gamma` will be computed as the
reciprocal of gap. Default to 0.1.
"""
def __init__(self, low=0., high=30., gap=0.1):
super(RBFExpansion, self).__init__()
num_centers = int(np.ceil((high - low) / gap))
self.centers = np.linspace(low, high, num_centers)
self.centers = nn.Parameter(torch.tensor(self.centers).float(), requires_grad=False)
self.gamma = 1 / gap
def reset_parameters(self):
"""Reinitialize model parameters."""
device = self.centers.device
self.centers = nn.Parameter(
torch.tensor(self.centers).float(), requires_grad=False).to(device)
def forward(self, edge_dists):
"""Expand distances.
Parameters
----------
edge_dists : float32 tensor of shape (E, 1)
Distances between end nodes of edges, E for the number of edges.
Returns
-------
float32 tensor of shape (E, len(self.centers))
Expanded distances.
"""
radial = edge_dists - self.centers
coef = - self.gamma
return torch.exp(coef * (radial ** 2))
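# Hedged usage sketch (not part of dgl-lifesci): expand a toy batch of 5 edge distances
# with the default centers in [0, 30] and gap 0.1, i.e. 300 centers per distance.
def _rbf_expansion_example():
    rbf = RBFExpansion()
    edge_dists = torch.rand(5, 1) * 30.  # distances for 5 edges
    expanded = rbf(edge_dists)
    return expanded.shape  # torch.Size([5, 300])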
class Interaction(nn.Module):
"""Building block for SchNet.
SchNet is introduced in `SchNet: A continuous-filter convolutional neural network for
modeling quantum interactions <https://arxiv.org/abs/1706.08566>`__.
This layer combines node and edge features in message passing and updates node
representations.
Parameters
----------
node_feats : int
Size for the input and output node features.
edge_in_feats : int
Size for the input edge features.
hidden_feats : int
Size for hidden representations.
"""
def __init__(self, node_feats, edge_in_feats, hidden_feats):
super(Interaction, self).__init__()
self.conv = CFConv(node_feats, edge_in_feats, hidden_feats, node_feats)
self.project_out = nn.Linear(node_feats, node_feats)
def reset_parameters(self):
"""Reinitialize model parameters."""
for layer in self.conv.project_edge:
if isinstance(layer, nn.Linear):
layer.reset_parameters()
self.conv.project_node.reset_parameters()
self.conv.project_out[0].reset_parameters()
self.project_out.reset_parameters()
def forward(self, g, node_feats, edge_feats):
"""Performs message passing and updates node representations.
Parameters
----------
g : DGLGraph
DGLGraph for a batch of graphs.
node_feats : float32 tensor of shape (V, node_feats)
Input node features, V for the number of nodes.
edge_feats : float32 tensor of shape (E, edge_in_feats)
Input edge features, E for the number of edges.
Returns
-------
float32 tensor of shape (V, node_feats)
Updated node representations.
"""
node_feats = self.conv(g, node_feats, edge_feats)
return self.project_out(node_feats)
class SchNetGNN(nn.Module):
"""SchNet.
SchNet is introduced in `SchNet: A continuous-filter convolutional neural network for
modeling quantum interactions <https://arxiv.org/abs/1706.08566>`__.
This class performs message passing in SchNet and returns the updated node representations.
Parameters
----------
node_feats : int
Size for node representations to learn. Default to 64.
hidden_feats : list of int
``hidden_feats[i]`` gives the size of hidden representations for the i-th interaction
layer. ``len(hidden_feats)`` equals the number of interaction layers.
Default to ``[64, 64, 64]``.
num_node_types : int
Number of node types to embed. Default to 100.
cutoff : float
Largest center in RBF expansion. Default to 30.
gap : float
Difference between two adjacent centers in RBF expansion. Default to 0.1.
"""
def __init__(self, node_feats=64, hidden_feats=None, num_node_types=100, cutoff=30., gap=0.1):
super(SchNetGNN, self).__init__()
if hidden_feats is None:
hidden_feats = [64, 64, 64]
self.embed = nn.Embedding(num_node_types, node_feats)
self.rbf = RBFExpansion(high=cutoff, gap=gap)
n_layers = len(hidden_feats)
self.gnn_layers = nn.ModuleList()
for i in range(n_layers):
self.gnn_layers.append(
Interaction(node_feats, len(self.rbf.centers), hidden_feats[i]))
def reset_parameters(self):
"""Reinitialize model parameters."""
self.embed.reset_parameters()
self.rbf.reset_parameters()
for layer in self.gnn_layers:
layer.reset_parameters()
def forward(self, g, node_types, edge_dists):
"""Performs message passing and updates node representations.
Parameters
----------
g : DGLGraph
DGLGraph for a batch of graphs.
node_types : int64 tensor of shape (V)
Node types to embed, V for the number of nodes.
edge_dists : float32 tensor of shape (E, 1)
Distances between end nodes of edges, E for the number of edges.
Returns
-------
node_feats : float32 tensor of shape (V, node_feats)
Updated node representations.
"""
node_feats = self.embed(node_types)
expanded_dists = self.rbf(edge_dists)
for gnn in self.gnn_layers:
node_feats = gnn(g, node_feats, expanded_dists)
return node_feats
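# Hedged usage sketch (not part of dgl-lifesci): run SchNetGNN on a toy 3-node graph.
# The node types and distances are made up; shapes follow the forward() docstring above.
def _schnet_gnn_example():
    import dgl
    g = dgl.graph(([0, 1, 2], [1, 2, 0]))                # 3 nodes, 3 directed edges
    node_types = torch.LongTensor([1, 6, 8])              # arbitrary type indices < 100
    edge_dists = torch.rand(g.number_of_edges(), 1)       # (E, 1) distances
    model = SchNetGNN(node_feats=64, hidden_feats=[64, 64])
    node_feats = model(g, node_types, edge_dists)
    return node_feats.shape  # torch.Size([3, 64])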
|
<reponame>huilin16/PaddleRS
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
import shutil
import requests
import tqdm
import time
import hashlib
import tarfile
import zipfile
import filelock
import paddle
from . import logging
DOWNLOAD_RETRY_LIMIT = 3
def md5check(fullname, md5sum=None):
if md5sum is None:
return True
logging.info("File {} md5 checking...".format(fullname))
md5 = hashlib.md5()
with open(fullname, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b""):
md5.update(chunk)
calc_md5sum = md5.hexdigest()
if calc_md5sum != md5sum:
logging.info("File {} md5 check failed, {}(calc) != "
"{}(base)".format(fullname, calc_md5sum, md5sum))
return False
return True
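# Hedged usage sketch: verify a downloaded file against a known digest. The digest
# string below is a placeholder, not the checksum of any real PaddleRS asset.
def _md5check_example(fullname):
    return md5check(fullname, md5sum='d41d8cd98f00b204e9800998ecf8427e')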
def move_and_merge_tree(src, dst):
"""
Move src directory to dst, if dst is already exists,
merge src to dst
"""
if not osp.exists(dst):
shutil.move(src, dst)
else:
for fp in os.listdir(src):
src_fp = osp.join(src, fp)
dst_fp = osp.join(dst, fp)
if osp.isdir(src_fp):
if osp.isdir(dst_fp):
move_and_merge_tree(src_fp, dst_fp)
else:
shutil.move(src_fp, dst_fp)
elif osp.isfile(src_fp) and \
not osp.isfile(dst_fp):
shutil.move(src_fp, dst_fp)
def download(url, path, md5sum=None):
"""
Download from url, save to path.
url (str): download url
path (str): download to given path
"""
if not osp.exists(path):
os.makedirs(path)
fname = osp.split(url)[-1]
fullname = osp.join(path, fname)
retry_cnt = 0
while not (osp.exists(fullname) and md5check(fullname, md5sum)):
if retry_cnt < DOWNLOAD_RETRY_LIMIT:
retry_cnt += 1
else:
logging.debug("{} download failed.".format(fname))
raise RuntimeError("Download from {} failed. "
"Retry limit reached".format(url))
logging.info("Downloading {} from {}".format(fname, url))
req = requests.get(url, stream=True)
if req.status_code != 200:
raise RuntimeError("Downloading from {} failed with code "
"{}!".format(url, req.status_code))
        # To protect against interrupted downloads, download to
        # tmp_fullname first, then move tmp_fullname to fullname
        # once the download has finished
tmp_fullname = fullname + "_tmp"
total_size = req.headers.get('content-length')
with open(tmp_fullname, 'wb') as f:
if total_size:
download_size = 0
current_time = time.time()
for chunk in tqdm.tqdm(
req.iter_content(chunk_size=1024),
total=(int(total_size) + 1023) // 1024,
unit='KB'):
f.write(chunk)
download_size += 1024
if download_size % 524288 == 0:
total_size_m = round(
int(total_size) / 1024.0 / 1024.0, 2)
download_size_m = round(download_size / 1024.0 / 1024.0,
2)
speed = int(524288 / (time.time() - current_time + 0.01)
/ 1024.0)
current_time = time.time()
logging.debug(
"Downloading: TotalSize={}M, DownloadSize={}M, Speed={}KB/s"
.format(total_size_m, download_size_m, speed))
else:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
shutil.move(tmp_fullname, fullname)
logging.debug("{} download completed.".format(fname))
return fullname
def decompress(fname):
"""
Decompress for zip and tar file
"""
logging.info("Decompressing {}...".format(fname))
    # To protect against interrupted decompression, decompress into the
    # fpath_tmp directory first; if decompression succeeds, move the
    # decompressed files to fpath, delete fpath_tmp and remove the
    # downloaded compressed file.
fpath = osp.split(fname)[0]
fpath_tmp = osp.join(fpath, 'tmp')
if osp.isdir(fpath_tmp):
shutil.rmtree(fpath_tmp)
os.makedirs(fpath_tmp)
if fname.find('tar') >= 0 or fname.find('tgz') >= 0:
with tarfile.open(fname) as tf:
tf.extractall(path=fpath_tmp)
elif fname.find('zip') >= 0:
with zipfile.ZipFile(fname) as zf:
zf.extractall(path=fpath_tmp)
else:
raise TypeError("Unsupport compress file type {}".format(fname))
for f in os.listdir(fpath_tmp):
src_dir = osp.join(fpath_tmp, f)
dst_dir = osp.join(fpath, f)
move_and_merge_tree(src_dir, dst_dir)
shutil.rmtree(fpath_tmp)
logging.debug("{} decompressed.".format(fname))
return dst_dir
def url2dir(url, path):
download(url, path)
if url.endswith(('tgz', 'tar.gz', 'tar', 'zip')):
fname = osp.split(url)[-1]
savepath = osp.join(path, fname)
return decompress(savepath)
def download_and_decompress(url, path='.'):
nranks = paddle.distributed.get_world_size()
local_rank = paddle.distributed.get_rank()
fname = osp.split(url)[-1]
fullname = osp.join(path, fname)
# if url.endswith(('tgz', 'tar.gz', 'tar', 'zip')):
# fullname = osp.join(path, fname.split('.')[0])
if nranks <= 1:
dst_dir = url2dir(url, path)
if dst_dir is not None:
fullname = dst_dir
else:
lock_path = fullname + '.lock'
if not os.path.exists(fullname):
with open(lock_path, 'w'):
os.utime(lock_path, None)
if local_rank == 0:
dst_dir = url2dir(url, path)
if dst_dir is not None:
fullname = dst_dir
os.remove(lock_path)
else:
while os.path.exists(lock_path):
time.sleep(1)
return fullname
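# Hedged usage sketch: fetch an archive and unpack it next to the download location.
# The URL is a placeholder for illustration, not a real dataset link.
def _download_example():
    return download_and_decompress('https://example.com/sample_dataset.tar.gz',
                                   path='./data')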
|
<filename>neural_circuits/SC_Circuit_4.py
import tensorflow as tf
import numpy as np
import scipy
from scipy.special import expit
import matplotlib.pyplot as plt
import os
from epi.util import get_conditional_mode
DTYPE = tf.float32
t_cue_delay = 1.2
t_choice = 0.3
t_post_choice = 0.3
t_total = t_cue_delay + t_choice + t_post_choice
dt = 0.024
t = np.arange(0.0, t_total, dt)
T = t.shape[0]
# input parameters
E_constant = 0.75
E_Pbias = 0.5
E_Prule = 0.6
E_Arule = 0.6
E_choice = 0.25
E_light = 0.5
# set constant parameters
C = 2
theta = 0.05
beta = 0.5
tau = 0.09
sigma = 0.2
# inputs
I_constant = E_constant * tf.ones((T, 1, 1, 4, 1), dtype=DTYPE)
I_Pbias = np.zeros((T, 4))
I_Pbias[t < T * dt] = np.array([1, 0, 0, 1])
I_Pbias = I_Pbias[:, None, None, :, None]
I_Pbias = E_Pbias * tf.constant(I_Pbias, dtype=DTYPE)
I_Prule = np.zeros((T, 4))
I_Prule[t < 1.2] = np.array([1, 0, 0, 1])
I_Prule = I_Prule[:, None, None, :, None]
I_Prule = E_Prule * tf.constant(I_Prule, dtype=DTYPE)
I_Arule = np.zeros((T, 4))
I_Arule[t < 1.2] = np.array([0, 1, 1, 0])
I_Arule = I_Arule[:, None, None, :, None]
I_Arule = E_Arule * tf.constant(I_Arule, dtype=DTYPE)
I_choice = np.zeros((T, 4))
I_choice[t > 1.2] = np.array([1, 1, 1, 1])
I_choice = I_choice[:, None, None, :, None]
I_choice = E_choice * tf.constant(I_choice, dtype=DTYPE)
I_lightL = np.zeros((T, 4))
I_lightL[np.logical_and(1.2 < t, t < 1.5)] = np.array([1, 1, 0, 0])
I_lightL = I_lightL[:, None, None, :, None]
I_lightL = E_light * tf.constant(I_lightL, dtype=DTYPE)
I_lightR = np.zeros((T, 4))
I_lightR[np.logical_and(1.2 < t, t < 1.5)] = np.array([0, 0, 1, 1])
I_lightR = I_lightR[:, None, None, :, None]
I_lightR = E_light * tf.constant(I_lightR, dtype=DTYPE)
I_LP = I_constant + I_Pbias + I_Prule + I_choice + I_lightL
I_LA = I_constant + I_Pbias + I_Arule + I_choice + I_lightL
I = tf.concat((I_LP, I_LA), axis=2)
def SC_sim(sW, vW, dW, hW):
N = 200
Wrow1 = tf.stack([sW, vW, dW, hW], axis=2)
Wrow2 = tf.stack([vW, sW, hW, dW], axis=2)
Wrow3 = tf.stack([dW, hW, sW, vW], axis=2)
Wrow4 = tf.stack([hW, dW, vW, sW], axis=2)
W = tf.stack([Wrow1, Wrow2, Wrow3, Wrow4], axis=2)
# initial conditions
# M,C,4,1
state_shape = (sW.shape[0], C, 4, N)
v0 = 0.1 * tf.ones(state_shape, dtype=DTYPE)
v0 = v0 + 0.005 * tf.random.normal(v0.shape, 0.0, 1.0)
u0 = beta * tf.math.atanh(2 * v0 - 1) - theta
v = v0
u = u0
v_t_list = [v]
u_t_list = [u]
for i in range(1, T):
du = (dt / tau) * (
-u
+ tf.matmul(W, v)
+ I[i]
+ sigma * tf.random.normal(state_shape, 0.0, 1.0)
)
# du = (dt / tau) * (-u + tf.matmul(W, v) + I[i] + sigma * w[i])
u = u + du
v = 1.0 * (0.5 * tf.tanh((u - theta) / beta) + 0.5)
# v = eta[i] * (0.5 * tf.tanh((u - theta) / beta) + 0.5)
v_t_list.append(v)
u_t_list.append(u)
u_t = tf.stack(u_t_list, axis=0)
v_t = tf.stack(v_t_list, axis=0)
return u_t, v_t
def unwrap(z):
sW = z[:, 0][:, None]
vW = z[:, 1][:, None]
dW = z[:, 2][:, None]
hW = z[:, 3][:, None]
return sW, vW, dW, hW
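# Hedged usage sketch (weights are illustrative, not fitted values): simulate the 4-unit
# SC circuit for a batch of M=2 connectivity vectors z = (sW, vW, dW, hW).
def _sc_sim_example():
    z = np.array([[0.5, 0.1, -0.2, -0.1],
                  [0.6, 0.2, -0.1, -0.2]], dtype=np.float32)
    u_t, v_t = SC_sim(*unwrap(z))
    return u_t.shape, v_t.shape  # each (T, M, C, 4, N)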
def SC_acc(sW, vW, dW, hW):
N = 200
Wrow1 = tf.stack([sW, vW, dW, hW], axis=2)
Wrow2 = tf.stack([vW, sW, hW, dW], axis=2)
Wrow3 = tf.stack([dW, hW, sW, vW], axis=2)
Wrow4 = tf.stack([hW, dW, vW, sW], axis=2)
W = tf.stack([Wrow1, Wrow2, Wrow3, Wrow4], axis=2)
# initial conditions
# M,C,4,N
state_shape = (sW.shape[0], C, 4, N)
v0 = 0.1 * tf.ones(state_shape, dtype=DTYPE)
v0 = v0 + 0.005 * tf.random.normal(v0.shape, 0.0, 1.0)
u0 = beta * tf.math.atanh(2 * v0 - 1) - theta
v = v0
u = u0
for i in range(1, T):
du = (dt / tau) * (
-u
+ tf.matmul(W, v)
+ I[i]
+ sigma * tf.random.normal(state_shape, 0.0, 1.0)
)
u = u + du
v = 1.0 * (0.5 * tf.tanh((u - theta) / beta) + 0.5)
# v = eta[i] * (0.5 * tf.tanh((u - theta) / beta) + 0.5)
p = tf.reduce_mean(tf.math.sigmoid(100.0 * (v[:, :, 0, :] - v[:, :, 3, :])), axis=2)
return p
def SC_acc_var(P):
not_P = 1.0 - P
_mu = np.array([[P, not_P]])
def _SC_acc_var(sW, vW, dW, hW):
N = 200
Wrow1 = tf.stack([sW, vW, dW, hW], axis=2)
Wrow2 = tf.stack([vW, sW, hW, dW], axis=2)
Wrow3 = tf.stack([dW, hW, sW, vW], axis=2)
Wrow4 = tf.stack([hW, dW, vW, sW], axis=2)
W = tf.stack([Wrow1, Wrow2, Wrow3, Wrow4], axis=2)
# initial conditions
# M,C,4,N
state_shape = (sW.shape[0], C, 4, N)
v0 = 0.1 * tf.ones(state_shape, dtype=DTYPE)
v0 = v0 + 0.005 * tf.random.normal(v0.shape, 0.0, 1.0)
u0 = beta * tf.math.atanh(2 * v0 - 1) - theta
v = v0
u = u0
for i in range(1, T):
du = (dt / tau) * (
-u
+ tf.matmul(W, v)
+ I[i]
+ sigma * tf.random.normal(state_shape, 0.0, 1.0)
)
u = u + du
v = 1.0 * (0.5 * tf.tanh((u - theta) / beta) + 0.5)
p = tf.reduce_mean(
tf.math.sigmoid(100.0 * (v[:, :, 0, :] - v[:, :, 3, :])), axis=2
)
p_var = (p - _mu) ** 2
T_x = tf.concat((p, p_var), axis=1)
return T_x
return _SC_acc_var
C_opto = 4
I_opto = tf.concat((I_LP, I_LA, I_LP, I_LA), axis=2)
def SC_sim_opto(strength, period):
eta = np.ones((T, 1, C_opto, 1, 1), dtype=np.float32)
if period == "delay":
eta[np.logical_and(0.8 <= t, t <= 1.2), :, 2:, :, :] = strength
elif period == "choice":
eta[t >= 1.2, :, 2:, :, :] = strength
elif period == "total":
eta[:, :, 2:, :, :] = strength
def _SC_sim_opto(sW, vW, dW, hW):
N = 200
Wrow1 = tf.stack([sW, vW, dW, hW], axis=2)
Wrow2 = tf.stack([vW, sW, hW, dW], axis=2)
Wrow3 = tf.stack([dW, hW, sW, vW], axis=2)
Wrow4 = tf.stack([hW, dW, vW, sW], axis=2)
W = tf.stack([Wrow1, Wrow2, Wrow3, Wrow4], axis=2)
# initial conditions
# M,C,4,1
state_shape = (sW.shape[0], C_opto, 4, N)
v0 = 0.1 * tf.ones(state_shape, dtype=DTYPE)
v0 = v0 + 0.005 * tf.random.normal(v0.shape, 0.0, 1.0)
u0 = beta * tf.math.atanh(2 * v0 - 1) - theta
v = v0
u = u0
v_t_list = [v]
u_t_list = [u]
for i in range(1, T):
du = (dt / tau) * (
-u
+ tf.matmul(W, v)
+ I_opto[i]
+ sigma * tf.random.normal(state_shape, 0.0, 1.0)
)
u = u + du
v = eta[i] * (0.5 * tf.tanh((u - theta) / beta) + 0.5)
v_t_list.append(v)
u_t_list.append(u)
u_t = tf.stack(u_t_list, axis=0)
v_t = tf.stack(v_t_list, axis=0)
return u_t, v_t
return _SC_sim_opto
def SC_acc_opto(strength, period):
eta = np.ones((T, 1, C_opto, 1, 1), dtype=np.float32)
if period == "delay":
eta[np.logical_and(0.8 <= t, t <= 1.2), :, 2:, :, :] = strength
elif period == "choice":
eta[t >= 1.2, :, 2:, :, :] = strength
elif period == "total":
eta[:, :, 2:, :, :] = strength
def _SC_acc_opto(sW, vW, dW, hW):
N = 200
Wrow1 = tf.stack([sW, vW, dW, hW], axis=2)
Wrow2 = tf.stack([vW, sW, hW, dW], axis=2)
Wrow3 = tf.stack([dW, hW, sW, vW], axis=2)
Wrow4 = tf.stack([hW, dW, vW, sW], axis=2)
W = tf.stack([Wrow1, Wrow2, Wrow3, Wrow4], axis=2)
# initial conditions
# M,C,4,N
state_shape = (sW.shape[0], C_opto, 4, N)
v0 = 0.1 * tf.ones(state_shape, dtype=DTYPE)
v0 = v0 + 0.005 * tf.random.normal(v0.shape, 0.0, 1.0)
u0 = beta * tf.math.atanh(2 * v0 - 1) - theta
v = v0
u = u0
for i in range(1, T):
du = (dt / tau) * (
-u
+ tf.matmul(W, v)
+ I_opto[i]
+ sigma * tf.random.normal(state_shape, 0.0, 1.0)
)
u = u + du
v = eta[i] * (0.5 * tf.tanh((u - theta) / beta) + 0.5)
p = tf.reduce_mean(
tf.math.sigmoid(100.0 * (v[:, :, 0, :] - v[:, :, 3, :])), axis=2
)
return p
return _SC_acc_opto
def SC_acc_diff(strength, period):
sc_acc_opto = SC_acc_opto(strength, period)
def _SC_acc_diff(sW, vW, dW, hW):
p = sc_acc_opto(sW, vW, dW, hW)
p_diffs = p[:, :2] - p[:, 2:]
return p_diffs
return _SC_acc_diff
def z_to_W(z):
sW = z[:, 0]
vW = z[:, 1]
dW = z[:, 2]
hW = z[:, 3]
Wrow1 = tf.stack([sW, vW, dW, hW], axis=1)
Wrow2 = tf.stack([vW, sW, hW, dW], axis=1)
Wrow3 = tf.stack([dW, hW, sW, vW], axis=1)
Wrow4 = tf.stack([hW, dW, vW, sW], axis=1)
W = tf.stack([Wrow1, Wrow2, Wrow3, Wrow4], axis=1)
return W
MODES = np.array(
[
[1.0, 1.0, 1.0, 1.0], # all mode
[-1.0, -1.0, 1.0, 1.0], # side mode
[1.0, -1.0, -1.0, 1.0], # task mode
[-1.0, 1.0, -1.0, 1.0],
]
) # diag mode
def get_schur_eigs(W):
    # Returns the eigenvalues of W associated with the all/side/task/diag Schur modes.
T, Z = scipy.linalg.schur(W)
b = Z.copy()
b[b < 0.0] = -1
b[b > 0.0] = 1
modes = 0.25 * MODES
X = np.abs(np.dot(modes, b)) # (template_mode x z_col)
eigs = np.zeros((4,))
z_inds = []
for i in range(4):
z_ind = np.argmax(X[i] == 1.0)
z_inds.append(z_ind)
eigs[i] = T[z_ind, z_ind]
# print(z_inds)
# print(T)
return eigs
MODE_INDS = {"all": 0, "side": 1, "task": 2, "diag": 3}
A_EIG = 0.25 * np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, -1.0, -1.0],
[1.0, -1.0, -1.0, 1.0],
[1.0, -1.0, 1.0, -1.0],
]
)
A_EIG_inv = np.linalg.inv(A_EIG)
def z_from_eigs(eigs, V):
W = np.matmul(V, np.matmul(np.diag(eigs), V.T))
return W[0, :]
def z_from_eig_perturb(eigs, mode, facs):
V = 0.5 * MODES.T
assert eigs.shape[0] == 4
num_facs = facs.shape[0]
zs = []
mode_ind = MODE_INDS[mode]
for i in range(num_facs):
_eigs = eigs.copy()
_eigs[mode_ind] = eigs[mode_ind] + facs[i]
z = z_from_eigs(_eigs, V)
zs.append(z)
zs = np.array(zs)
return zs.astype(np.float32)
def z_from_eigs_analytic(eigs):
return np.matmul(A_EIG, eigs)
def eigs_from_z_analytic(z):
return np.matmul(A_EIG_inv, z)
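# Hedged worked example (numbers are illustrative): A_EIG and its inverse give the linear
# maps z = A_EIG @ eigs and eigs = A_EIG_inv @ z, so a spectrum with weight only on the
# "all" mode, eigs = [4, 0, 0, 0], corresponds to z = (sW, vW, dW, hW) = (1, 1, 1, 1).
def _eig_roundtrip_example():
    eigs = np.array([4.0, 0.0, 0.0, 0.0])
    z = z_from_eigs_analytic(eigs)      # -> array([1., 1., 1., 1.])
    return eigs_from_z_analytic(z)      # recovers [4., 0., 0., 0.]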
def get_SC_z_mode_path(
dist,
z_init,
ind,
vals,
lrs,
num_steps,
decay=0.5,
decay_steps=100,
do_plot=False,
labels=None,
):
z0 = None
z_stars = []
for i in range(vals.shape[0]):
val = vals[i]
_lr = lrs[i]
_num_steps = num_steps[i]
if z0 is None:
z0 = z_init.copy()
else:
z0 = zs[-1].copy()
z0[ind] = val
zs, log_q_zs = get_conditional_mode(
dist,
ind,
val,
z0,
lr=_lr,
num_steps=_num_steps,
decay=decay,
decay_steps=decay_steps,
)
z_stars.append(zs[-1])
if do_plot:
fig, axs = plt.subplots(1, 2, figsize=(10, 4))
axs[0].plot(zs)
axs[1].plot(log_q_zs)
if labels is not None:
axs[0].legend(labels)
plt.show()
z_stars = np.array(z_stars)
return z_stars
def get_SC_sens_vs(zs, dist):
num_zs = zs.shape[0]
hessians = dist.hessian(zs)
vs = []
for i in range(num_zs):
hess_i = hessians[i]
d, v = np.linalg.eig(hess_i)
min_ind = np.argmin(d)
_v = v[:, min_ind]
if _v[3] > 0.0:
_v = -_v
vs.append(_v)
vs = np.array(vs)
return vs
def perturbed_acc(z_perturb, sim_func, N_sim):
N_fac = z_perturb.shape[0]
pPs, pAs = [], []
for i in range(N_sim):
_, v_t_perturb = sim_func(*unwrap(z_perturb.astype(np.float32)))
v_t_perturb = v_t_perturb.numpy()
T_x_perturb = np.mean(
expit(100.0 * (v_t_perturb[-1, :, :, 0, :] - v_t_perturb[-1, :, :, 3, :])),
axis=2,
)
pPs.append(100.0 * T_x_perturb[:, 0])
pAs.append(100.0 * (1.0 - T_x_perturb[:, 1]))
pPs = np.array(pPs)
pAs = np.array(pAs)
return pPs, pAs
def perturbed_acc_plots(facs, pPs, pAs, c_stars, fontsize=12, label="", figdir=None):
N_sim = pPs[0].shape[0]
xticks = [-0.25, 0.0, 0.25]
if label == "v1_opto":
yticks = [0, 25, 50, 75, 100]
else:
yticks = [50, 75, 100]
for task, ps in zip(["P", "A"], [pPs, pAs]):
fig, ax = plt.subplots(1, 1, figsize=(3.0, 1.5))
for i, _ps in enumerate(ps):
plt.errorbar(
facs,
np.mean(_ps, axis=0),
np.std(_ps, axis=0) / np.sqrt(N_sim),
c=c_stars[i],
lw=2,
)
ax.set_yticks(yticks)
ax.set_yticklabels(["%d" % tick for tick in yticks], fontsize=fontsize)
ax.set_xticks(xticks)
ax.set_xticklabels(xticks, fontsize=fontsize)
plt.tight_layout()
if figdir is not None:
plt.savefig(os.path.join(figdir, "p%s_%s.png" % (task, label)))
plt.show()
return None
c_LP = "#3B8023"
c_LA = "#EA8E4C"
c_RA = "#F4C673"
c_RP = "#81C176"
def plot_SC_responses(v_t, fname, figsize=(6, 4)):
M = v_t.shape[1]
T_x = np.mean(expit(100.0 * (v_t[-1, :, :, 0, :] - v_t[-1, :, :, 3, :])), axis=2)
percfont = {
"family": "arial",
"weight": "light",
"size": 20,
}
neuron_labels = ["LP", "LA", "RA", "RP"]
colors = [c_LP, c_LA, c_RA, c_RP]
C_titles = ["Pro, left trials", "Anti, left trials"]
for m in range(M):
print("%.1f, %.1f" % (100 * T_x[m, 0], 100 - 100 * T_x[m, 1]))
fig, axs = plt.subplots(2, 1, figsize=figsize)
for c in range(2):
for i in range(4):
mean_v = np.mean(v_t[:, m, c, i, :], axis=1)
std_v = np.std(v_t[:, m, c, i, :], axis=1)
axs[c].fill_between(
t, mean_v - std_v, mean_v + std_v, color=colors[i], alpha=0.2
)
axs[c].plot(t, mean_v, label=neuron_labels[i], c=colors[i])
# axs[c].set_title(C_titles[c])
axs[c].set_ylim([0, 1])
if c == 0:
axs[c].text(
0.75, 0.5, "%2d%%" % round(100.0 * T_x[m, c]), fontdict=percfont
)
else:
axs[c].text(
0.75,
0.5,
"%2d%%" % round(100.0 * (1.0 - T_x[m, c])),
fontdict=percfont,
)
if c == 1:
axs[c].set_xlabel("t (s)", fontsize=18)
# axs[0].set_ylabel('Activation')
plt.tight_layout()
plt.savefig(fname + "sim%d.pdf" % (m + 1))
plt.show()
return None
|
<gh_stars>0
#!/usr/bin/env python
import matplotlib.pyplot as plt
def main(output: str):
output = output.replace("silhouette with k=", '').replace(' ', '').strip().split()
values = [(int(line[0]), float(line[1])) for line in [l.split(':') for l in output]]
xs = [x for (x, _) in values]
ys = [y for (_, y) in values]
plt.plot(xs, ys, 'bo-')
plt.title('Figure 1: Silhouette value for a dataset of 10000 points')
plt.xlabel('Number of clusters k')
plt.ylabel('Silhouette value')
plt.show()
if __name__ == '__main__':
output = """
silhouette with k=2: 0.346227
silhouette with k=3: 0.470481
silhouette with k=4: 0.417261
silhouette with k=5: 0.409835
silhouette with k=6: 0.456681
silhouette with k=7: 0.448866
silhouette with k=8: 0.472404
silhouette with k=9: 0.427348
silhouette with k=10: 0.416935
silhouette with k=11: 0.475667
silhouette with k=12: 0.476308
silhouette with k=13: 0.464758
silhouette with k=14: 0.470616
silhouette with k=15: 0.497664
silhouette with k=16: 0.486602
silhouette with k=17: 0.493796
silhouette with k=18: 0.493715
silhouette with k=19: 0.491342
silhouette with k=20: 0.502917
silhouette with k=21: 0.517567
silhouette with k=22: 0.507803
silhouette with k=23: 0.496292
silhouette with k=24: 0.48864
silhouette with k=25: 0.49081
silhouette with k=26: 0.482454
silhouette with k=27: 0.476064
silhouette with k=28: 0.472313
silhouette with k=29: 0.483676
silhouette with k=30: 0.480141
silhouette with k=31: 0.477628
silhouette with k=32: 0.469208
silhouette with k=33: 0.468222
silhouette with k=34: 0.473008
silhouette with k=35: 0.473488
silhouette with k=36: 0.462661
silhouette with k=37: 0.458961
silhouette with k=38: 0.460186
silhouette with k=39: 0.460809
silhouette with k=40: 0.448713
silhouette with k=41: 0.444547
silhouette with k=42: 0.43751
silhouette with k=43: 0.432112
silhouette with k=44: 0.419957
silhouette with k=45: 0.415264
silhouette with k=46: 0.414678
silhouette with k=47: 0.415762
silhouette with k=48: 0.410349
silhouette with k=49: 0.40355
silhouette with k=50: 0.396805
silhouette with k=51: 0.397055
silhouette with k=52: 0.39367
silhouette with k=53: 0.394573
silhouette with k=54: 0.382966
silhouette with k=55: 0.375492
silhouette with k=56: 0.373873
silhouette with k=57: 0.363668
silhouette with k=58: 0.373386
silhouette with k=59: 0.370099
silhouette with k=60: 0.367258
silhouette with k=61: 0.367424
silhouette with k=62: 0.362858
silhouette with k=63: 0.366425
silhouette with k=64: 0.360038
silhouette with k=65: 0.356889
silhouette with k=66: 0.354824
silhouette with k=67: 0.356824
silhouette with k=68: 0.351318
silhouette with k=69: 0.352354
silhouette with k=70: 0.347736
silhouette with k=71: 0.348015
silhouette with k=72: 0.345109
silhouette with k=73: 0.355291
silhouette with k=74: 0.353053
silhouette with k=75: 0.353703
silhouette with k=76: 0.348955
silhouette with k=77: 0.343844
silhouette with k=78: 0.339369
silhouette with k=79: 0.338756
silhouette with k=80: 0.339398
silhouette with k=81: 0.339631
silhouette with k=82: 0.339216
silhouette with k=83: 0.340668
silhouette with k=84: 0.341916
silhouette with k=85: 0.343993
silhouette with k=86: 0.34329
silhouette with k=87: 0.344199
silhouette with k=88: 0.341271
silhouette with k=89: 0.337846
silhouette with k=90: 0.340286
silhouette with k=91: 0.340816
silhouette with k=92: 0.339894
silhouette with k=93: 0.33923
silhouette with k=94: 0.338258
silhouette with k=95: 0.337707
silhouette with k=96: 0.335981
silhouette with k=97: 0.336288
silhouette with k=98: 0.336502
silhouette with k=99: 0.335928
"""
main(output)
|
<reponame>testigos2022/ocr-forms
import itertools
import os
from pathlib import Path
import numpy as np
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from data_io.readwrite_files import read_lines, write_file
from handwritten_ocr.craft_text_detection import CraftCroppedImages
from handwritten_ocr.pdf_to_images import ImagesFromPdf
from handwritten_ocr.trocr_inference import OCRInferencer, EmbeddedData, TrOCREmbeddings
from misc_utils.prefix_suffix import PrefixSuffix
from misc_utils.utils import build_markdown_table_from_dicts
def image_in_markdown(f):
return f""
def parse_table_row_line(l):
def strip_away(x):
x = x.strip(" ![]()")
return x
idx, label, file = [strip_away(s) for s in l.split("|")]
return int(idx), int(label), file
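# Hedged example (the row string is made up, formatted the way the parser above expects:
# "idx | label | ![](path)"):
def _parse_table_row_example():
    return parse_table_row_line("0 | 1 | ![](data/example/cropped_0_0.jpg)")
    # -> (0, 1, 'data/example/cropped_0_0.jpg')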
if __name__ == "__main__":
#
# annotated_data=[
# {"idx": 0, "label": 1, "image": image_in_markdown(
# "data/e14_cong_2018__e14_divulgacion_01_001_001_CAM_E14_CAM_X_01_001_001_XX_01_005_X_XXX/e14_cong_2018__e14_divulgacion_01_001_001_CAM_E14_CAM_X_01_001_001_XX_01_005_X_XXX.pdf-2/cropped_0_3680.jpg")},
# {"idx": 1, "label": 0, "image": image_in_markdown(
# "data/e14_cong_2018__e14_divulgacion_01_001_001_CAM_E14_CAM_X_01_001_001_XX_01_005_X_XXX/e14_cong_2018__e14_divulgacion_01_001_001_CAM_E14_CAM_X_01_001_001_XX_01_005_X_XXX.pdf-0/cropped_0_0.jpg")}
# ]
data_path = os.environ["DATA_PATH"]
embedded_data = EmbeddedData(
name="debug",
embeddings=TrOCREmbeddings(
inferencer=OCRInferencer(model_name="microsoft/trocr-base-handwritten"),
files=CraftCroppedImages(
name="debug",
image_files=ImagesFromPdf(
pdf_file=PrefixSuffix(
"data_path",
"handwritten_ocr/data/e14_cong_2018__e14_divulgacion_01_001_001_CAM_E14_CAM_X_01_001_001_XX_01_005_X_XXX.pdf",
)
),
),
),
).build()
# write_file("annotations.md",build_markdown_table_from_dicts(annotated_data))
train_data = [
parse_table_row_line(l)
for l in read_lines("annotations.md")
if not any([l.startswith(s) for s in ["---", "idx"]])
]
print(train_data)
file_embeddings = [
(f, embedder.embedd_image(f"{data_path}/{f}").detach().numpy())
for _, _, f in train_data
]
X = np.concatenate([[x] for _, x in file_embeddings])
print(f"{X.shape=}")
# y = np.array([y for _, y, _ in examples])
# n_neighbors = 1
# weights = "distance"
# clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
# clf.fit(X, y)
path = f"{data_path}/e14_cong_2018__e14_divulgacion_01_001_001_CAM_E14_CAM_X_01_001_001_XX_01_005_X_XXX.pdf-0/e14_cong_2018__e14_divulgacion_01_001_001_CAM_E14_CAM_X_01_001_001_XX_01_005_X_XXX.pdf-0_crops"
files = Path(path).rglob("crop_*.png")
def get_predictions(p):
x = embedder.embedd_image(str(p))
x = x.detach().numpy()
# x = np.expand_dims(x, 0)
# o = clf.predict_proba(x).squeeze()
# o=cosine_similarity(x,np.expand_dims(X[0],0))
return x
files = list(itertools.islice(files, 0, 100))
file_embeddings_unlabeled = (
(
str(p).replace(f"{base_path}/", ""),
get_predictions(p),
)
for p in files
)
unlabeled = np.array(
[x for _, x in tqdm(file_embeddings_unlabeled, desc="calc embeddings")]
)
print(f"{unlabeled.shape=}")
pipeline = Pipeline(
[("scaling", StandardScaler()), ("pca", PCA(random_state=42, n_components=20))]
)
# print(f"{std_scaler.mean_=},{std_scaler.scale_=}")
unlabeld_svd = pipeline.fit_transform(unlabeled)
labeled_svd = pipeline.transform(X)
print(f"{unlabeld_svd.shape=},{labeled_svd.shape=}")
print(f"{unlabeld_svd=},{labeled_svd=}")
sims = cosine_similarity(labeled_svd, unlabeld_svd).squeeze()
print(f"{sims.shape=}")
g = (
{
"idx": k,
"cosine_similarity": sim,
"image": image_in_markdown(file),
}
for k, (sim, file) in enumerate(zip(sims, files))
)
write_file(
f"predictions.md",
build_markdown_table_from_dicts(
sorted(g, key=lambda d: d["cosine_similarity"], reverse=True)
),
)
|
<reponame>Ravillatypov/zadarma-call-integation
import os
from asyncio import sleep
from datetime import date, datetime, timedelta
from settings import Config
from zadarma import ZadarmaAPI
from functools import lru_cache
from dataclasses import dataclass
from models import CallRecords
from sanic.log import logger
@dataclass
class CallInfo:
a_number: str = ''
b_number: str = ''
sip_number: str = ''
internal_id: int = 0
zd_client = ZadarmaAPI(Config.ZADARMA_KEY, Config.ZADARMA_SECRET, Config.DEBUG, max_channels=3)
calls = []
@lru_cache(maxsize=1)
def get_download_path(today: str) -> str:
save_path = os.path.join(Config.CALL_RECORDS_PATH, today)
if not os.path.exists(save_path):
os.mkdir(save_path)
return save_path
async def run_call(data: dict):
if not zd_client.pbx_id:
await zd_client.get_internal_numbers()
a_number = normalize_number(data['first_number'])
b_number = normalize_number(data['second_number'])
internal_id = int(data['slave_id'])
sip_number = await zd_client.get_sip_number()
await zd_client.get_lock(sip_number)
status = await zd_client.set_redirect(sip_number, a_number)
if status['status'] == 'success':
await zd_client.callback(sip_number, b_number)
else:
await zd_client.call('/v1/request/callback/', {'from': a_number, 'to': b_number, 'sip': sip_number})
zd_client.release_lock(sip_number)
calls.append(CallInfo(a_number, b_number, sip_number, internal_id))
return
async def record_download(call_id: str):
await sleep(60)
today = date.today().isoformat()
save_path = get_download_path(today)
logger.info(save_path)
return await zd_client.get_record(call_id, save_path)
async def event_process(event: dict):
if event.get('call_id_with_rec') is None:
return
sip_number = event['internal']
dst_number = event['destination']
if len(sip_number) > len(dst_number):
sip_number, dst_number = dst_number, sip_number
zd_client.release_number(sip_number)
audio_file, a_number, internal_id = '', '', 0
audio_file = await record_download(event['call_id_with_rec'])
if audio_file:
audio_file = os.path.relpath(audio_file, Config.STATIC_PATH)
call_start = datetime.fromisoformat(event['call_start'])
duration = int(event['duration'])
call_end = call_start + timedelta(seconds=duration)
call_lst = list(filter(lambda x: x.sip_number == sip_number and x.b_number == dst_number, calls))
if call_lst:
call = call_lst[-1]
calls.remove(call)
internal_id = call.internal_id
a_number = call.a_number
call_record = CallRecords(
master_id=1,
slave_id=internal_id,
internal_id=int(event['call_id_with_rec'].replace('.', '')),
status=1 if event['disposition'] == 'answered' else 0,
direction=2,
source_number=a_number,
destination_number=dst_number,
call_started_datetime=call_start,
call_ended_datetime=call_end,
ringing_time=0,
talking_time=duration,
audio_file=audio_file,
internal_number=sip_number,
unique_id=event.get('call_id_with_rec') or event.get('pbx_call_id'),
service_data='{}'
)
await call_record.save()
def normalize_dict(data: dict) -> dict:
result = {}
for k, v in data.items():
if isinstance(v, dict):
result[k] = normalize_dict(v)
elif isinstance(v, list) and len(v) == 1:
result[k] = v[0]
else:
result[k] = v
return result
def normalize_number(number: str) -> str:
if number.startswith('8'):
return number.replace('8', '7', 1)
return number
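# Hedged usage sketch (sample values are made up): how the helpers above normalize
# Zadarma webhook payloads before they are processed.
def _normalize_example():
    assert normalize_number('89991234567') == '79991234567'
    payload = {'internal': ['100'], 'caller_id': {'num': ['89991234567']}}
    return normalize_dict(payload)
    # -> {'internal': '100', 'caller_id': {'num': '89991234567'}}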
|
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Unsupervised Learning with CNNs for Image Registration
"""
# Imports
import logging
import collections
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.distributions.normal import Normal
from pynet.interfaces import DeepLearningDecorator
from pynet.utils import Networks
from pynet.utils import Regularizers
# Global parameters
logger = logging.getLogger("pynet")
@Networks.register
@DeepLearningDecorator(family="register")
class VoxelMorphNet(nn.Module):
""" VoxelMorphNet.
An unsupervised learning-based inference algorithm that uses insights from
classical registration methods and makes use of recent developments
    in convolutional neural networks (CNNs).
VoxelMorph assumes that input images are pre-affined by an external tool.
2018 CVPR implementation of voxelmorph.
TODO: expand this model by including anatomical surface alignment, which
enables training the network given (optional) anatomical segmentations ->
described in the paper.
Reference: https://arxiv.org/abs/1903.03545.
Code: https://github.com/voxelmorph/voxelmorph.
"""
def __init__(self, vol_size, enc_nf=[16, 32, 32, 32],
dec_nf=[32, 32, 32, 32, 32, 16, 16], full_size=False):
""" Init class.
Parameters
----------
vol_size: uplet
volume size of the atlas.
enc_nf: list of int, default [16, 32, 32, 32]
the number of features maps for encoding stages.
dec_nf: int, default [32, 32, 32, 32, 32, 16, 16]
the number of features maps for decoding stages.
full_size: bool, default False
full amount of decoding layers.
"""
# Inheritance
super(VoxelMorphNet, self).__init__()
# Estimate the generative model mean and covariance using a UNet-style
# architecture:
# the network includes a convolutional layer with 32 filters, four
# downsampling layerswith 64 convolutional filters and a stride of
# two, and threeupsampling convolutional layers with 64 filters. We
# onlyupsample three times to predict the velocity field (and
# following integration steps) at every two voxels, to enablethese
# operations to fit in current GPU card memory.
dim = len(vol_size)
self.unet = UNetCore(dim, enc_nf, dec_nf, full_size)
# One convolution to get the flow field.
conv_fn = getattr(nn, "Conv{0}d".format(dim))
self.flow = conv_fn(dec_nf[-1], dim, kernel_size=3, padding=1)
# Make flow weights + bias small. Not sure this is necessary.
nd = Normal(0, 1e-5)
self.flow.weight = nn.Parameter(nd.sample(self.flow.weight.shape))
self.flow.bias = nn.Parameter(torch.zeros(self.flow.bias.shape))
# Finally warp the moving image.
self.spatial_transform = SpatialTransformer(vol_size)
def forward(self, x):
""" Forward method.
Parameters
----------
x: Tensor
concatenated moving and fixed images.
"""
logger.debug("VoxelMorphNet...")
logger.debug("Moving + Fixed: {0}".format(x.shape))
        moving = x[:, :1]
        logger.debug("Moving: {0}".format(moving.shape))
        x = self.unet(x)
        logger.debug("Unet: {0}".format(x.shape))
        flow = self.flow(x)
        logger.debug("Flow: {0}".format(flow.shape))
        warp, _ = self.spatial_transform(moving, flow)
logger.debug("Warp: {0}".format(warp.shape))
logger.debug("Done.")
return warp, {"flow": flow}
class SpatialTransformer(nn.Module):
""" Represesents a spatial transformation block that uses the output from
the UNet to preform a grid_sample.
"""
def __init__(self, size, mode="bilinear"):
""" Initilaize the block.
Parameters
----------
size: uplet
the size of input of the spatial transformer block.
mode: str, default 'bilinear'
method of interpolation for the grid sampler.
"""
# Inheritance
super(SpatialTransformer, self).__init__()
self.mode = mode
# Create sampling grid.
vectors = [torch.arange(0, val) for val in size]
grids = torch.meshgrid(vectors)
grid = torch.stack(grids) # y, x, z
grid = torch.unsqueeze(grid, 0) # add batch
grid = grid.type(torch.FloatTensor)
self.register_buffer("grid", grid)
def forward(self, moving, flow):
logger.debug("Grid: {0}".format(self.grid.shape))
new_locs = self.grid + flow
logger.debug("Field: {0}".format(new_locs.shape))
shape = flow.shape[2:]
logger.debug("Shape: {0}".format(shape))
# Need to normalize grid values to [-1, 1] for resampler
logger.debug("Normalize field...")
for idx in range(len(shape)):
new_locs[:, idx, ...] = (
2 * (new_locs[:, idx, ...] / (shape[idx] - 1) - 0.5))
logger.debug("Done...")
if len(shape) == 2:
new_locs = new_locs.permute(0, 2, 3, 1)
new_locs = new_locs[..., [1, 0]]
elif len(shape) == 3:
new_locs = new_locs.permute(0, 2, 3, 4, 1)
new_locs = new_locs[..., [2, 1, 0]]
logger.debug("Field: {0}".format(new_locs.shape))
warp = func.grid_sample(moving, new_locs, mode=self.mode,
align_corners=False)
return warp, new_locs
class UNetCore(nn.Module):
""" Class representing the U-Net implementation that takes in
a fixed image and a moving image and outputs a flow-field.
"""
def __init__(self, dim, enc_nf, dec_nf, full_size=True):
""" Initiliza the UNet model.
Parameters
----------
        dim: int
            the number of spatial dimensions of the input volumes.
        enc_nf: list of int, default [16, 32, 32, 32]
the number of features maps for encoding stages.
dec_nf: int, default [32, 32, 32, 32, 32, 16, 16]
the number of features maps for decoding stages.
full_size: bool, default False
full amount of decoding layers.
"""
# Inheritance
super(UNetCore, self).__init__()
self.full_size = full_size
self.vm2 = len(dec_nf) == 7
# Encoder functions
self.enc = nn.ModuleList()
for idx in range(len(enc_nf)):
prev_nf = 2 if idx == 0 else enc_nf[idx - 1]
self.enc.append(ConvBlock(dim, prev_nf, enc_nf[idx], 2))
# Decoder functions
self.dec = nn.ModuleList()
self.dec.append(ConvBlock(dim, enc_nf[-1], dec_nf[0])) # 1
self.dec.append(ConvBlock(dim, dec_nf[0] * 2, dec_nf[1])) # 2
self.dec.append(ConvBlock(dim, dec_nf[1] * 2, dec_nf[2])) # 3
self.dec.append(ConvBlock(dim, dec_nf[2] + enc_nf[0], dec_nf[3])) # 4
self.dec.append(ConvBlock(dim, dec_nf[3], dec_nf[4])) # 5
if self.full_size:
self.dec.append(ConvBlock(dim, dec_nf[4] + 2, dec_nf[5], 1))
if self.vm2:
self.vm2_conv = ConvBlock(dim, dec_nf[5], dec_nf[6])
self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
def forward(self, x):
""" Forward method.
Parameters
----------
x: Tensor
concatenated moving and fixed images.
"""
logger.debug("UNet...")
logger.debug("Moving + Fixed: {0}".format(x.shape))
# Get encoder activations
x_enc = [x]
for enc in self.enc:
logger.debug("Encoder: {0}".format(enc))
logger.debug("Encoder input: {0}".format(x_enc[-1].shape))
x_enc.append(enc(x_enc[-1]))
logger.debug("Encoder output: {0}".format(x_enc[-1].shape))
# Three conv + upsample + concatenate series
y = x_enc[-1]
for idx in range(3):
logger.debug("Decoder: {0}".format(self.dec[idx]))
logger.debug("Decoder input: {0}".format(y.shape))
y = self.dec[idx](y)
logger.debug("Decoder output: {0}".format(y.shape))
y = self.upsample(y)
logger.debug("Decoder upsampling: {0}".format(y.shape))
y = torch.cat([y, x_enc[-(idx + 2)]], dim=1)
logger.debug("Decoder skip connexion: {0}".format(y.shape))
# Two convs at full_size/2 res
logger.debug("Decoder: {0}".format(self.dec[3]))
logger.debug("Decoder input: {0}".format(y.shape))
y = self.dec[3](y)
logger.debug("Decoder output: {0}".format(y.shape))
        logger.debug("Decoder: {0}".format(self.dec[4]))
        logger.debug("Decoder input: {0}".format(y.shape))
        y = self.dec[4](y)
        logger.debug("Decoder output: {0}".format(y.shape))
# Upsample to full res, concatenate and conv
if self.full_size:
y = self.upsample(y)
logger.debug("Full size Decoder upsampling: {0}".format(y.shape))
y = torch.cat([y, x_enc[0]], dim=1)
logger.debug("Decoder skip connexion: {0}".format(y.shape))
logger.debug("Decoder: {0}".format(self.dec[5]))
logger.debug("Decoder input: {0}".format(y.shape))
y = self.dec[5](y)
logger.debug("Decoder output: {0}".format(y.shape))
# Extra conv for vm2
if self.vm2:
logger.debug("VM2: {0}".format(self.vm2_conv))
logger.debug("VM2 input: {0}".format(y.shape))
y = self.vm2_conv(y)
logger.debug("VM2 output: {0}".format(y.shape))
logger.debug("Done.")
return y
class ConvBlock(nn.Module):
""" Represents a single convolution block in the Unet which
is a convolution based on the size of the input channel and output
channels and then preforms a Leaky Relu with parameter 0.2.
"""
def __init__(self, dim, in_channels, out_channels, stride=1):
""" Initialize the conv block.
Parameters
----------
dim: int
the number of dimensions of the input.
in_channels: int
the number of input channels.
out_channels: int
the number of output channels.
stride: int, default 1
the stride of the convolution.
"""
# Inheritance
super(ConvBlock, self).__init__()
conv_fn = getattr(nn, "Conv{0}d".format(dim))
if stride == 1:
ksize = 3
elif stride == 2:
ksize = 4
else:
raise Exception("Stride must be 1 or 2.")
self.main = conv_fn(in_channels, out_channels, ksize, stride, 1)
self.activation = nn.LeakyReLU(0.2)
def forward(self, x):
out = self.main(x)
out = self.activation(out)
return out
@Regularizers.register
class FlowRegularizer(object):
""" Total Variation Loss (Smooth Term).
For a dense flow field, we regularize it with the following loss that
discourages discontinuity.
k1 * FlowLoss
FlowLoss: a gradient loss on the flow field.
Recommend for k1 are 1.0 for ncc, or 0.01 for mse.
"""
def __init__(self, k1=0.01):
self.k1 = k1
def __call__(self, signal):
logger.debug("Compute flow regularization...")
flow = signal.layer_outputs["flow"]
logger.debug(" lambda: {0}".format(self.k1))
self.debug("flow", flow)
flow_loss = self._gradient_loss(flow, penalty="l2")
logger.debug(" flow loss: {0}".format(flow_loss))
logger.debug(" flow loss: {0} - {1}".format(flow.min(), flow.max()))
logger.debug("Done.")
return self.k1 * flow_loss
def _gradient_loss(self, flow, penalty="l2"):
""" Gradient Loss.
"""
dx = torch.abs(flow[:, :, 1:, :, :] - flow[:, :, :-1, :, :])
dy = torch.abs(flow[:, :, :, 1:, :] - flow[:, :, :, :-1, :])
dz = torch.abs(flow[:, :, :, :, 1:] - flow[:, :, :, :, :-1])
if (penalty == "l2"):
dx = dx * dx
dy = dy * dy
dz = dz * dz
displacement = torch.mean(dx) + torch.mean(dy) + torch.mean(dz)
return displacement / 3.0
def debug(self, name, tensor):
""" Print debug message.
Parameters
----------
name: str
the tensor name in the displayed message.
tensor: Tensor
a pytorch tensor.
"""
logger.debug(" {3}: {0} - {1} - {2}".format(
tensor.shape, tensor.get_device(), tensor.dtype, name))
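# Hedged usage sketch (not from the pynet docs): register a pair of 2D 32x32 images.
# The moving and fixed images are concatenated along the channel axis; full_size=True
# keeps the flow field at the input resolution expected by SpatialTransformer.
def _voxelmorph_example():
    vol_size = (32, 32)
    model = VoxelMorphNet(vol_size, full_size=True)
    moving = torch.rand(1, 1, *vol_size)
    fixed = torch.rand(1, 1, *vol_size)
    _, extras = model(torch.cat([moving, fixed], dim=1))
    return extras["flow"].shape  # torch.Size([1, 2, 32, 32])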
|
from __future__ import annotations
import sys
from typing import Any, TypeVar, Type, Union, Optional, Callable
from objectmodel.base import ObjectModelABC, FieldABC
from objectmodel.errors import FieldValidationError, FieldValueRequiredError
__all__ = [
'NOT_PROVIDED',
'Field',
'ObjectField',
'ListCollectionField',
'DictCollectionField',
'ProxyField']
class _NotProvided:
def __bool__(self):
return False
def __copy__(self):
return self
def __deepcopy__(self, _):
return self
def __repr__(self):
return "<objectmodel.NOT_PROVIDED>"
NOT_PROVIDED = _NotProvided()
T = TypeVar('T')
class Field(FieldABC):
__slots__ = 'name', 'default', 'required', 'allow_none', 'validator'
def __init__(self,
name: str = NOT_PROVIDED,
default: Union[Callable[[], T], T] = NOT_PROVIDED,
required: bool = False,
allow_none: bool = False,
validator: Optional[
Callable[[Optional[ObjectModelABC], FieldABC, Any], None]
] = None):
self.name = name
self.default = default
self.required = required
self.allow_none = allow_none
self.validator = validator
        # Defaults should also be validated!
        if default is not NOT_PROVIDED and not callable(default):
            self.validate(None, default)
def __get__(self, instance: ObjectModelABC, owner: Type[ObjectModelABC]) -> T:
assert isinstance(instance, ObjectModelABC)
try:
return instance.__state__[self.name]
except KeyError:
if self.default is not NOT_PROVIDED:
default = self.default
if callable(default):
default = default()
self.__set__(instance, default)
return default
raise FieldValueRequiredError(instance, self)
def __set__(self, instance: ObjectModelABC, value: T):
assert isinstance(instance, ObjectModelABC)
self.validate(instance, value)
instance.__state__[self.name] = value
def __set_name__(self, owner, name):
if self.name is NOT_PROVIDED:
assert name and isinstance(name, str), 'String name must be specified'
self.name = name
def __delete__(self, instance):
assert isinstance(instance, ObjectModelABC)
if self.required:
raise FieldValueRequiredError(instance, self)
del instance.__state__[self.name]
def serialize(self, instance: ObjectModelABC) -> Any:
return self.__get__(instance, instance.__class__)
def deserialize(self, instance: ObjectModelABC, value):
self.__set__(instance, value)
def has_default(self) -> bool:
return self.default is not NOT_PROVIDED
def has_value(self, instance: ObjectModelABC):
return self.name in instance.__state__
def can_provide_value(self, instance: ObjectModelABC):
return self.default is not NOT_PROVIDED or self.name in instance.__state__
    def validate(self, model_instance: Optional[ObjectModelABC], value: T):
        if value is None and not self.allow_none:
            raise FieldValidationError(model_instance, self, value,
                                       'Cannot be None (allow_none=False)')
        if self.validator:
            # Validate the value being assigned, not a re-read of the stored one.
            self.validator(model_instance, self, value)
def clear(self, instance):
self.__delete__(instance)
def __repr__(self):
return '{}(name={!r}, default={!r}, required={!r}, allow_none={!r}, validator={!r})'\
.format(
self.__class__.__name__,
self.name,
self.default,
self.required,
self.allow_none,
self.validator
)
class ProxyField(Field):
def __init__(self, attr_name: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self._attr_name = attr_name
def __get__(self, instance, owner):
return getattr(instance, self._attr_name)
def has_value(self, instance: ObjectModelABC) -> bool:
return True
class ObjectField(Field):
__slots__ = '_model'
def __init__(self, name: str, model: type, *args, **kwargs):
super().__init__(name, *args, **kwargs)
assert issubclass(model, ObjectModelABC)
self._model = model
def serialize(self, instance: ObjectModelABC) -> Any:
value = super().serialize(instance)
if value is not None:
return value.serialize()
return None
def deserialize(self, instance: ObjectModelABC, value):
if value is not None:
obj = self._model()
obj.deserialize(value)
super().deserialize(instance, obj)
def validate(self, model_instance: ObjectModelABC, value):
super().validate(model_instance, value)
if not self.allow_none and value is not None:
if not isinstance(value, ObjectModelABC):
raise FieldValidationError(model_instance, self, value,
f'Value should be of type: \'ObjectModel\'')
value.validate()
class ListCollectionField(Field):
__slots__ = '_model'
def __init__(self, item_model: Union[str, Type[ObjectModelABC]], *args, **kwargs):
super().__init__(*args, **kwargs)
self._model = item_model
def serialize(self, instance: ObjectModelABC) -> Any:
value = super().serialize(instance)
return [v.serialize() for v in value]
def deserialize(self, instance: ObjectModelABC, value):
deserialized_list = []
item_cls = self._resolve_item_type()
for v in value:
obj = item_cls()
obj.deserialize(v)
deserialized_list.append(obj)
super().deserialize(instance, deserialized_list)
    def _resolve_item_type(self) -> Type[ObjectModelABC]:
        if isinstance(self._model, str):
            # Resolve a forward reference (class name) lazily.
            self._model = getattr(sys.modules[__name__], self._model)
            return self._model
        elif isinstance(self._model, type) and issubclass(self._model, ObjectModelABC):
            return self._model
        raise TypeError(f'Cannot resolve item model type: {self._model!r}')
def validate(self, model_instance: ObjectModelABC, value):
super().validate(model_instance, value)
if not self.allow_none and value is not None:
if not isinstance(value, list):
raise FieldValidationError(model_instance, self, value,
'Value should be of type: List[ObjectModel]')
for item in value:
if not isinstance(item, ObjectModelABC):
raise FieldValidationError(model_instance, self, value,
f'List item {item!r} '
f'should be of type: \'ObjectModel\'')
item.validate()
class DictCollectionField(Field):
__slots__ = '_model', '_dict_factory'
def __init__(self, name: str, item_model: type, dict_factory: callable = dict,
*args, **kwargs):
super().__init__(name, *args, **kwargs)
assert issubclass(item_model, ObjectModelABC)
self._model = item_model
self._dict_factory = dict_factory
def serialize(self, instance: ObjectModelABC) -> Any:
value = super().serialize(instance)
return {k: v.serialize() for k, v in value.items()}
def deserialize(self, instance: ObjectModelABC, value):
deserialized_dict = self._dict_factory()
for k, v in value.items():
obj = self._model()
obj.deserialize(v)
deserialized_dict[k] = obj
super().deserialize(instance, deserialized_dict)
    def validate(self, model_instance: ObjectModelABC, value: Any):
        super().validate(model_instance, value)
        if not self.allow_none and value is not None:
            if not isinstance(value, dict):
                raise FieldValidationError(model_instance, self, value,
                                           'Value should be of type Dict[ObjectModel]')
            for item in value.values():
                item.validate()
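# --- Hypothetical usage sketch (added comment; the concrete model class name
# below is illustrative, not taken from this package) ---
# Fields are descriptors that read and write ``instance.__state__`` on an
# ObjectModelABC subclass, e.g.:
#
#     class User(ObjectModel):                      # some concrete model base
#         name = Field(required=True)               # name inferred via __set_name__
#         age = Field(default=0)
#         tags = ListCollectionField('Tag')         # forward reference by class name
#
# ``Field.__get__`` falls back to ``default`` (calling it if callable) and
# raises ``FieldValueRequiredError`` when no value or default is available;
# ``Field.__set__`` runs ``validate`` before storing the value.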
|
import datetime
import itertools
import pytest
from homeplotter.timeseries import TimeSeries
sample_data = {
"broken":[[datetime.date(2020, 10, 12), 200.0],[datetime.date(2020, 11, 24), 50.0],[datetime.date(2020, 12, 5), 200.0], [datetime.date(2020, 12, 30), 400.0], [datetime.date(2020, 12, 31), -300], [datetime.date(2021, 2, 2), 100],[datetime.date(2021,3,11),60]],
"even-3":[[datetime.date(2020, 10, 1), 200.0],[datetime.date(2020, 11, 24), 50.0],[datetime.date(2020, 12, 5), 200.0], [datetime.date(2020, 12, 30), 400.0], [datetime.date(2020, 12, 31), -300], [datetime.date(2021, 2, 2), 100],[datetime.date(2021,3,11),60]],
"even-5":[[datetime.date(2020, 10, 6), 200.0],[datetime.date(2020, 11, 24), 50.0],[datetime.date(2020, 12, 5), 200.0], [datetime.date(2020, 12, 30), 400.0], [datetime.date(2020, 12, 31), -300], [datetime.date(2021, 2, 2), 100],[datetime.date(2021,3,11),60]],
"short":[[datetime.date(2020, 12, 30), 400.0], [datetime.date(2020, 12, 31), -300], [datetime.date(2021, 1, 2), 100],[datetime.date(2021,1,3),60]],
}
deltas = [1,3,5]
def expected_start_date(date, delta, original_len, padding):
if padding and original_len%delta != 0:
return date - datetime.timedelta(delta - original_len%delta)
else:
return date + datetime.timedelta(original_len%delta)
def expected_end_date(date, delta, original_len, padding):
return date-datetime.timedelta(delta-1)
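# Worked example (added comment) for the padding logic above, using the first
# point of the "broken" sample (2020-10-12) with delta=3 and original_len=7:
#   padding=True : 7 % 3 = 1 != 0, so start = 2020-10-12 - (3 - 1) days = 2020-10-10
#   padding=False: start = 2020-10-12 + (7 % 3) days = 2020-10-13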
@pytest.mark.parametrize("padding,sample_key,delta", itertools.product([False,True],sample_data.keys(),deltas))
def test_accumulate__len(padding,sample_key,delta):
ts = TimeSeries(sample_data[sample_key])
original_len = len(ts.data)
start_date = expected_start_date(ts.data[0][0],delta,original_len,padding)
end_date = expected_end_date(ts.data[-1][0],delta,original_len,padding)
expected_len = (end_date-start_date).days/delta+1
ts.accumulate(delta,"Day",padding=padding)
assert(len(ts.data) == expected_len)
@pytest.mark.parametrize("padding,sample_key,delta", itertools.product([False,True],sample_data.keys(),deltas))
def test_accumulate__start_date(padding,sample_key,delta):
sd = sample_data[sample_key]
ts = TimeSeries(sd)
original_len = len(ts.data)
ts.accumulate(delta,"Day",padding=padding)
assert(ts.get_x()[0]==expected_start_date(sd[0][0],delta,original_len,padding))
@pytest.mark.parametrize("padding,sample_key,delta", itertools.product([False,True],sample_data.keys(),deltas))
def test_accumulate__steps(padding,sample_key,delta):
ts = TimeSeries(sample_data[sample_key])
ts.accumulate(delta,"Day",padding=padding)
for i in range(1,len(ts.data)):
assert((ts.data[i][0]-ts.data[i-1][0]).days == delta)
@pytest.mark.parametrize("padding,sample_key,delta", itertools.product([False,True],sample_data.keys(),deltas))
def test_accumulate__end_date(padding,sample_key,delta):
sd = sample_data[sample_key]
ts = TimeSeries(sd)
original_len = len(ts.data)
ts.accumulate(delta,"Day",padding=padding)
assert(ts.get_x()[-1]==expected_end_date(sd[-1][0],delta,original_len,padding))
@pytest.mark.parametrize("padding,sample_key,delta", itertools.product([False,True],sample_data.keys(),deltas))
def test_accumulate__sum(padding,sample_key,delta):
sd = sample_data[sample_key]
ts = TimeSeries(sd)
ts.accumulate(delta,"Day",padding=padding)
for i in range(len(ts.data)):
cum_sum = 0
for data in sd:
if ts.data[i][0]<=data[0]<ts.data[i][0]+datetime.timedelta(delta):
cum_sum+=data[1]
assert(ts.data[i][1]==cum_sum) |
<reponame>TimKam/py-ciu
import random
import pandas as pd
from ciu.ciu_object import CiuObject
def _generate_samples(case, feature_names, min_maxs, samples, indices,
category_mapping):
rows = []
rows.append(case)
for sample in range(samples):
sample_entry = {}
for index_j, feature_j in enumerate(feature_names):
if not (index_j in indices):
sample_entry[feature_j] = case[feature_j]
else:
min_val = min_maxs[feature_j][0]
max_val = min_maxs[feature_j][1]
is_int = min_maxs[feature_j][2]
sample_entry[feature_j] = \
random.randint(min_val, max_val) if is_int \
else random.uniform(min_val, max_val)
# check if feature_j, feature_k in same category;
# if so, set feature_k to 0 if feature_j is 1
for index_k, feature_k in enumerate(feature_names):
if not (index_j == index_k):
for categories in category_mapping.values():
same_category = feature_j in categories \
and feature_k in categories
if same_category and sample_entry[feature_j] == 1:
sample_entry[feature_k] = 0
# set categorical values that would otherwise not have a category
# assigned
for categories in category_mapping.values():
is_activated = False
for category in categories:
if sample_entry[category] == 1: is_activated = True
if not is_activated:
category = categories[random.randint(0, len(categories) -1)]
sample_entry[category] = 1
rows.append(sample_entry)
return pd.DataFrame(rows)
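# --- Illustrative sketch (added; not part of the original module, never
# called). Minimal call of _generate_samples with two numeric features and no
# one-hot categories: only the feature at index 0 is perturbed, the other keeps
# the value from ``case``. Feature names and ranges below are made up.
def _example_generate_samples():
    case = {'age': 30, 'income': 0.5}
    min_maxs = {'age': [18, 65, True], 'income': [0.0, 1.0, False]}
    frame = _generate_samples(case, list(min_maxs.keys()), min_maxs,
                              samples=3, indices=[0], category_mapping={})
    return frame  # 4 rows: the original case plus 3 perturbed samples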
def determine_ciu(
case, predictor, min_maxs, samples=1000,
        prediction_index=None, category_mapping=None, feature_interactions=None):
"""
Determines contextual importance and utility for a given case.
:param case: Case data (dictionary)
:param predictor: The prediction function of the black-box model Py-CIU should call
:param min_maxs: dictionary (``'feature_name': [min, max, is_int]`` for each feature)
:param samples: number of samples to be generated. Defaults to 1000.
:param prediction_index: If the model returns several predictions, provide
the index of the relevant prediction. Defaults to
``None``
:param category_mapping: Mapping of one-hot encoded categorical variables to
list of categories and category name. Defaults to
``None``.
    :param feature_interactions: List of single-entry dicts mapping an
                                 interaction name to the list of features whose
                                 interactions should be evaluated. Defaults to
                                 ``None`` (no interactions).
:return: dictionary: for each feature: list with
contextual importance value, contextual utility value
"""
    if category_mapping is None:
        category_mapping = {}
    if feature_interactions is None:
        feature_interactions = []
category_names = list(category_mapping.keys())
feature_names_decoded = []
categories_encoded = []
for feature in min_maxs.keys():
is_category = False
for index, categories in enumerate(category_mapping.values()):
if feature in categories:
categories_encoded.append(feature)
is_category = True
if category_names[index] not in feature_names_decoded:
feature_names_decoded.append(category_names[index])
if not is_category:
feature_names_decoded.append(feature)
cis = {}
cus = {}
case_prediction = \
predictor([list(case.values())])[0] if prediction_index is None \
else predictor([list(case.values())])[0][prediction_index]
predictions = {}
for index_i, feature_i in enumerate(min_maxs.keys()):
feature_samples = _generate_samples(
case, min_maxs.keys(), min_maxs, samples, [index_i],
category_mapping
)
predictions[feature_i] = \
predictor(feature_samples) if prediction_index is None \
else [prob[prediction_index] for \
prob in predictor(feature_samples)]
for feature_interaction in feature_interactions:
interaction_name = list(feature_interaction.keys())[0]
features = list(feature_interaction.values())[0]
indices = [list(min_maxs.keys()).index(feature) for feature in features]
feature_samples = _generate_samples(
case, min_maxs.keys(), min_maxs, samples, indices, category_mapping
)
predictions[interaction_name] = \
predictor(feature_samples) if prediction_index is None \
else [prob[prediction_index] for \
prob in predictor(feature_samples)]
abs_max = None
abs_min = None
# determine absolute min/max, only considering single features
for index, feature in enumerate(feature_names_decoded):
# Get right predictions for decoded category
if feature in category_mapping.keys():
encoded_feature = None
for encoded_feature_j in min_maxs.keys():
feature_max = max(predictions[encoded_feature_j])
if abs_max is None or abs_max < feature_max:
abs_max = feature_max
feature_min = min(predictions[encoded_feature_j])
if abs_min is None or abs_min > feature_min:
abs_min = feature_min
else:
feature_max = max(predictions[feature])
if abs_max is None or abs_max < feature_max:
abs_max = feature_max
feature_min = min(predictions[feature])
if abs_min is None or abs_min > feature_min:
abs_min = feature_min
# determine absolute min/max, also considering feature interactions
for feature_interaction in feature_interactions:
interaction_name = list(feature_interaction.keys())[0]
interaction_max = max(predictions[interaction_name])
if abs_max is None or abs_max < interaction_max:
abs_max = interaction_max
interaction_min = min(predictions[interaction_name])
if abs_min is None or abs_min > interaction_min:
abs_min = interaction_min
# compute CI/CU for single features
for index, feature in enumerate(feature_names_decoded):
# Get right predictions for decoded category
if feature in category_mapping.keys():
encoded_feature = None
for encoded_feature_j in min_maxs.keys():
if case[encoded_feature_j] == 1 and encoded_feature_j in category_mapping[feature]:
encoded_feature = encoded_feature_j
c_min = min(predictions[encoded_feature])
c_max = max(predictions[encoded_feature])
else:
c_min = min(predictions[feature])
c_max = max(predictions[feature])
n = case_prediction
ci = (c_max - c_min) / (abs_max - abs_min)
if (c_max - c_min) == 0:
cu = (n - c_min) / 0.01
else:
cu = (n - c_min) / (c_max - c_min)
if cu == 0: cu = 0.001
cis[feature] = ci
cus[feature] = cu
# compute CI/CU for feature interactions
    interaction_names = [
        list(feature_interaction.keys())[0]
        for feature_interaction in feature_interactions
    ]
for interaction_name in interaction_names:
c_min = min(predictions[interaction_name])
c_max = max(predictions[interaction_name])
n = case_prediction
ci = (c_max - c_min) / (abs_max - abs_min)
if (c_max - c_min) == 0:
cu = (n - c_min) / 0.01
else:
cu = (n - c_min) / (c_max - c_min)
if cu == 0: cu = 0.001
cis[interaction_name] = ci
cus[interaction_name] = cu
ciu = CiuObject(cis, cus, interaction_names)
return ciu
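# --- Added note ---
# The quantities computed above are:
#   CI = (c_max - c_min) / (abs_max - abs_min)   contextual importance
#   CU = (n - c_min) / (c_max - c_min)           contextual utility
# where c_min/c_max are the min/max predictions obtained while perturbing the
# feature (or interaction), abs_min/abs_max are the min/max over all perturbed
# features, and n is the prediction for the unperturbed case. For example, with
# n = 0.7, c_min = 0.2, c_max = 0.8, abs_min = 0.1 and abs_max = 0.9:
#   CI = 0.6 / 0.8 = 0.75 and CU = 0.5 / 0.6 ≈ 0.83.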
|
<reponame>NanoMembers/DeepFlow
#!/usr/bin/env python3
### Use CUDA_VISIBLE_DEVICES=0,1,2,... to make sure only the right GPUs
### are made visible
import argparse
import numpy as np
import os
import tensorflow as tf
import time as _time
import timeit
tf.debugging.set_log_device_placement(True)
v = tf.Variable(1)
@tf.function
def matmul(a, b, gid):
with tf.device('/GPU:{}'.format(gid)):
return tf.matmul(a,b)
def all_gather(c, kern_para_a, kern_para_b, num_devices):
tmp = [None] * kern_para_a
c_new = [None] * kern_para_a
for i in range(kern_para_a):
tmp[i] = [None] * kern_para_b
c_new[i] = [None] * kern_para_b
for j in range(kern_para_b):
tmp[i][j] = [None] * kern_para_b
with tf.device('/device:gpu:{}'.format((i*kern_para_b + j)%num_devices)):
tmp[i][j][j] = tf.identity(c[i][j])
#c_new[i][j] = c[i][j]
for k in range(kern_para_b - 1):
for j in range(kern_para_b):
with tf.device('/device:gpu:{}'.format((i*kern_para_b + j)%num_devices)):
tmp[i][j][(j-k+2*kern_para_b-1)%kern_para_b] = tf.identity(tmp[i][(j-1)%kern_para_b][(j-k+2*kern_para_b-1)%kern_para_b])
#tf.print("{}{} ::: {}{}" .format(j, (j-k+2*kern_para_b-1)%kern_para_b, (j-1)%kern_para_b, (j-k+2*kern_para_b-1)%kern_para_b))
for j in range(kern_para_b):
with tf.device('/device:gpu:{}'.format((i*kern_para_b + j)%num_devices)):
c_new[i][j] = (tf.concat([tmp[i][j]],axis=1))
return c_new
@tf.function
def RC(m, k, n, kern_para_a, kern_para_b, num_devices, a_shards, b_shards):
c = [None] * kern_para_a
for i in range(kern_para_a):
c[i] = [None] * kern_para_b
for j in range(kern_para_b):
gid = i * kern_para_b + j
with tf.device('/device:gpu:{}'.format(gid%num_devices)):
c[i][j] = tf.matmul(a_shards[i][j], b_shards[i][j])
c_new = all_gather(c, kern_para_a, kern_para_b, num_devices)
return c_new
#@tf.function
def Col(m, k, n, kern_para_a, kern_para_b, num_devices, a_shards, b_shards):
c = [None] * kern_para_a
#v.assign_add(1)
for i in range(kern_para_a):
with tf.device('/device:gpu:{}'.format(i%num_devices)):
c[i] = tf.matmul(a_shards[i][0], b_shards[i][0])
return c
@tf.function
def CR(m, k, n, kern_para_a, num_devices, a_shards, b_shards):
c = [None] * kern_para_a
for i in range(kern_para_a):
with tf.device('/device:gpu:{}'.format(i)):
c[i] = tf.matmul(a_shards[i], b_shards[i])
for i in range(kern_para_a):
with tf.device('/device:gpu:{}'.format(i)):
c_final = tf.math.add_n(c)
return c_final
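# --- Added note ---
# Sharding layout, illustrated for m = k = n = 8, kern_para_a = kern_para_b = 2:
#   RC: a_shards[i][j] has shape (m/kp_a, k) = (4, 8) and b_shards[i][j] has
#       shape (k, n/kp_b) = (8, 4), so each local matmul produces a (4, 4) tile;
#       all_gather then ring-copies the column tiles and concatenates them so
#       every device in row i ends up with the full (4, 8) row block of C.
#   CR: a_shards[i] has shape (m, k/kp_a) = (8, 4) and b_shards[i] has shape
#       (k/kp_a, n) = (4, 8); each device computes a full-size (8, 8) partial
#       product and the partials are summed with tf.math.add_n.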
def main():
parser = argparse.ArgumentParser(formatter_class =
argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-t', '--type', type=str, required=False, default='RC', help="sharding scheme: 'RC' (row-column partitioning) or 'CR' (column-row partitioning)")
parser.add_argument('-kp1', '--kern_para_a', type=int, required=False, default=1, help="for RC: parallelism along input dimension; for CR: parallelism along the inner dimension")
    parser.add_argument('-kp2', '--kern_para_b', type=int, required=False, default=1, help="for RC: parallelism along the output dimension; for CR: NA")
parser.add_argument('-N', '--num_gpus', type=int, required=False, default=1, help="Number of GPUs available for parallelization")
parser.add_argument('-m', '--input_dim', type=int, required=False, default=32768, help="input dimension")
parser.add_argument('-n', '--output_dim', type=int, required=False, default=32768, help="output dimension")
parser.add_argument('-k', '--inner_dim', type=int, required=False, default=32768, help="inner dimension")
args = parser.parse_args()
op_type = args.type #Either RC or CR
kern_para_a = args.kern_para_a
kern_para_b = args.kern_para_b
m = args.input_dim
k = args.inner_dim
n = args.output_dim
num_devices = args.num_gpus
print("op_type: {} kern_para_a: {} kern_para_b: {} num_devices: {}" .format(op_type, kern_para_a, kern_para_b, num_devices))
#Create and initialize Variables
weights = [None] * kern_para_a
activs = [None] * kern_para_a
'''
if kern_para_a == 1 and kern_para_b == 1:
a_dim = (m, k)
w_dim = (k, n)
activs = tf.Variable(initial_value=tf.random.normal(shape=a_dim), name="a")
weights = tf.Variable(initial_value=tf.random.normal(shape=w_dim), name="w")
'''
if op_type == "RC":
a_dim = (m//kern_para_a, k)
w_dim = (k, n//kern_para_b)
print("a_dim: {}".format(a_dim))
print("b_dim: {}".format(w_dim))
for i in range(kern_para_a):
activs[i] = [None] * kern_para_b
weights[i] = [None] * kern_para_b
for j in range(kern_para_b):
did = i * kern_para_b + j
curr_device = '/device:gpu:' + str(did%num_devices)
with tf.device(curr_device):
a_shard = tf.Variable(
initial_value=tf.random.normal(shape=a_dim, mean=1.0, stddev=0.0, dtype=tf.dtypes.float16),
name="a_{}_{}".format(i,j))
activs[i][j] = a_shard
w_shard = tf.Variable(
initial_value=tf.random.normal(shape=w_dim, mean=1.0, stddev=0.0, dtype=tf.dtypes.float16),
name="w_{}".format(j))
weights[i][j] = w_shard
elif op_type == "CR":
a_dim = (m, k//kern_para_a)
w_dim = (k//kern_para_a, n)
for i in range(kern_para_a):
curr_device = '/device:gpu:' + str(i)
with tf.device(curr_device):
a_shard = tf.Variable(initial_value=tf.random.normal(shape=a_dim, dtype=tf.dtypes.float16), name="a_{}".format(i))
w_shard = tf.Variable(initial_value=tf.random.normal(shape=w_dim, dtype=tf.dtypes.float16), name="w_{}".format(i))
activs[i] = a_shard
weights[i] = w_shard
#Measure time
for i in range(100):
start = _time.perf_counter()
if op_type == "RC":
if kern_para_b == 1:
Col(m, k, n, kern_para_a, kern_para_b, num_devices, activs, weights)
elif kern_para_b > 1:
c_final = RC(m, k, n, kern_para_a, kern_para_b, num_devices, activs, weights)
elif op_type == "CR":
c_final = CR(m, k, n, kern_para_a, num_devices, activs, weights)
#_ = c_final.numpy() # Make sure to execute op and not just enqueue it
tot_time = _time.perf_counter() - start
print("Step{}: {}".format(i, tot_time))
#tf.profiler.experimental.stop()
#with writer.as_default():
# tf.summary.trace_export(name="trace",step=0,profiler_outdir="tensorboard.log")
#writer.flush()
#print(c_final)
#print(a, b, c_final)
if __name__ == "__main__":
main()
|
import polyphony
from polyphony.io import Port
from polyphony.typing import bit, uint3, uint12, uint24
from polyphony.timing import clksleep, clkfence, wait_rising, wait_falling
CONVST_PULSE_CYCLE = 10
CONVERSION_CYCLE = 39
@polyphony.module
class spi_lis3dh:
def __init__(self):
self.sclk = Port(bit, 'out')
self.mosi = Port(bit, 'out')
self.miso = Port(bit, 'in')
self.cs_n = Port(bit, 'out', init=1)
self.x_led = Port(bit, 'out')
self.y_led = Port(bit, 'out')
self.z_led = Port(bit, 'out')
self.data24 = Port(uint24, 'out', protocol='ready_valid')
self.append_worker(self.worker)
def set_addr(self, rw, ms, addr):
clksleep(1)
self.sclk.wr(0)
self.mosi.wr(rw)
clksleep(2)
self.sclk.wr(1)
clksleep(2)
self.sclk.wr(0)
self.mosi.wr(ms)
clksleep(2)
self.sclk.wr(1)
clksleep(2)
self.sclk.wr(0)
for i in range(6):
bit1 = (addr >> (5 - i)) & 1
self.mosi.wr(bit1)
clksleep(1)
self.sclk.wr(1)
clksleep(1)
self.sclk.wr(0)
clksleep(1)
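    # --- Added note ---
    # set_addr clocks out the LIS3DH SPI command byte bit by bit on MOSI:
    # first the read/write bit (rw), then the auto-increment bit (ms), then the
    # 6-bit register address, MSB first, toggling SCLK around each bit.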
def read_data(self, addr):
self.cs_n.wr(0)
self.sclk.wr(0)
clksleep(1)
self.set_addr(1, 0, addr)
data = 0
for i in range(8):
data <<= 1
clksleep(1)
self.sclk.wr(1)
clksleep(1)
bit1 = self.miso.rd() & 1
clkfence()
self.sclk.wr(0)
data |= bit1
clksleep(1)
self.cs_n.wr(1)
return data
def write_data(self, addr, data):
self.cs_n.wr(0)
self.sclk.wr(0)
clksleep(1)
self.set_addr(0, 0, addr)
for i in range(8):
bit1 = (data >> (7 - i)) & 1
self.mosi.wr(bit1)
clksleep(1)
self.sclk.wr(1)
clksleep(1)
self.sclk.wr(0)
clksleep(2)
self.cs_n.wr(1)
self.mosi.wr(0)
return data
def worker(self):
self.cs_n.wr(1)
self.sclk.wr(0)
clksleep(1)
self.write_data(0x20, 0x7F)
while polyphony.is_worker_running():
clksleep(20)
self.write_data(0x20, 0x7F)
clksleep(10)
data_who_am_i = self.read_data(0x0F)
clksleep(10)
data_x_l = self.read_data(0x29)
clksleep(10)
data_y_l = self.read_data(0x2B)
clksleep(10)
data_z_l = self.read_data(0x2D)
clksleep(10)
self.x_led.wr(1 if data_x_l > 0x30 else 0)
self.y_led.wr(1 if data_y_l > 0x30 else 0)
self.z_led.wr(1 if data_z_l > 0x30 else 0)
data_xyz = ( data_x_l << 16 ) | ( data_y_l << 8 ) | data_z_l
self.data24.wr(data_xyz)
@polyphony.testbench
def test(spic):
data = spic.data24.rd()
print("data0:" , data)
data = spic.data24.rd()
print("data1:" , data)
if __name__ == '__main__':
spic = spi_lis3dh()
test(spic)
|
<reponame>petertdavies/execution-specs
"""
Ethash Functions
^^^^^^^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
Ethash algorithm related functionalities.
"""
from typing import Callable, Tuple, Union
from ethereum.base_types import UINT32_MAX_VALUE, Bytes8, Uint, Uint32
from ethereum.crypto.hash import Hash32, Hash64, keccak256, keccak512
from ethereum.utils.numeric import (
is_prime,
le_bytes_to_uint32_sequence,
le_uint32_sequence_to_bytes,
le_uint32_sequence_to_uint,
)
EPOCH_SIZE = 30000
INITIAL_CACHE_SIZE = 2**24
CACHE_EPOCH_GROWTH_SIZE = 2**17
INITIAL_DATASET_SIZE = 2**30
DATASET_EPOCH_GROWTH_SIZE = 2**23
HASH_BYTES = 64
MIX_BYTES = 128
CACHE_ROUNDS = 3
DATASET_PARENTS = 256
HASHIMOTO_ACCESSES = 64
def epoch(block_number: Uint) -> Uint:
"""
Obtain the epoch number to which the block identified by `block_number`
belongs.
Parameters
----------
block_number :
The number of the block of interest.
Returns
-------
epoch_number : `Uint`
The epoch number to which the passed in block belongs.
"""
return block_number // EPOCH_SIZE
def cache_size(block_number: Uint) -> Uint:
"""
Obtain the cache size (in bytes) of the epoch to which `block_number`
belongs.
Parameters
----------
block_number :
The number of the block of interest.
Returns
-------
cache_size_bytes : `Uint`
The cache size in bytes for the passed in block.
"""
size = INITIAL_CACHE_SIZE + (CACHE_EPOCH_GROWTH_SIZE * epoch(block_number))
size -= HASH_BYTES
while not is_prime(size // HASH_BYTES):
size -= 2 * HASH_BYTES
return size
def dataset_size(block_number: Uint) -> Uint:
"""
Obtain the dataset size (in bytes) of the epoch to which `block_number`
belongs.
Parameters
----------
block_number :
The number of the block of interest.
Returns
-------
dataset_size_bytes : `Uint`
The dataset size in bytes for the passed in block.
"""
size = INITIAL_DATASET_SIZE + (
DATASET_EPOCH_GROWTH_SIZE * epoch(block_number)
)
size -= MIX_BYTES
while not is_prime(size // MIX_BYTES):
size -= 2 * MIX_BYTES
return size
def generate_seed(block_number: Uint) -> Hash32:
"""
Obtain the cache generation seed for the block identified by
`block_number`.
Parameters
----------
block_number :
The number of the block of interest.
Returns
-------
seed : `Hash32`
The cache generation seed for the passed in block.
"""
epoch_number = epoch(block_number)
seed = b"\x00" * 32
while epoch_number != 0:
seed = keccak256(seed)
epoch_number -= 1
return Hash32(seed)
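# --- Illustrative sketch (added; not part of the original module, never
# called). The seed is keccak256 applied once per elapsed epoch, starting from
# 32 zero bytes, so blocks 0..29999 (epoch 0) all share the all-zero seed.
def _example_generate_seed():
    assert generate_seed(Uint(0)) == b"\x00" * 32
    assert generate_seed(Uint(EPOCH_SIZE)) == keccak256(b"\x00" * 32)
    return generate_seed(Uint(2 * EPOCH_SIZE))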
def generate_cache(block_number: Uint) -> Tuple[Tuple[Uint32, ...], ...]:
"""
Generate the cache for the block identified by `block_number`. This cache
would later be used to generate the full dataset.
Parameters
----------
block_number :
The number of the block of interest.
Returns
-------
cache : `Tuple[Tuple[Uint32, ...], ...]`
The cache generated for the passed in block.
"""
seed = generate_seed(block_number)
cache_size_bytes = cache_size(block_number)
cache_size_words = cache_size_bytes // HASH_BYTES
cache = [keccak512(seed)]
previous_cache_item = cache[0]
for _ in range(1, cache_size_words):
cache_item = keccak512(previous_cache_item)
cache.append(cache_item)
previous_cache_item = cache_item
for _ in range(CACHE_ROUNDS):
for index in range(cache_size_words):
# Converting `cache_size_words` to int as `-1 + Uint(5)` is an
# error.
first_cache_item = cache[
(index - 1 + int(cache_size_words)) % cache_size_words
]
second_cache_item = cache[
Uint32.from_le_bytes(cache[index][0:4]) % cache_size_words
]
result = bytes(
[a ^ b for a, b in zip(first_cache_item, second_cache_item)]
)
cache[index] = keccak512(result)
return tuple(
le_bytes_to_uint32_sequence(cache_item) for cache_item in cache
)
def fnv(a: Union[Uint, Uint32], b: Union[Uint, Uint32]) -> Uint32:
"""
FNV algorithm is inspired by the FNV hash, which in some cases is used
as a non-associative substitute for XOR.
Note that here we multiply the prime with the full 32-bit input, in
contrast with the FNV-1 spec which multiplies the prime with
one byte (octet) in turn.
Parameters
----------
a:
The first data point.
b :
The second data point.
Returns
-------
modified_mix_integers : `Uint32`
The result of performing fnv on the passed in data points.
"""
# This is a faster way of doing [number % (2 ** 32)]
result = ((Uint(a) * 0x01000193) ^ Uint(b)) & UINT32_MAX_VALUE
return Uint32(result)
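# Added example: fnv(1, 2) == (1 * 0x01000193 ^ 2) & 0xFFFFFFFF == 0x01000191,
# i.e. Uint32(16777617).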
def fnv_hash(
mix_integers: Tuple[Uint32, ...], data: Tuple[Uint32, ...]
) -> Tuple[Uint32, ...]:
"""
FNV Hash mixes in data into mix using the ethash fnv method.
Parameters
----------
mix_integers:
Mix data in the form of a sequence of Uint32.
data :
The data (sequence of Uint32) to be hashed into the mix.
Returns
-------
modified_mix_integers : `Tuple[Uint32, ...]`
The result of performing the fnv hash on the mix and the passed in
data.
"""
return tuple(
fnv(mix_integers[i], data[i]) for i in range(len(mix_integers))
)
def generate_dataset_item(
cache: Tuple[Tuple[Uint32, ...], ...], index: Uint
) -> Hash64:
"""
Generate a particular dataset item 0-indexed by `index` using `cache`.
Each dataset item is a byte stream of 64 bytes or a stream of 16 uint32
numbers.
Parameters
----------
cache:
The cache from which a subset of items will be used to generate the
dataset item.
index :
The index of the dataset item to generate.
Returns
-------
dataset_item : `Hash64`
The generated dataset item for passed index.
"""
mix = keccak512(
(
le_uint32_sequence_to_uint(cache[index % len(cache)]) ^ index
).to_le_bytes(number_bytes=HASH_BYTES)
)
mix_integers = le_bytes_to_uint32_sequence(mix)
for j in range(DATASET_PARENTS):
mix_word: Uint32 = mix_integers[j % 16]
cache_index = fnv(index ^ j, mix_word) % len(cache)
parent = cache[cache_index]
mix_integers = fnv_hash(mix_integers, parent)
mix = Hash64(le_uint32_sequence_to_bytes(mix_integers))
return keccak512(mix)
def generate_dataset(block_number: Uint) -> Tuple[Hash64, ...]:
"""
Generate the full dataset for the block identified by `block_number`.
This function is present only for demonstration purposes, as it will take
a long time to execute.
Parameters
----------
block_number :
The number of the block of interest.
Returns
-------
dataset : `Tuple[Hash64, ...]`
The dataset generated for the passed in block.
"""
dataset_size_bytes: Uint = dataset_size(block_number)
cache: Tuple[Tuple[Uint32, ...], ...] = generate_cache(block_number)
# TODO: Parallelize this later on if it adds value
return tuple(
generate_dataset_item(cache, Uint(index))
for index in range(dataset_size_bytes // HASH_BYTES)
)
def hashimoto(
header_hash: Hash32,
nonce: Bytes8,
dataset_size: Uint,
fetch_dataset_item: Callable[[Uint], Tuple[Uint32, ...]],
) -> Tuple[bytes, Hash32]:
"""
Obtain the mix digest and the final value for a header, by aggregating
data from the full dataset.
Parameters
----------
header_hash :
The PoW valid rlp hash of a header.
nonce :
        The propagated nonce for the given block.
dataset_size :
Dataset size for the epoch to which the current block belongs to.
fetch_dataset_item :
The function which will be used to obtain a specific dataset item
from an index.
Returns
-------
mix_digest : `bytes`
        The mix digest generated from the header hash and propagated nonce.
result : `Hash32`
        The final result obtained which will be checked for leading zeros (in
        byte representation) in correspondence with the block difficulty.
"""
nonce_le = bytes(reversed(nonce))
seed_hash = keccak512(header_hash + nonce_le)
seed_head = Uint32.from_le_bytes(seed_hash[:4])
rows = dataset_size // 128
mix = le_bytes_to_uint32_sequence(seed_hash) * (MIX_BYTES // HASH_BYTES)
for i in range(HASHIMOTO_ACCESSES):
new_data: Tuple[Uint32, ...] = ()
parent = fnv(i ^ seed_head, mix[i % len(mix)]) % rows
for j in range(MIX_BYTES // HASH_BYTES):
# Typecasting `parent` from Uint32 to Uint as 2*parent + j may
# overflow Uint32.
new_data += fetch_dataset_item(2 * Uint(parent) + j)
mix = fnv_hash(mix, new_data)
compressed_mix = []
for i in range(0, len(mix), 4):
compressed_mix.append(
fnv(fnv(fnv(mix[i], mix[i + 1]), mix[i + 2]), mix[i + 3])
)
mix_digest = le_uint32_sequence_to_bytes(compressed_mix)
result = keccak256(seed_hash + mix_digest)
return mix_digest, result
def hashimoto_light(
header_hash: Hash32,
nonce: Bytes8,
cache: Tuple[Tuple[Uint32, ...], ...],
dataset_size: Uint,
) -> Tuple[bytes, Hash32]:
"""
Run the hashimoto algorithm by generating dataset item using the cache
instead of loading the full dataset into main memory.
Parameters
----------
header_hash :
The PoW valid rlp hash of a header.
nonce :
        The propagated nonce for the given block.
cache:
The generated cache for the epoch to which the current block belongs
to.
dataset_size :
Dataset size for the epoch to which the current block belongs to.
Returns
-------
mix_digest : `bytes`
        The mix digest generated from the header hash and propagated nonce.
result : `Hash32`
        The final result obtained which will be checked for leading zeros (in
        byte representation) in correspondence with the block difficulty.
"""
def fetch_dataset_item(index: Uint) -> Tuple[Uint32, ...]:
"""
Generate dataset item (as tuple of Uint32 numbers) from cache.
Parameters
----------
index :
The index of the dataset item to generate.
Returns
-------
dataset_item : `Tuple[Uint32, ...]`
The generated dataset item for passed index.
"""
item: Hash64 = generate_dataset_item(cache, index)
return le_bytes_to_uint32_sequence(item)
return hashimoto(header_hash, nonce, dataset_size, fetch_dataset_item)
|
<filename>lib/pytaf/tafdecoder.py
import re
from .taf import TAF
class DecodeError(Exception):
def __init__(self, msg):
self.strerror = msg
class Decoder(object):
def __init__(self, taf):
if isinstance(taf, TAF):
self._taf = taf
else:
raise DecodeError("Argument is not a TAF parser object")
def decode_taf(self):
form = self._taf.get_header()["form"]
result = ""
result += self._decode_header(self._taf.get_header()) + "\n"
for group in self._taf.get_groups():
# TAF specific stuff
if form == "taf":
if group["header"]:
result += self._decode_group_header(group["header"]) + "\n"
# METAR specific stuff
if form == "metar":
if group["temperature"]:
result += " Temperature: %s\n" % self._decode_temperature(group["temperature"])
if group["pressure"]:
result += " Pressure: %s\n" % self._decode_pressure(group["pressure"])
# Both TAF and METAR
if group["wind"]:
result += " Wind: %s \n" % self._decode_wind(group["wind"])
if group["visibility"]:
result += " Visibility: %s \n" % self._decode_visibility(group["visibility"])
if group["clouds"]:
result += " Sky conditions: %s \n" % self._decode_clouds(group["clouds"])
if group["weather"]:
result += " Weather: %s \n" % self._decode_weather(group["weather"])
if group["windshear"]:
result += " Windshear: %s\n" % self._decode_windshear(group["windshear"])
result += " \n"
if self._taf.get_maintenance():
result += self._decode_maintenance(self._taf.get_maintenance())
return(result)
def _decode_header(self, header):
result = ""
# Ensure it's side effect free
_header = header
if _header["form"] == 'taf':
# Decode TAF header
# Type
if _header["type"] == "AMD":
result += "TAF amended for "
elif _header["type"] == "COR":
result += "TAF corrected for "
elif _header["type"] == "RTD":
result += "TAF related for "
else:
result += "TAF for "
# Add ordinal suffix
_header["origin_date"] = _header["origin_date"] + self._get_ordinal_suffix(_header["origin_date"])
_header["valid_from_date"] = _header["valid_from_date"] + self._get_ordinal_suffix(_header["valid_from_date"])
_header["valid_till_date" ] = _header["valid_till_date"] + self._get_ordinal_suffix(_header["valid_till_date"])
result += ("%(icao_code)s issued %(origin_hours)s:%(origin_minutes)s UTC on the %(origin_date)s, "
"valid from %(valid_from_hours)s:00 UTC on the %(valid_from_date)s to %(valid_till_hours)s:00 UTC on the %(valid_till_date)s")
else:
# Decode METAR header
# Type
if _header["type"] == "COR":
result += "METAR corrected for "
else:
result += "METAR for "
_header["origin_date"] = _header["origin_date"] + self._get_ordinal_suffix(_header["origin_date"])
result += ("%(icao_code)s issued %(origin_hours)s:%(origin_minutes)s UTC on the %(origin_date)s")
result = result % _header
return(result)
def _decode_group_header(self, header):
result = ""
_header = header
from_str = "From %(from_hours)s:%(from_minutes)s on the %(from_date)s: "
prob_str = "Probability %(probability)s%% of the following between %(from_hours)s:00 on the %(from_date)s and %(till_hours)s:00 on the %(till_date)s: "
tempo_str = "Temporarily between %(from_hours)s:00 on the %(from_date)s and %(till_hours)s:00 on the %(till_date)s: "
prob_tempo_str = "Probability %(probability)s%% of the following temporarily between %(from_hours)s:00 on the %(from_date)s and %(till_hours)s:00 on the %(till_date)s: "
becmg_str = "Gradual change to the following between %(from_hours)s:00 on the %(from_date)s and %(till_hours)s:00 on the %(till_date)s: "
if "type" in _header:
# Add ordinal suffix
if "from_date" in _header:
from_suffix = self._get_ordinal_suffix(_header["from_date"])
_header["from_date"] = _header["from_date"] + from_suffix
if "till_date" in _header:
till_suffix = self._get_ordinal_suffix(_header["till_date"])
_header["till_date"] = _header["till_date"] + till_suffix
if _header["type"] == "FM":
result += from_str % { "from_date": _header["from_date"],
"from_hours": _header["from_hours"],
"from_minutes": _header["from_minutes"] }
elif _header["type"] == "PROB%s" % (_header["probability"]):
result += prob_str % { "probability": _header["probability"],
"from_date": _header["from_date"],
"from_hours": _header["from_hours"],
"till_date": _header["till_date"],
"till_hours": _header["till_hours"] }
elif "PROB" in _header["type"] and "TEMPO" in _header["type"]:
result += prob_tempo_str % { "probability": _header["probability"],
"from_date": _header["from_date"],
"from_hours": _header["from_hours"],
"till_date": _header["till_date"],
"till_hours": _header["till_hours"] }
elif _header["type"] == "TEMPO":
result += tempo_str % { "from_date": _header["from_date"],
"from_hours": _header["from_hours"],
"till_date": _header["till_date"],
"till_hours": _header["till_hours"] }
elif _header["type"] == "BECMG":
result += becmg_str % { "from_date": _header["from_date"],
"from_hours": _header["from_hours"],
"till_date": _header["till_date"],
"till_hours": _header["till_hours"] }
return(result)
def _decode_wind(self, wind):
unit = ""
result = ""
if wind["direction"] == "000":
return("calm")
elif wind["direction"] == "VRB":
result += "variable"
else:
result += "from %s degrees" % wind["direction"]
if wind["unit"] == "KT":
unit = "knots"
elif wind["unit"] == "MPS":
unit = "meters per second"
else:
# Unlikely, but who knows
unit = "(unknown unit)"
result += " at %s %s" % (wind["speed"], unit)
if wind["gust"]:
result += " gusting to %s %s" % (wind["gust"], unit)
return(result)
def _decode_visibility(self, visibility):
result = ""
if "more" in visibility:
if visibility["more"]:
result += "more than "
result += visibility["range"]
if visibility["unit"] == "SM":
result += " statute miles"
elif visibility["unit"] == "M":
result += " meters"
return(result)
    def _decode_clouds(self, clouds):
        layer_list = []
        for layer in clouds:
            if layer["layer"] == "SKC" or layer["layer"] == "CLR":
                return "sky clear"
            if layer["layer"] == "NSC":
                return "no significant cloud"
            if layer["layer"] == "CAVOK":
                return "ceiling and visibility are OK"
            if layer["layer"] == "CAVU":
                return "ceiling and visibility unrestricted"
            if layer["layer"] == "VV///":
                return "sky obscured"
            if layer["layer"] == "SCT":
                layer_type = "scattered"
            elif layer["layer"] == "BKN":
                layer_type = "broken"
            elif layer["layer"] == "FEW":
                layer_type = "few"
            elif layer["layer"] == "OVC":
                layer_type = "overcast"
            else:
                layer_type = ""
            if layer["type"] == "CB":
                cloud_type = "cumulonimbus"
            elif layer["type"] == "CU":
                cloud_type = "cumulus"
            elif layer["type"] == "TCU":
                cloud_type = "towering cumulus"
            elif layer["type"] == "CI":
                cloud_type = "cirrus"
            else:
                cloud_type = ""
            layer_txt = "%s %s clouds at %d feet" % (layer_type, cloud_type, int(layer["ceiling"])*100)
            # Remove extra whitespace, if any
            layer_txt = re.sub(r'\s+', ' ', layer_txt).strip()
            layer_list.append(layer_txt)
        result = ", ".join(layer_list)
        return(result)
def _decode_weather(self, weather):
# Dicts for translating the abbreviations
dict_intensities = {
"-" : "light",
"+" : "heavy",
"VC" : "in the vicinity",
"RE" : "recent"
}
dict_modifiers = {
"MI" : "shallow",
"BC" : "patchy",
"DR" : "low drifting",
"BL" : "blowing",
"SH" : "showers",
"TS" : "thunderstorms",
"FZ" : "freezing",
"PR" : "partial"
}
dict_phenomenons = {
"DZ" : "drizzle",
"RA" : "rain",
"SN" : "snow",
"SG" : "snow grains",
"IC" : "ice",
"PL" : "ice pellets",
"GR" : "hail",
"GS" : "small snow/hail pellets",
"UP" : "unknown precipitation",
"BR" : "mist",
"FG" : "fog",
"FU" : "smoke",
"DU" : "dust",
"SA" : "sand",
"HZ" : "haze",
"PY" : "spray",
"VA" : "volcanic ash",
"PO" : "dust/sand whirl",
"SQ" : "squall",
"FC" : "funnel cloud",
"SS" : "sand storm",
"DS" : "dust storm",
}
weather_txt_blocks = []
# Check for special cases first. If a certain combination is found
# then skip parsing the whole weather string and return a defined string
# immediately
for group in weather:
# +FC = Tornado or Waterspout
if "+" in group["intensity"] and "FC" in group["phenomenon"]:
weather_txt_blocks.append("tornado or waterspout")
continue
# Sort the elements of the weather string, if no special combi-
# nation is found.
intensities_pre = []
intensities_post = []
if "RE" in group["intensity"]:
intensities_pre.append("RE")
group["intensity"].remove("RE")
for intensity in group["intensity"]:
if intensity != "VC":
intensities_pre.append(intensity)
else:
intensities_post.append(intensity)
modifiers_pre = []
modifiers_post = []
for modifier in group["modifier"]:
if modifier != "TS" and modifier != "SH":
modifiers_pre.append(modifier)
else:
modifiers_post.append(modifier)
phenomenons_pre = []
phenomenons_post = []
for phenomenon in group["phenomenon"]:
if phenomenon != "UP":
phenomenons_pre.append(phenomenon)
else:
phenomenons_post.append(phenomenon)
# Build the human readable text from the single weather string
# and append it to a list containing all the interpreted text
# blocks from a TAF group
weather_txt = ""
for intensity in intensities_pre:
weather_txt += dict_intensities[intensity] + " "
for modifier in modifiers_pre:
weather_txt += dict_modifiers[modifier] + " "
phenomenons = phenomenons_pre + phenomenons_post
cnt = len(phenomenons)
for phenomenon in phenomenons:
weather_txt += dict_phenomenons[phenomenon]
if cnt > 2:
weather_txt += ", "
if cnt == 2:
weather_txt += " and "
cnt = cnt-1
weather_txt += " "
for modifier in modifiers_post:
weather_txt += dict_modifiers[modifier] + " "
for intensity in intensities_post:
weather_txt += dict_intensities[intensity] + " "
weather_txt_blocks.append(weather_txt.strip())
# Put all the human readable stuff together and return the final
# output as a string.
weather_txt_full = ""
for block in weather_txt_blocks[:-1]:
weather_txt_full += block + " / "
weather_txt_full += weather_txt_blocks[-1]
return(weather_txt_full)
def _decode_temperature(self, temperature, unit='C'):
if temperature["air_prefix"] == 'M':
air_c = int(temperature["air"])*-1
else:
air_c = int(temperature["air"])
if temperature["dewpoint_prefix"] == 'M':
dew_c = int(temperature["dewpoint"])*-1
else:
dew_c = int(temperature["dewpoint"])
if unit == 'C':
air_txt = air_c
dew_txt = dew_c
if unit == 'F':
air_f = int(round(air_c*1.8+32))
dew_f = int(round(dew_c*1.8+32))
air_txt = air_f
dew_txt = dew_f
result = "air at %s°%s, dewpoint at %s°%s" % (air_txt, unit, dew_txt, unit)
return(result)
def _decode_pressure(self, pressure):
result = "%s hPa" % (pressure["athm_pressure"])
return(result)
def _decode_windshear(self, windshear):
result = "at %s, wind %s at %s %s" % ((int(windshear["altitude"])*100), windshear["direction"], windshear["speed"], windshear["unit"])
return(result)
def _decode_maintenance(self, maintenance):
if maintenance:
return "Station is under maintenance check\n"
def _get_ordinal_suffix(self, date):
_date = str(date)
suffix = ""
if re.match(".*(1[12]|[04-9])$", _date):
suffix = "th"
elif re.match(".*1$", _date):
suffix = "st"
elif re.match(".*2$", _date):
suffix = "nd"
elif re.match(".*3$", _date):
suffix = "rd"
return(suffix)
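# --- Hypothetical usage sketch (added comment) ---
# The decoder expects an already-parsed TAF/METAR object from the accompanying
# parser in this package, roughly along these lines (exact parser API assumed):
#
#     from pytaf import TAF, Decoder
#     taf = TAF("TAF EGLL 201100Z 2012/2118 24010KT 9999 SCT030 ...")
#     print(Decoder(taf).decode_taf())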
|
# coding: utf-8
import logging
import argparse
import os
import random
import torch
import torchtext
from torch.optim.lr_scheduler import StepLR
import seq2seq
from seq2seq.trainer import SupervisedTrainer
from seq2seq.models import EncoderRNN, DecoderRNN, Seq2seq
from seq2seq.loss import Perplexity
from seq2seq.optim import Optimizer
from seq2seq.dataset import SourceField, TargetField
from seq2seq.evaluator import Predictor
from seq2seq.util.checkpoint import Checkpoint
from nltk.tokenize.treebank import TreebankWordTokenizer
DATA_DIR = '/users4/ythou/Projects/TaskOrientedDialogue/code/TC-Bot/src/deep_dialog/data/'
def treebank_tokenizer(sentence, max_length=0):
"""
Tokenize and truncate sentence
:param sentence: str, a sentence string
:param max_length: int, max token included in the result, 0 for unlimited
:return: list, a list of token
"""
# split 's but also split <>, wait to use in further work
t = TreebankWordTokenizer()
word_lst = t.tokenize(sentence.lower().replace("$", "_B_"))
# word_lst = t.tokenize(sentence.lower().replace("<", "LAB_").replace(">", "_RAB"))
ret = []
for w in word_lst:
ret.append(w.replace("_B_", "$"))
# ret.append(w.replace("LAB_", "<").replace("_RAB", ">"))
if max_length > 0:
return ret[: max_length]
else:
return ret
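# Added note: the "$" <-> "_B_" round trip above is only there to stop the
# Treebank tokenizer from splitting dollar-prefixed slot values; the marker is
# restored to "$..." after tokenization. max_length=0 keeps the full sentence.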
def data_loader(target_file_path):
pass
def offline_training(opt, target_file_path):
# Prepare dataset with torchtext
src = SourceField(tokenize=treebank_tokenizer)
tgt = TargetField(tokenize=treebank_tokenizer)
def sample_filter(sample):
""" sample example for future purpose"""
return True
train = torchtext.data.TabularDataset(
path=opt.train_path, format='tsv',
fields=[('src', src), ('tgt', tgt)],
filter_pred=sample_filter
)
dev = torchtext.data.TabularDataset(
path=opt.dev_path, format='tsv',
fields=[('src', src), ('tgt', tgt)],
filter_pred=sample_filter
)
test = torchtext.data.TabularDataset(
path=opt.dev_path, format='tsv',
fields=[('src', src), ('tgt', tgt)],
filter_pred=sample_filter
)
src.build_vocab(train, max_size=opt.src_vocab_size)
tgt.build_vocab(train, max_size=opt.tgt_vocab_size)
input_vocab = src.vocab
output_vocab = tgt.vocab
# NOTE: If the source field name and the target field name
# are different from 'src' and 'tgt' respectively, they have
# to be set explicitly before any training or inference
# seq2seq.src_field_name = 'src'
# seq2seq.tgt_field_name = 'tgt'
# Prepare loss
weight = torch.ones(len(tgt.vocab))
pad = tgt.vocab.stoi[tgt.pad_token]
if opt.loss == 'perplexity':
loss = Perplexity(weight, pad)
else:
raise TypeError
seq2seq = None
optimizer = None
if not opt.resume:
# Initialize model
encoder = EncoderRNN(
vocab_size=len(src.vocab),
max_len=opt.max_length,
hidden_size=opt.hidden_size,
            input_dropout_p=opt.input_dropout_p,
dropout_p=opt.dropout_p,
n_layers=opt.n_layers,
bidirectional=opt.bidirectional,
rnn_cell=opt.rnn_cell,
variable_lengths=True,
embedding=input_vocab.vectors if opt.use_pre_trained_embedding else None,
update_embedding=opt.update_embedding
)
decoder = DecoderRNN(
vocab_size=len(tgt.vocab),
max_len=opt.max_length,
hidden_size=opt.hidden_size * 2 if opt.bidirectional else opt.hidden_size,
sos_id=tgt.sos_id,
eos_id=tgt.eos_id,
n_layers=opt.n_layers,
rnn_cell=opt.rnn_cell,
bidirectional=opt.bidirectional,
input_dropout_p=opt.input_dropout_p,
dropout_p=opt.dropout_p,
use_attention=opt.use_attention
)
seq2seq = Seq2seq(encoder=encoder, decoder=decoder)
        if opt.gpu_id >= 0 and torch.cuda.is_available():
seq2seq.cuda()
for param in seq2seq.parameters():
param.data.uniform_(-0.08, 0.08)
# train
trainer = SupervisedTrainer(
loss=loss,
batch_size=opt.batch_size,
checkpoint_every=opt.checkpoint_every,
print_every=opt.print_every,
expt_dir=opt.expt_dir
)
seq2seq = trainer.train(
model=seq2seq,
data=train,
        num_epochs=opt.train_epoch,
resume=opt.resume,
dev_data=dev,
optimizer=optimizer,
teacher_forcing_ratio=opt.teacher_forcing_rate
)
def online_training():
pass
def test(opt, test_path):
if opt.load_checkpoint is not None:
# load model
logging.info(
"loading check point from {}".format(
os.path.join(opt.expt_dir, Checkpoint.CHECKPOINT_DIR_NAME, opt.load_checkpoint)
)
)
checkpoint_path = os.path.join(opt.expt_dir, Checkpoint.CHECKPOINT_DIR_NAME, opt.load_checkpoint)
checkpoint = Checkpoint.load(checkpoint_path)
seq2seq = checkpoint.model
input_vocab = checkpoint.input_vocab
output_vocab = checkpoint.output_vocab
# Prepare predictor
predictor = Predictor(seq2seq, input_vocab, output_vocab)
with open(test_path, 'r') as reader, open(test_path + '_pred', 'w') as writer:
for line in reader:
source = treebank_tokenizer(line.split("\t")[0])
writer.write(generate(source, predictor) + '\n')
def generate(input_seq, predictor):
return predictor.predict(input_seq)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# file path
parser.add_argument("--train_path", type=str, default=DATA_DIR + "nlg/train.txt", help="file path for train path")
parser.add_argument("--dev_path", type=str, default=DATA_DIR + "nlg/dev.txt", help="file path for dev path")
parser.add_argument("--test_path", type=str, default=DATA_DIR + "nlg/test.txt", help="file path for test path")
parser.add_argument('--expt_dir', action='store', dest='expt_dir', default='./experiment',
help='Path to experiment directory. If load_checkpoint, then this directory be provided')
parser.add_argument('--raw_path', type=str, default=DATA_DIR + "dia_act_nl_pairs.v6.json",
help="file path for raw data file")
parser.add_argument('--pre_trained_embedding_path', type=str, help='path to a pre_trained embedding file')
# train style setting
parser.add_argument('--load_checkpoint', action='store', dest='load_checkpoint',
help='The name of the checkpoint to load, usually an encoded time string')
parser.add_argument('--source_format', type=str, default='seq_action', help='select data\'s source end format',
choices=['seq_action'])
parser.add_argument('--checkpoint_every', type=int, default=50, help='number of batches to checkpoint after')
parser.add_argument('--print_every', type=int, default=10, help='number of batches to print after')
parser.add_argument('--resume', action='store_true', help='use the model loaded from the latest checkpoint')
# tuning setting
parser.add_argument('--random_seed', type=int, default=0, help='set random seed for experiment')
parser.add_argument('--max_length', type=int, default=50, help='max sentence length for encoder & decoder')
    parser.add_argument('--tgt_vocab_size', type=int, default=50000,
                        help='max target vocab size for decoder (the actual size will be no more than this)')
    parser.add_argument('--src_vocab_size', type=int, default=50000,
                        help='max source vocab size for encoder (the actual size will be no more than this)')
parser.add_argument('--hidden_size', type=int, default=128, help='hidden size for RNN')
parser.add_argument('--loss', type=str, default='perplexity', choices=['perplexity'], help='set loss type')
parser.add_argument('--train_epoch', type=int, default=15, help='set training epoch')
parser.add_argument('--optimizer', type=str, default='adam', choices=['adam'], help='set optimizer(do nothing now)')
parser.add_argument('--bidirectional', action='store_true', help='set rnn encoder type of direction')
parser.add_argument('--input_dropout_p', type=float, default=0.0, help='dropout probability for the input sequence')
parser.add_argument('--dropout_p', type=float, default=0.2, help='dropout probability for the rnn cell')
parser.add_argument('--n_layers', type=int, default=2, help='the layer num of rnn cell')
parser.add_argument('--rnn_cell', type=str, default='lstm', choices=['gru', 'lstm'], help='set rnn type')
    parser.add_argument('--use_pre_trained_embedding', action='store_true', help='use pre-trained embedding to init encoder')
parser.add_argument('--update_embedding', action='store_true', help='to update embedding during training')
parser.add_argument('--use_attention', action='store_true', help='use attention during decoding')
parser.add_argument('--batch_size', type=int, default=32, help='batch size for training')
parser.add_argument('--teacher_forcing_rate', type=float, default=0.5, help='set rate for teacher forcing')
# gpu setting
    parser.add_argument('--gpu_id', type=int, default=-1, help='set gpu id, -1 for not use gpu')
    # logging setting (added: opt.log_level is used below but was never defined)
    parser.add_argument('--log_level', type=str, default='info', help='set logging level, e.g. debug, info, warning')
opt = parser.parse_args()
LOG_FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
logging.basicConfig(format=LOG_FORMAT, level=getattr(logging, opt.log_level.upper()))
logging.info(opt)
# set random seed
random.seed(opt.random_seed)
torch.manual_seed(opt.random_seed)
# set gpu
    if opt.gpu_id >= 0 and torch.cuda.is_available():
        torch.cuda.set_device(opt.gpu_id)
|
<gh_stars>0
{'type':'CustomDialog',
'name':'prefsDialog',
'title':'standaloneBuilder Preferences',
'position':(133, 73),
'size':(705, 395),
'components': [
{'type':'TextField',
'name':'resEditPath',
'position':(205, 30),
'size':(405, -1),
'actionBindings':{},
'userdata':'Select the location on your computer where the PythonCard resource editor is installed.',
},
{'type':'Button',
'name':'resEditPathBtn',
'position':(620, 25),
'size':(30, 30),
'actionBindings':{},
'label':'...',
},
{'type':'Button',
'name':'resEditPathHelpBtn',
'position':(650, 25),
'size':(30, 30),
'actionBindings':{},
'label':'?',
},
{'type':'TextField',
'name':'srcEditPath',
'position':(205, 60),
'size':(405, -1),
'actionBindings':{},
'userdata':'Select the location on your computer where the PythonCard source code editor is installed.\n',
},
{'type':'Button',
'name':'srcEditPathBtn',
'position':(620, 55),
'size':(30, 30),
'actionBindings':{},
'label':'...',
},
{'type':'Button',
'name':'srcEditPathHelpBtn',
'position':(650, 55),
'size':(30, 30),
'actionBindings':{},
'label':'?',
},
{'type':'TextField',
'name':'txtEditPath',
'position':(205, 90),
'size':(405, -1),
'actionBindings':{},
'userdata':'Select the location on your computer where the PythonCard source code editor is installed.\n',
},
{'type':'Button',
'name':'txtEditPathBtn',
'position':(620, 85),
'size':(30, 30),
'actionBindings':{},
'label':'...',
},
{'type':'Button',
'name':'txtEditPathHelpBtn',
'position':(650, 85),
'size':(30, 30),
'actionBindings':{},
'label':'?',
},
{'type':'TextField',
'name':'pixmapEditPath',
'position':(205, 120),
'size':(405, -1),
'actionBindings':{},
'userdata':'Select the location on your computer where your preferred pixmap editor is installed.\n',
},
{'type':'Button',
'name':'pixmapEditPathBtn',
'position':(620, 115),
'size':(30, 30),
'actionBindings':{},
'label':'...',
},
{'type':'Button',
'name':'pixmapEditPathHelpBtn',
'position':(650, 115),
'size':(30, 30),
'actionBindings':{},
'label':'?',
},
{'type':'TextField',
'name':'compilerPath',
'position':(205, 150),
'size':(405, -1),
'actionBindings':{},
'userdata':'Select the location on your computer where the Inno setup compiler is installed.',
},
{'type':'Button',
'name':'compilerPathBtn',
'position':(620, 145),
'size':(30, 30),
'actionBindings':{},
'label':'...',
},
{'type':'Button',
'name':'compilerPathHelpBtn',
'position':(650, 145),
'size':(30, 30),
'actionBindings':{},
'label':'?',
},
{'type':'TextField',
'name':'projectsPath',
'position':(205, 180),
'size':(405, -1),
'actionBindings':{},
'userdata':'Select the location on your computer where you normally keep your PythonCard projects.',
},
{'type':'Button',
'name':'projectsPathBtn',
'position':(620, 175),
'size':(30, 30),
'actionBindings':{},
'label':'...',
},
{'type':'Button',
'name':'projectsPathHelpBtn',
'position':(650, 175),
'size':(30, 30),
'actionBindings':{},
'label':'?',
},
{'type':'Choice',
'name':'buildTool',
'position':(205, 245),
'size':(130, -1),
'actionBindings':{},
'items':[u'py2exe', u'pyInstaller'],
'stringSelection':'pyInstaller',
'userdata':'Select the tool which you would prefer to use when building the executables for your projects.',
},
{'type':'Button',
'name':'buildToolHelpBtn',
'position':(650, 245),
'size':(30, 30),
'actionBindings':{},
'label':'?',
},
{'type':'TextField',
'name':'installerPath',
'position':(205, 280),
'size':(405, -1),
'actionBindings':{},
'userdata':'Select the location on your computer where the pyInstaller software is installed. Note that standaloneBuilder assumes that you have already configured this according to the documentation that comes with it.\n',
},
{'type':'Button',
'name':'installerPathBtn',
'position':(620, 275),
'size':(30, 30),
'actionBindings':{},
'label':'...',
},
{'type':'Button',
'name':'installerPathHelpBtn',
'position':(650, 275),
'size':(30, 30),
'actionBindings':{},
'label':'?',
},
{'type':'TextField',
'name':'appPublisher',
'position':(205, 310),
'size':(405, -1),
'actionBindings':{},
'userdata':"Defines the name which will be used to add an 'AppPublisher' entry to your Inno script file. This name will appear when someone does a right click on your standalone executable and selects 'properties'.",
},
{'type':'Button',
'name':'appPublisherHelpBtn',
'position':(650, 305),
'size':(30, 30),
'actionBindings':{},
'label':'?',
},
{'type':'Button',
'id':5100,
'name':'btnOK',
'position':(520, 355),
'actionBindings':{},
'label':'OK',
},
{'type':'Button',
'id':5101,
'name':'btnCancel',
'position':(610, 355),
'actionBindings':{},
'label':'Cancel',
},
{'type':'StaticText',
'name':'StaticText2',
'position':(80, 185),
'actionBindings':{},
'text':'Projects directory:',
},
{'type':'StaticText',
'name':'StaticText7',
'position':(70, 155),
'actionBindings':{},
'text':'Inno setup compiler:',
},
{'type':'StaticText',
'name':'StaticText5',
'position':(45, 125),
'actionBindings':{},
'text':'Preferred pixmap editor:',
},
{'type':'StaticText',
'name':'StaticText9',
'position':(90, 95),
'actionBindings':{},
'text':'Plain text editor:',
},
{'type':'StaticText',
'name':'StaticText4',
'position':(45, 65),
'actionBindings':{},
'text':'PythonCard code editor:',
},
{'type':'StaticText',
'name':'StaticText3',
'position':(20, 35),
'actionBindings':{},
'text':'PythonCard resource editor:',
},
{'type':'StaticBox',
'name':'StaticBox1',
'position':(10, 5),
'size':(685, 215),
'actionBindings':{},
'label':'Paths to external files',
},
{'type':'StaticText',
'name':'StaticText6',
'position':(90, 315),
'actionBindings':{},
'text':'Publisher name:',
},
{'type':'StaticText',
'name':'StaticText1',
'position':(75, 285),
'actionBindings':{},
'text':'Path to pyInstaller:',
},
{'type':'StaticText',
'name':'StaticText8',
'position':(70, 255),
'actionBindings':{},
'text':'Preferred build tool:',
},
{'type':'StaticBox',
'name':'StaticBox2',
'position':(10, 225),
'size':(685, 125),
'actionBindings':{},
'label':'Other settings',
},
] # end components
} # end CustomDialog
'''
@author: <NAME> (jakpra)
@copyright: Copyright 2020, <NAME>
@license: Apache 2.0
'''
import sys
import math
from operator import itemgetter
from collections import OrderedDict, Counter
import time
import random
import torch
import torch.nn.functional as F
import torch.optim as optim
from .oracle.oracle import make_unordered_valid_loss
# import ExAssist as EA
UNK = '<UNKNOWN>'
PAD = '<PADDING>'
START = '<START>'
END = '<END>'
SEP = '<SEP>'
def create_emb_layer(weights_matrix, trainable=True):
num_embeddings, embedding_dim = weights_matrix.size()
emb_layer = torch.nn.Embedding(num_embeddings, embedding_dim)
emb_layer.load_state_dict({'weight': weights_matrix})
if not trainable:
emb_layer.weight.requires_grad = False
return emb_layer, num_embeddings, embedding_dim
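# Hedged usage sketch (not part of the original training code): build a frozen
# embedding layer from a pretrained weight matrix. The 100x300 matrix below is
# random and merely stands in for e.g. GloVe vectors loaded elsewhere.
def _example_create_emb_layer():
    weights = torch.randn(100, 300)        # (vocab_size, embedding_dim), hypothetical
    emb, vocab_size, dim = create_emb_layer(weights, trainable=False)
    ids = torch.tensor([[1, 2, 3]])        # toy batch of token indices
    return emb(ids), vocab_size, dim       # embeddings of shape (1, 3, 300)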
OPTIM = {'sgd': lambda params, **kwargs: optim.SGD(params, lr=kwargs['lr'], momentum=kwargs['momentum']),
'adam': lambda params, **kwargs: optim.Adam(params, lr=kwargs['lr'], eps=kwargs['eps'],
weight_decay=kwargs['weight_decay']),
'adamw': lambda params, **kwargs: optim.AdamW(params, lr=kwargs['lr'], eps=kwargs['eps'],
weight_decay=kwargs['weight_decay']),
'adagrad': optim.Adagrad}
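# Hedged sketch of how the OPTIM registry above is meant to be used: each lambda
# picks the keyword arguments it needs, so extra keys (e.g. 'momentum' for Adam)
# can simply sit unused in the kwargs dict. Note that 'adagrad' is registered as
# the raw class and would need exactly matching kwargs. Values are illustrative.
def _example_build_optimizer(params, name='adamw'):
    kwargs = {'lr': 1e-3, 'momentum': 0.9, 'eps': 1e-8, 'weight_decay': 0.01}
    return OPTIM[name](params, **kwargs)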
def load_model_states(state_dict):
# state_dict = torch.load(filename)
# create new OrderedDict that does not contain `module.`
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if 'O_T' in k: # otherwise the shared trained parameters will be overwritten with untrained ones?
continue
if k.startswith('module.'):
new_state_dict[k[7:]] = v
else:
new_state_dict[k] = v
# checkpoint = torch.load(args.model, map_location=device)
# net.load_state_dict(new_state_dict)
return new_state_dict
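# Hedged usage sketch: load a checkpoint whose keys may carry the 'module.'
# prefix added by torch.nn.DataParallel. The checkpoint layout
# ({'model_state_dict': ...}) mirrors the one saved by train() further below;
# strict=False because load_model_states drops keys containing 'O_T'.
def _example_load_checkpoint(net, path, device='cpu'):
    checkpoint = torch.load(path, map_location=device)
    net.load_state_dict(load_model_states(checkpoint['model_state_dict']), strict=False)
    return net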
def compute_acc_and_loss(task, gen, y, y_hat, address_mask, word_mask, criterion, mapped_criterion,
batch_size, seq_len, address_dim, output_dim, loss_fxn=False, oracle=None,
omega_native_atom=0., omega_atom=0., omega_full=0., lambda_enc=0.1, lambda_dec=0.,
enc_attn=None, dec_attn=None, deps=None, dep_attn_criterion=None,
dep_norm=lambda t: F.normalize(t, p=1, dim=1),
output_correct_bool=False):
y = y.to(y_hat).long()
# print('y', y.size(), y[0, 3], [(i+1, gen.ix_to_out[j.item()]) for i, j in enumerate(y[0, 3])], file=sys.stderr)
word_mask = word_mask.to(address_mask)
words = word_mask.float().sum()
y_hat = y_hat.view(batch_size, -1, address_dim, output_dim)
address_mask = address_mask.view(batch_size, -1, address_dim)
y_hat_len = y_hat.size(1)
if y_hat_len < seq_len:
y_hat = torch.cat([y_hat, torch.zeros(batch_size, seq_len - y_hat_len, address_dim, output_dim).to(y_hat)], dim=1)
address_mask = torch.cat([address_mask, torch.zeros(batch_size, seq_len - y_hat_len, address_dim).to(address_mask)], dim=1)
elif y_hat_len > seq_len:
y_hat = y_hat[:, :seq_len]
address_mask = address_mask[:, :seq_len]
argmaxes = torch.argmax(y_hat, dim=3)
categories_gold = gen.extract_outputs(y.view(batch_size, seq_len, address_dim))
categories_hat = gen.extract_outputs(argmaxes)
if loss_fxn in ('avg', 'all'):
# print('compute dynamic loss')
loss = criterion[0](y_hat, categories_hat if oracle is None else gen.extract_outputs(oracle), categories_gold) / words
else:
# TODO: change SEP to PAD in y, so that loss ignores it
y_hat = y_hat.reshape(-1, output_dim)
address_mask = address_mask.reshape(batch_size, -1, address_dim)
y = y.view(-1)
# native loss
# y = y.view(-1)
# y_hat = y_hat.transpose(1, -1).reshape(batch_size, output_dim, -1)
# sum everything, then normalize over batch and sequence, but not over addresses
# print(criterion)
# print(y_hat.shape, y.shape)
native_loss = criterion[0](y_hat, y) / words # (batch_size * words) # y.view(-1)
# average over everything (incl addresses)
native_atomic_loss = criterion[1](y_hat, y) # y.view(-1)
# category-level loss
# category_loss = atomic_loss / address_dim
# TODO: check which one of these is really correct
# # category-level loss
# y = y.view(-1, address_dim)
# # y_hat = y_hat.view(-1, output_dim, address_dim)
# y_hat = y_hat.view(-1, address_dim, output_dim).transpose(1, 2)
# category_loss = criterion(y_hat, y) / (batch_size * seq_len)
if hasattr(gen, 'address_map'):
address_mask = address_mask.view(batch_size, -1, address_dim)
mask = (~word_mask).unsqueeze(-1).expand(-1, -1, address_dim) | (~address_mask)
argmaxes[mask] = gen.out_to_ix[PAD]
atomic_output_dim = gen.address_map.output_dim
atomic_address_dim = gen.address_map.address_dim
mapped_y = gen.address_map(y.view(-1, address_dim), indices=True, argmax=True)
# print('mapped_y', mapped_y.size(), mapped_y.view(batch_size, seq_len, atomic_address_dim)[0, :, :6], file=sys.stderr)
# exit(0)
# print('mapped_y', mapped_y.size(), mapped_y[3, :6], [(i+1, gen.address_map.ix_to_out[j.item()]) for i, j in enumerate(mapped_y[3])], file=sys.stderr)
# print('y_hat', y_hat.size(), y_hat[0, 3, :6], file=sys.stderr)
mapped_y_hat = gen.address_map(y_hat.view(-1, address_dim, output_dim), norm=True)
# print('mapped_y_hat', mapped_y_hat.size(), mapped_y_hat[3, :6], file=sys.stderr)
if loss_fxn not in ('avg', 'all'):
full_loss = mapped_criterion[0](mapped_y_hat.view(-1, atomic_output_dim), mapped_y.view(-1)) / words
atomic_loss = mapped_criterion[1](mapped_y_hat.view(-1, atomic_output_dim), mapped_y.view(-1))
# full_loss = criterion(mapped_y_hat.view(-1, atomic_address_dim, atomic_output_dim).transpose(1, 2), mapped_y.view(-1, atomic_address_dim)) / (batch_size * seq_len * atomic_address_dim)
# mask = mask.view(-1, atomic_address_dim)
# print('mask', mask.size(), file=sys.stderr)
# print('argmaxes', argmaxes.size(), argmaxes[0, 3, :6], file=sys.stderr)
mapped_argmaxes = gen.address_map(argmaxes.view(-1, address_dim), indices=True, argmax=True).view(batch_size, -1,
atomic_address_dim)
# print('mapped_argmaxes', mapped_argmaxes.size(), mapped_argmaxes[0, :, :6], file=sys.stderr)
correct_bool = torch.all(torch.eq(mapped_argmaxes, mapped_y.view(batch_size, -1, atomic_address_dim)), dim=2)
else:
full_loss = atomic_loss = 0.
argmaxes = argmaxes.view(batch_size, -1)
address_mask = address_mask.view(batch_size, -1)
argmaxes[~address_mask] = gen.out_to_ix[PAD]
y_hat_seps = (argmaxes == gen.out_to_ix[SEP]).nonzero() # indices of separators in pred: [[b0, s0], [b1, s1], ...]
y = y.view(batch_size, -1)
y_seps = (y == gen.out_to_ix[SEP]).nonzero() # indices of separators in gold
max_words = word_mask.size(1)
correct_bool = torch.zeros(batch_size, max_words, dtype=torch.bool).to(word_mask)
# correct_bool = torch.eq(argmaxes, y.view(batch_size, -1, address_dim))
last_batch = 0
last_y_hat_sep = 0
last_y_sep = 0
i = 0
y_hat_seps = iter(y_hat_seps)
try:
for yb, ys in y_seps:
yb, ys = yb.item(), ys.item()
if yb != last_batch:
last_y_sep = 0
i = 0
if i >= max_words:
continue
try:
yhb, yhs = next(y_hat_seps)
yhb, yhs = yhb.item(), yhs.item()
while yhb != yb:
yhb, yhs = next(y_hat_seps)
yhb, yhs = yhb.item(), yhs.item()
except StopIteration:
correct_bool[yb, i] = False
else:
correct_bool[yb, i] = yhs-last_y_hat_sep == ys-last_y_sep and torch.all(torch.eq(argmaxes[yhb, last_y_hat_sep:yhs], y[yb, last_y_sep:ys]))
last_y_hat_sep = yhs
last_batch, last_y_sep, i = yb, ys, i+1
except ValueError as e:
raise ValueError(*e.args, y_hat_seps, y_seps)
except IndexError as e:
raise IndexError(*e.args, f'yb={yb}, last_batch={last_batch}, ys={ys}, last_y_sep={last_y_sep}, i={i}')
category_acc = (correct_bool & word_mask).float().sum() / words
has_enc_attn = enc_attn is not None
has_dec_attn = dec_attn is not None
if loss_fxn not in ('avg', 'all'):
loss = (1. - omega_native_atom - omega_atom - omega_full) * native_loss + \
omega_native_atom * native_atomic_loss + \
omega_atom * atomic_loss + \
omega_full * full_loss
lbda = 1. - int(has_enc_attn) * lambda_enc - int(has_dec_attn) * lambda_dec
loss = loss.clone() * lbda
if deps is None:
# loss += torch.sum(torch.abs(dec_attn)) / (batch_size * seq_len * address_dim)
pass
else:
if has_dec_attn:
dec_deps = torch.diagflat(torch.ones(seq_len * address_dim, dtype=torch.float32)
).view(seq_len, address_dim, seq_len, address_dim)
dec_deps = dec_deps.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1)
if has_enc_attn:
enc_deps = torch.diagflat(torch.ones(seq_len, dtype=torch.float32))
enc_deps = enc_deps.unsqueeze(0).unsqueeze(2).repeat(batch_size, 1, address_dim, 1)
for n, seq in enumerate(deps):
for i, args in enumerate(seq):
if not word_mask[n, i].item():
break
for a, j, b in args:
if not address_mask[n, i, a].item():
continue
d_a = math.floor(math.log2(a+1))
p_a = (a+1) // 2 - 1
if has_enc_attn:
enc_deps[n, i, a, i] = 0.
enc_deps[n, i, a, j] += 1.
# parent slash
enc_deps[n, i, p_a, i] = 0.
enc_deps[n, i, p_a, j] += 1.
# children and descendents
for log_breadth, depth in enumerate(range(d_a+1, gen.max_depth), start=1):
first_addr = 2 ** depth - 1
any_at_depth = False
for c_a in range(first_addr, first_addr+2**log_breadth):
if address_mask[n, i, c_a].item():
any_at_depth = True
enc_deps[n, i, c_a, i] = 0.
enc_deps[n, i, c_a, j] += 1.
if not any_at_depth:
break
# TODO: not sure about this one
# enc_deps[j, n, j, b] = 0.
# enc_deps[i, n, j, b] += 1.
if has_dec_attn:
d_b = math.floor(math.log2(b+1))
if d_b < d_a:
# head's attn to deps (note that key of attn has to be in first dim for KLLoss)
# (key_token, key_addr, batch, query_token, query_addr)
dec_deps[n, i, a, i, a] = 0.
dec_deps[n, i, a, j, b] = 1.
elif d_a < d_b:
# dep's attn to heads (note that key of attn has to be in first dim for KLLoss)
# (key_token, key_addr, batch, query_token, query_addr)
dec_deps[n, j, b, j, b] = 0.
dec_deps[n, j, b, i, a] = 1.
if has_dec_attn:
dec_deps = dec_deps.view(-1, seq_len*address_dim).to(dec_attn)
# total_batch_size, self.address_dim, seq_len, self.address_dim
dec_attn = dec_attn.view(-1, seq_len*address_dim) # .permute(2, 3, 0, 1).reshape
dec_attn_loss = dep_attn_criterion(F.log_softmax(dec_attn, dim=1), dep_norm(dec_deps))
loss += lambda_dec * dec_attn_loss
del dec_attn, dec_deps
if has_enc_attn:
enc_deps = enc_deps.view(-1, seq_len).to(enc_attn)
# total_batch_size, self.address_dim, seq_len
enc_attn = enc_attn.view(-1, seq_len) # .permute(2, 0, 1).reshape
enc_attn_loss = dep_attn_criterion(F.log_softmax(enc_attn, dim=1), dep_norm(enc_deps))
loss += lambda_enc * enc_attn_loss
del enc_attn, enc_deps
result = category_acc, loss
if output_correct_bool:
result = (*result, (argmaxes, correct_bool, categories_hat, categories_gold))
del word_mask, address_mask, y, y_hat
return result
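# Hedged scalar illustration (not the real tensors) of the loss mixing performed
# above: the native category-level loss is blended with the atomic-level and
# mapped losses via the omega weights, then rescaled by the lambda terms when
# encoder/decoder attention supervision is added.
def _example_loss_mixing(native=2.0, native_atomic=1.5, atomic=1.2, full=1.0,
                         omega_native_atom=0.1, omega_atom=0.1, omega_full=0.2,
                         enc_attn_loss=0.5, dec_attn_loss=0.4,
                         lambda_enc=0.1, lambda_dec=0.05):
    loss = (1. - omega_native_atom - omega_atom - omega_full) * native + \
           omega_native_atom * native_atomic + omega_atom * atomic + omega_full * full
    loss = loss * (1. - lambda_enc - lambda_dec)
    loss += lambda_enc * enc_attn_loss + lambda_dec * dec_attn_loss
    return loss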
# TODO: append accs and losses to a file so they don't get overwritten by subsequent runs
def train(net, trainloader, optimizer, devloader=None,
criterion=torch.nn.CrossEntropyLoss, dep_attn_criterion=torch.nn.KLDivLoss,
# optimizer_name='sgd', learning_rate=0.001, momentum=0.9,
epochs=1, start_epoch=0, dev_acc=0.0, dev_loss=None, seed=42,
loss_fxn='crossent',
omega_native_atom=0., omega_atom=0., omega_full=0., lambda_enc=0.1, lambda_dec=0.,
batch_size=4, max_batches=None, n_print=100, model='ccg-glove', device='cpu', device_ids=[0]):
# , device='cpu', device_ids=[0]):
random.seed(seed)
# device = device # torch.device(f'cuda:{cuda_device}' if cuda.is_available() else 'cpu')
#
# _optimizer = optimizer if optimizer is not None \
# else OPTIM.get(optimizer_name, optim.SGD)(net.parameters(), lr=learning_rate, momentum=momentum)
torch.autograd.set_detect_anomaly(True)
# atomic_criterion = criterion(ignore_index=net.out_to_ix[PAD], reduction='sum')
# category_criterion = criterion(ignore_index=net.out_to_ix[PAD], reduction='sum')
criteria = []
mapped_criteria = []
dep_attn_criteria = []
for gen in net.generators:
# weight = torch.ones(gen.output_dim, dtype=torch.float32)
# weight.index_fill_(0, gen.grow_label_ix, 2.)
if loss_fxn in ('avg', 'all'):
# print('instantiate dynamic loss')
criteria.append((make_unordered_valid_loss(gen.out_to_ix, fxn=loss_fxn), None))
else:
criteria.append((criterion(ignore_index=gen.out_to_ix[PAD], reduction='sum'),
criterion(ignore_index=gen.out_to_ix[PAD], reduction='mean')))
# crt = lambda x, y: torch.nn.KLDivLoss(reduction='batchmean')(F.log_softmax(x, dim=1), y)
# criteria.append((FuzzyLoss(crt, gen.output_dim+1, 0.2, gen.out_to_ix[PAD]),
# FuzzyLoss(crt, gen.output_dim+1, 0.2, gen.out_to_ix[PAD])))
# TODO: maybe indent this under the `else`?
if hasattr(gen, 'address_map'):
mapped_criteria.append((criterion(ignore_index=gen.address_map.out_to_ix[PAD], reduction='sum'),
criterion(ignore_index=gen.address_map.out_to_ix[PAD], reduction='mean')))
if getattr(gen, 'attention', False):
dep_attn_criteria.append(dep_attn_criterion(reduction='batchmean'))
else:
dep_attn_criteria.append(None)
best_dev_loss = dev_loss
best_dev_acc = dev_acc
best_epoch = start_epoch
train_len = len(trainloader)
if max_batches is not None:
train_len = min(max_batches, train_len)
if devloader is not None:
dev_len = len(devloader)
_optimizer = optimizer['optimizer']
optimizer_name = optimizer['optimizer_name']
optimizer_kwargs = optimizer['optimizer_kwargs']
if optimizer_kwargs['use_schedule']:
steps_per_epoch = train_len // batch_size + int(train_len % batch_size != 0)
scheduler = optim.lr_scheduler.OneCycleLR(_optimizer,
optimizer_kwargs['lr'],
epochs=epochs,
steps_per_epoch=steps_per_epoch,
pct_start=optimizer_kwargs['pct_start'],
anneal_strategy=optimizer_kwargs['anneal_strategy'],
last_epoch=start_epoch * steps_per_epoch - 1)
# with EA.start(_assist) as assist:
# try:
epoch_running_loss = 0.0
epoch_running_acc = 0.0
# assist.info['epoch'] = 0
# assist.info['train_loss'] = 0
# assist.info['train_acc'] = 0
# assist.info['dev_loss'] = 0
# assist.info['dev_acc'] = 0
# assist.info['batch'] = 0
# assist.info['batch_loss'] = 0
# assist.info['batch_acc'] = 0
# assist.info['ex_per_s'] = 0
# assist.step()
for epoch in range(start_epoch, start_epoch+epochs): # loop over the dataset multiple times
if True: # epoch > 0:
net.eval()
if devloader is None:
dev_acc = dev_loss = 0.0
else:
with torch.no_grad():
running_dev_acc = 0.0
running_dev_loss = 0.0
running_batch_time = 0.0
n_words = 0
gold_categories = Counter()
generated_categories = Counter()
correct_categories = Counter()
for data in devloader:
x, ys = data['x'], data['y']
word_mask = (x['word'] != net.span_encoder.pad_token_id)
start_time = time.time()
result = net(x, word_mask=word_mask)
running_batch_time += time.time() - start_time
# y_hats, masks = net(x)
y_hat, address_mask = result['y_hat'], result['mask']
task, gen, y, criterion = net.tasks[0], \
net.generators[0], \
ys[0], \
criteria[0]
mapped_criterion = mapped_criteria[0] if hasattr(gen, 'address_map') else None
seq_len = y.size(1)
output_dim = gen.output_dim
address_dim = gen.address_dim
# y_hat = y_hat.view(batch_size, -1, address_dim, output_dim)
# argmaxes = torch.argmax(y_hat, dim=3)
acc, loss, (argmaxes, correct_bool, categories_hat, categories_gold) = \
compute_acc_and_loss(task, gen, y, y_hat,
address_mask, word_mask,
criterion, mapped_criterion,
batch_size, seq_len, address_dim,
output_dim,
loss_fxn=loss_fxn,
omega_native_atom=omega_native_atom,
omega_atom=omega_atom, omega_full=omega_full,
lambda_enc=lambda_enc, lambda_dec=lambda_dec,
output_correct_bool=True)
# print(argmaxes)
running_dev_acc += acc.item()
running_dev_loss += loss.item()
correct_indices = correct_bool.nonzero().tolist()
# print('gold', end=' ')
# print(y.size(), y[0, 0].tolist())
# categories_gold = gen.extract_outputs(y.view(batch_size, -1, address_dim))
# print(categories_gold)
# print('pred', end=' ')
# print(argmaxes.size(), argmaxes[0, 0].tolist())
# categories_hat = gen.extract_outputs(argmaxes)
# print(len(categories_gold), len(categories_hat))
for b, (sequence, sequence_gold) in enumerate(zip(categories_hat, categories_gold)):
# print('gold', sequence_gold, file=sys.stderr)
# print('pred', sequence, file=sys.stderr)
for s, cat_gold in enumerate(sequence_gold):
cat = sequence[s] if s < len(sequence) else None
n_words += 1
correct_index = [b, s] in correct_indices
# assert (cat == cat_gold) == (correct_index), (
# b, s, cat, cat_gold, argmaxes[b, s], mask[b, s], y[b, s],
# f'[{b}, {s}] {"not " if not correct_index else ""}in correct_indices')
if cat is None:
cat = 'None'
else:
msg = cat.validate()
if msg != 0:
if hasattr(gen, 'max_depth') and cat.depth() >= gen.max_depth:
msg = 'Max depth reached'
# print(b, s, msg, str(cat), cat.s_expr())
cat = msg
elif hasattr(gen, 'max_len') and argmaxes.size(1) >= gen.max_len:
msg = 'Max length reached'
# print(b, s, msg, str(cat), cat.s_expr())
cat = msg
else:
# print(b, s, msg[0], str(cat), cat.s_expr(), file=sys.stderr)
# print(argmaxes[b, s], file=sys.stderr)
cat = msg[0]
gold_categories[str(cat_gold)] += 1
generated_categories[str(cat)] += 1
if correct_index:
correct_categories[str(cat)] += 1
for k in x:
if isinstance(x[k], torch.Tensor):
x[k] = x[k].cpu()
for i in range(len(ys)):
_y = ys[i]
if isinstance(_y, torch.Tensor):
ys[i] = _y.cpu()
del x, word_mask, ys, y_hat, address_mask, argmaxes, correct_bool, acc, loss
torch.cuda.empty_cache()
dev_acc = running_dev_acc / dev_len
dev_loss = running_dev_loss / dev_len
epoch_loss = epoch_running_loss / train_len
epoch_acc = epoch_running_acc / train_len
# assist.info['epoch'] = epoch
# assist.info['train_loss'] = epoch_loss
# assist.info['train_acc'] = epoch_acc
# assist.info['dev_loss'] = dev_loss
# assist.info['dev_acc'] = dev_acc
# assist.step()
print('[epoch %d summary] train loss: %.3f | train acc: %.3f | dev loss: %.3f | dev acc: %.3f | %.3f batches/s | %.3f expls/s' %
(epoch,
epoch_loss,
epoch_acc,
dev_loss,
dev_acc,
dev_len / running_batch_time,
(dev_len * batch_size) / running_batch_time
),
file=sys.stderr)
if devloader is not None:
print(f'most common gold categories (out of {n_words} in dev): '
f'{" | ".join(str(item) for item in gold_categories.most_common(10))}',
file=sys.stderr)
print(f'most common generated categories (out of {n_words} in dev): '
f'{" | ".join(str(item) for item in generated_categories.most_common(10))}',
file=sys.stderr)
print(f'most common correct categories (out of {n_words} in dev): '
f'{" | ".join(str(item) for item in correct_categories.most_common(10))}',
file=sys.stderr)
sys.stderr.flush()
if devloader is None \
or dev_acc > best_dev_acc \
or dev_acc == best_dev_acc and (best_dev_loss is None or dev_loss < best_dev_loss):
best_dev_acc = dev_acc
best_dev_loss = dev_loss
best_epoch = epoch
torch.save({'model': net.to_dict(),
'model_state_dict': net.state_dict(),
'optimizer_state_dict': _optimizer.state_dict(),
'optimizer_name': optimizer_name,
'optim_kwargs': optimizer_kwargs,
'epoch': best_epoch,
'dev_acc': best_dev_acc,
'dev_loss': best_dev_loss}, model)
net.train()
epoch_running_loss = 0.0
epoch_running_acc = 0.0
# running_atom_loss = 0.0
# running_cat_loss = 0.0
running_loss = 0.0
running_acc = 0.0
running_batch_time = 0.0
# batch_indices = random.sample(range(train_len), train_len)
random.shuffle(trainloader)
for train_i, data in enumerate(trainloader):
start_time = time.time()
if max_batches is not None and train_i > max_batches:
break
# get the inputs; data is a list of [inputs, labels]
# *x, y = data
# x, y = data # supertagger
# print('x', x, file=sys.stderr)
x, ys = data['x'], data['y']
# seq_len = x['word'].size(1)
word_mask = (x['word'] != net.span_encoder.pad_token_id)
task, gen, y, criterion, dep_attn_criterion = net.tasks[0], \
net.generators[0], \
ys[0], \
criteria[0], \
dep_attn_criteria[0]
mapped_criterion = mapped_criteria[0] if hasattr(gen, 'address_map') else None
deps = data.get('dependencies', [None])[0]
seq_len = y.size(1)
result = net(x, ys=ys, word_mask=word_mask)
y_hat, address_mask = result['y_hat'], result['mask']
# y_hats, masks = y_hats.transpose(0, 1), masks.transpose(0, 1)
# mtl_loss = 0.
# zero the parameter gradients
_optimizer.zero_grad()
# print('y', y.size(), file=sys.stderr)
# print('y_hat', y_hat.size(), file=sys.stderr)
output_dim = gen.output_dim
address_dim = gen.address_dim
# y_hat = y_hat.view(batch_size, -1, address_dim, output_dim)
# seq_len = y_hat.size(1)
# with torch.no_grad():
# argmaxes = torch.argmax(y_hat, dim=3)
acc, loss = compute_acc_and_loss(task, gen, y, y_hat,
address_mask, word_mask,
criterion, mapped_criterion,
batch_size, seq_len, address_dim,
output_dim,
loss_fxn=loss_fxn, oracle=result.get('y'),
omega_native_atom=omega_native_atom,
omega_atom=omega_atom, omega_full=omega_full,
lambda_enc=lambda_enc, lambda_dec=lambda_dec,
enc_attn=result.get('enc_attn'),
dec_attn=result.get('dec_attn'),
deps=deps,
dep_attn_criterion=dep_attn_criterion)
# with torch.no_grad():
running_acc += acc.item()
running_loss += loss.item()
epoch_running_loss += loss.item()
epoch_running_acc += acc.item()
# categories_hat = gen.extract_outputs(argmaxes)
# val = [(cat.validate(), cat) for s in categories_hat for cat in s]
# assert all([not v[0] for v in val]), val
# mtl_loss = loss
#
# if len(net.tasks) > 1:
# # y_hat, address_mask = net(x)
# for i, (task, gen, y, y_hat, address_mask, criterion) in enumerate(zip(net.tasks, net.generators,
# ys, y_hats, masks, criteria)
# )[1:]:
# output_dim = gen.output_dim
# address_dim = gen.address_dim
# # print('y_hat', y_hat.size(), file=sys.stderr)
# # y_hat = y_hat.view(batch_size, seq_len, address_dim, output_dim)
# # print('y_hat', y_hat.size(), file=sys.stderr)
# # y_hat = y_hat.view(-1, output_dim)
# # print('y_hat', y_hat.size(), file=sys.stderr)
# argmaxes = torch.argmax(y_hat, dim=3)
# acc, loss = compute_acc_and_loss(task, gen, y, y_hat, argmaxes,
# address_mask, word_mask,
# criterion,
# batch_size, seq_len, address_dim,
# output_dim)
#
# mtl_loss += loss
running_batch_time += time.time() - start_time
# mtl_loss = torch.sum(torch.cat(losses, dim=0), dim=0, keepdim=True)
# mtl_loss.backward()
loss.backward()
_optimizer.step()
if train_i % n_print == n_print - 1: # print every n mini-batches
batch_time = running_batch_time / n_print
print('[%d, %5d] loss: %.3f | acc: %.3f | %.1f %s | %.1f %s' % (epoch + 1, train_i + 1,
running_loss / n_print,
running_acc / n_print,
batch_time if batch_time >= 1 else 1 / batch_time,
's/batch' if batch_time >= 1 else 'batch(es)/s',
batch_time/batch_size if batch_time/batch_size >= 1 else batch_size/batch_time,
's/expl' if batch_time/batch_size >= 1 else 'expl(s)/s'),
file=sys.stderr)
# if str(device).startswith('cuda'):
# print(torch.cuda.memory_summary(abbreviated=False), file=sys.stderr)
# assist.info['batch'] = train_i + 1
# assist.info['batch_loss'] = running_loss / n_print
# assist.info['batch_acc'] = running_acc / n_print
# assist.info['ex_per_s'] = batch_size / batch_time
# assist.step()
running_loss = 0.0
running_acc = 0.0
running_batch_time = 0.0
for k in x:
if isinstance(x[k], torch.Tensor):
x[k] = x[k].cpu()
for k in result:
if isinstance(result[k], torch.Tensor):
result[k] = result[k].cpu()
for i in range(len(ys)):
_y = ys[i]
if isinstance(_y, torch.Tensor):
ys[i] = _y.cpu()
del x, word_mask, ys, y_hat, address_mask, acc, loss, result
torch.cuda.empty_cache()
if optimizer_kwargs['use_schedule']:
scheduler.step()
# except:
# # raise
# exit(1)
net.eval()
if devloader is None:
dev_acc = dev_loss = 0.0
else:
with torch.no_grad():
running_dev_acc = 0.0
running_dev_loss = 0.0
n_words = 0
gold_categories = Counter()
generated_categories = Counter()
correct_categories = Counter()
for data in devloader:
x, ys = data['x'], data['y']
word_mask = (x['word'] != net.span_encoder.pad_token_id)
result = net(x, word_mask=word_mask)
# y_hats, masks = net(x)
y_hat, address_mask = result['y_hat'], result['mask']
# on dev, only compute acc and loss for primary task
task, gen, y, criterion = net.tasks[0], \
net.generators[0], \
ys[0], \
criteria[0]
mapped_criterion = mapped_criteria[0] if hasattr(gen, 'address_map') else None
seq_len = y.size(1)
output_dim = gen.output_dim
address_dim = gen.address_dim
y_hat = y_hat.view(batch_size, -1, address_dim, output_dim)
# argmaxes = torch.argmax(y_hat, dim=3)
acc, loss, (argmaxes, correct_bool, categories_hat, categories_gold) = \
compute_acc_and_loss(task, gen, y, y_hat,
address_mask, word_mask,
criterion, mapped_criterion,
batch_size, seq_len, address_dim,
output_dim,
loss_fxn=loss_fxn,
omega_native_atom=omega_native_atom,
omega_atom=omega_atom, omega_full=omega_full,
lambda_enc=lambda_enc, lambda_dec=lambda_dec,
output_correct_bool=True)
running_dev_acc += acc.item()
running_dev_loss += loss.item()
correct_indices = correct_bool.nonzero().tolist()
# categories_gold = gen.extract_outputs(y.view(batch_size, -1, address_dim))
# categories_hat = gen.extract_outputs(argmaxes)
for b, (sequence, sequence_gold) in enumerate(zip(categories_hat, categories_gold)):
# print(sequence, sequence_gold, file=sys.stderr)
# print('gold', sequence_gold, file=sys.stderr)
# print('pred', sequence, file=sys.stderr)
for s, (cat, cat_gold) in enumerate(zip(sequence, sequence_gold)):
n_words += 1
correct_index = [b, s] in correct_indices
# assert (cat == cat_gold) == (correct_index), (
# b, s, cat, cat_gold, argmaxes[b, s], mask[b, s], y[b, s],
# f'[{b}, {s}] {"not " if not correct_index else ""}in correct_indices')
if cat is None:
cat = 'None'
else:
msg = cat.validate()
if msg != 0:
if hasattr(gen, 'max_depth') and cat.depth() >= gen.max_depth:
msg = 'Max depth reached'
# print(b, s, msg, str(cat), cat.s_expr())
cat = msg
elif hasattr(gen, 'max_len') and argmaxes.size(1) >= gen.max_len:
msg = 'Max length reached'
# print(b, s, msg, str(cat), cat.s_expr())
cat = msg
else:
print(b, s, msg[0], str(cat), cat.s_expr(), file=sys.stderr)
print(argmaxes[b, s], file=sys.stderr)
cat = msg[0]
gold_categories[str(cat_gold)] += 1
generated_categories[str(cat)] += 1
if correct_index:
correct_categories[str(cat)] += 1
for k in x:
if isinstance(x[k], torch.Tensor):
x[k] = x[k].cpu()
for i in range(len(ys)):
_y = ys[i]
if isinstance(_y, torch.Tensor):
ys[i] = _y.cpu()
del x, word_mask, ys, y_hat, address_mask, argmaxes, correct_bool, acc, loss
torch.cuda.empty_cache()
dev_acc = running_dev_acc / dev_len
dev_loss = running_dev_loss / dev_len
if devloader is None \
or dev_acc > best_dev_acc \
or dev_acc == best_dev_acc and (best_dev_loss is None or dev_loss < best_dev_loss):
best_dev_acc = dev_acc
best_dev_loss = dev_loss
best_epoch = epoch + 1
torch.save({'model': net.to_dict(),
'model_state_dict': net.state_dict(),
'optimizer_state_dict': _optimizer.state_dict(),
'optimizer_name': optimizer_name,
'optim_kwargs': optimizer_kwargs,
'epoch': best_epoch,
'dev_acc': best_dev_acc,
'dev_loss': best_dev_loss
}, model)
epoch_loss = epoch_running_loss / train_len
epoch_acc = epoch_running_acc / train_len
# assist.info['epoch'] = epoch + 1
# assist.info['train_loss'] = epoch_loss
# assist.info['train_acc'] = epoch_acc
# assist.info['dev_loss'] = dev_loss
# assist.info['dev_acc'] = dev_acc
# assist.step()
print('[epoch %d summary] train loss: %.3f | train acc: %.3f | dev loss: %.3f | dev acc: %.3f' %
(epoch + 1,
epoch_loss,
epoch_acc,
dev_loss,
dev_acc),
file=sys.stderr)
if devloader is not None:
print(f'most common gold categories (out of {n_words} in dev): '
f'{" | ".join(str(item) for item in gold_categories.most_common(10))}',
file=sys.stderr)
print(f'most common generated categories (out of {n_words} in dev): '
f'{" | ".join(str(item) for item in generated_categories.most_common(10))}',
file=sys.stderr)
print(f'most common correct categories (out of {n_words} in dev): '
f'{" | ".join(str(item) for item in correct_categories.most_common(10))}',
file=sys.stderr)
print('Finished Training', file=sys.stderr)
sys.stderr.flush()
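# Hedged sketch (not from the original repo) of the optimizer bundle train()
# expects: the instantiated optimizer plus its name and kwargs, where
# optimizer_kwargs must at least provide 'use_schedule' and, if that is True,
# the OneCycleLR settings 'lr', 'pct_start' and 'anneal_strategy'.
def _example_optimizer_bundle(net):
    kwargs = {'lr': 1e-3, 'eps': 1e-8, 'weight_decay': 0.01, 'momentum': 0.9,
              'use_schedule': True, 'pct_start': 0.3, 'anneal_strategy': 'cos'}
    return {'optimizer': OPTIM['adamw'](net.parameters(), **kwargs),
            'optimizer_name': 'adamw',
            'optimizer_kwargs': kwargs}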
class NN(torch.nn.Module):
def __init__(self):
super().__init__()
# self.activation = torch.nn.ModuleList()
self.hidden = torch.nn.ModuleList()
def prepare_input(self, *args, **kwargs):
pass
def prepare_output(self, *args, **kwargs):
pass
def forward(self, x):
pass
def to_dict(self):
return {}
class Encoder(NN):
def __init__(self, *args, hidden_dims=[0], **kwargs):
super().__init__()
self.hidden_dims = hidden_dims
self.hidden_dim = hidden_dims[-1]
# self._cache = None
# @property
# def cache(self):
# return self._cache
#
# @cache.setter
# def cache(self, new):
# del self._cache
# self._cache = new
def prepare_input(self, *args, **kwargs):
pass
def prepare_inputs(self, seqs, padding=None, train=False, lower=True, device=torch.device('cpu')):
_seqs = {k: [] for k in self.to_ix}
_seqs['idx'] = []
padding = (max(len(s) for s in seqs) if padding is None else padding) + 2
for seq in seqs:
idxs = {k: [] for k in _seqs}
seq = [START] + seq + [END]
for w in seq:
if lower and w not in (START, END):
w = w.lower()
if train and w not in self.to_ix['word'] and len(self.to_ix['word']) < self.vocab_sizes['word']:
self.to_ix['word'][w] = len(self.to_ix['word'])
idxs['idx'].append(self.to_ix['word'].get(w, self.to_ix['word'][UNK]))
for _n in range(1, self.feat_chars+1):
pre_key = f'pre_{_n}'
suf_key = f'suf_{_n}'
if w not in (START, END) and _n <= len(w):
pre = w[:_n]
if train and pre not in self.to_ix[pre_key] and len(self.to_ix[pre_key]) < self.vocab_sizes[pre_key]:
self.to_ix[pre_key][pre] = len(self.to_ix[pre_key])
idxs[pre_key].append(self.to_ix[pre_key].get(pre, self.to_ix[pre_key][UNK]))
suf = w[-_n:]
if train and suf not in self.to_ix[suf_key] and len(self.to_ix[suf_key]) < self.vocab_sizes[suf_key]:
self.to_ix[suf_key][suf] = len(self.to_ix[suf_key])
idxs[suf_key].append(self.to_ix[suf_key].get(suf, self.to_ix[suf_key][UNK]))
else:
idxs[pre_key].append(self.to_ix[pre_key][PAD])
idxs[suf_key].append(self.to_ix[suf_key][PAD])
while len(idxs['idx']) < padding:
for key in idxs:
idxs[key].append(self.to_ix.get(key, self.to_ix['word'])[PAD])
for key in idxs:
_seqs[key].append(idxs[key])
_seqs['idx'] = torch.tensor(_seqs['idx'], dtype=torch.long, device=device)
word_mask = ((_seqs['idx'] != self.bos_token_id) & (_seqs['idx'] != self.eos_token_id)).to(device)
_seqs['word'] = _seqs['idx'][word_mask].view(-1, padding - 2)
# print('idx', _seqs['idx'].size(), _seqs['idx'])
# print('word', _seqs['word'].size(), _seqs['word'])
for k in _seqs:
if k not in ('idx', 'word'):
_seqs[k] = torch.tensor(_seqs[k], dtype=torch.long, device=device)
return _seqs
def forward(self, x):
pass
def to_dict(self):
result = super().to_dict()
result.update({'hidden_dim': self.hidden_dim})
return result
class Scorer(NN):
def __init__(self, input_dim, output_dim, hidden_dims=[300, 300], dropout=[0.0, 0.0],
activation=F.gelu, norm=F.log_softmax): # TODO: try cube activation
super().__init__()
self.input_dim = input_dim
self.hidden_dims = hidden_dims
self.output_dim = output_dim
self.norm = norm
# if isinstance(activation, list):
# assert len(activation) == len(hidden_dims) + 1
# self.activation = torch.nn.ModuleList(activation)
# else:
# self.activation = torch.nn.ModuleList([activation()] * len(hidden_dims))
self.activation = activation
# assert len(dropout) == len(hidden_dims)
self.dropout = torch.nn.ModuleList([torch.nn.Dropout(d) for d in dropout])
if len(hidden_dims) > 0:
self.hidden.append(torch.nn.Linear(input_dim, hidden_dims[0]))
for i in range(len(hidden_dims) - 1):
self.hidden.append(torch.nn.Linear(hidden_dims[i], hidden_dims[i+1]))
# The activation layer that maps from hidden state space to output space
self.hidden2out = torch.nn.Linear(hidden_dims[-1], output_dim)
else:
# self.in2hidden = lambda x: x
self.hidden2out = torch.nn.Linear(input_dim, output_dim)
def prepare_input(self, x, *args, **kwargs):
return torch.tensor(x, dtype=torch.float32)
def prepare_output(self, y, *args, **kwargs):
return torch.tensor(y, dtype=torch.long)
def _forward(self, x):
# print('Scorer._forward', file=sys.stderr)
# print(x, file=sys.stderr)
# print(self.in2hidden, file=sys.stderr)
# print(self.activation[0], file=sys.stderr)
# x = self.activation[0](self.in2hidden(x))
# x = self.dropout[0](self.activation[0](self.in2hidden(x)))
# print(x, file=sys.stderr)
for i, h in enumerate(self.hidden):
# print(h, file=sys.stderr)
# print(self.activation[i+1], file=sys.stderr)
x = self.dropout[i](self.activation(h(x)))
# out = x if self.hidden_dims == 0 else self.hidden2out(x)
# print(out.shape)
# scores = self.norm(out, dim=1)
# print(scores.shape)
return x
def forward(self, x):
# print(out.shape)
# scores = self.norm(out, dim=1)
x = self._forward(x)
out = self.hidden2out(x)
scores = self.norm(out, dim=1)
return scores
def score(self, outcome, x) -> float:
with torch.no_grad():
scores = self.forward(x).view(-1)
# scores = self.forward(*x).view(-1)
return scores[outcome]
def classify(self, x):
with torch.no_grad():
scores = self.forward(x).view(-1)
return max(enumerate(scores), key=itemgetter(1)), scores
def to_dict(self):
result = super().to_dict()
result.update({'input_dim': self.input_dim,
'output_dim': self.output_dim,
'hidden_dims': self.hidden_dims}) # ,
# 'hidden': self.hidden})
return result
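# Hedged usage sketch: a standalone Scorer as a small MLP classifier over random
# features. Dimensions are illustrative; with the default norm (log_softmax),
# forward() returns log-probabilities, so exp() recovers the distribution.
def _example_scorer():
    scorer = Scorer(input_dim=16, output_dim=4, hidden_dims=[32], dropout=[0.1])
    x = torch.randn(1, 16)
    (best_ix, best_score), scores = scorer.classify(x)
    return best_ix, scores.exp()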
class SequenceTagger(Scorer):
def __init__(self, span_encoder, labels, hidden_dims=[300, 300], dropout=[0.0, 0.0], **kwargs):
super(SequenceTagger, self).__init__(input_dim=span_encoder.hidden_dim,
output_dim=len(labels),
hidden_dims=hidden_dims,
dropout=dropout, **kwargs)
self.span_encoder = span_encoder
self.out_to_ix = labels
self.ix_to_out = {v: k for k, v in self.out_to_ix.items()}
def prepare_inputs(self, *args, **kwargs):
return self.span_encoder.prepare_inputs(*args, **kwargs)
def forward(self, x):
x = self.span_encoder(x) # (seq, batch, in)
seq_len = x.size(0)
x = x.view(-1, self.span_encoder.hidden_dim) # (seq*batch, in)
h = super(SequenceTagger, self)._forward(x) # (seq*batch, out)
y = self.hidden2out(h)
return self.norm(y.view(-1, seq_len, self.output_dim), dim=2) # (batch, seq, out)
def to_dict(self):
result = super().to_dict()
result.update({'span_encoder': self.span_encoder.to_dict()})
# 'labels': self.out_to_ix})
return result
if __name__ == '__main__':
pass
##################################################################
## (c) Copyright 2015- by <NAME> ##
##################################################################
#====================================================================#
# fileio.py #
# Support for I/O with various file formats. Currently this only #
# contains a generic file I/O class for XSF files. In the future #
# generic XML and HDF5 support should go here. Input only #
# interfaces to these formats can be found in hdfreader.py and #
# xmlreader.py. #
# #
# Content summary: #
# XsfFile #
# Represents generic XSF, AXSF, and BXSF files. #
# Can read/write arbitrary files of these formats. #
# Useful for atomic structure and electronic density I/O. #
# #
#====================================================================#
import os
import mmap
from numpy import array,zeros,ndarray,around,arange,dot,savetxt,empty
from numpy.linalg import det,norm
from generic import obj
from developer import DevBase,error
from periodic_table import pt as ptable,is_element
from unit_converter import convert
from debug import *
class TextFile(DevBase):
# interface to mmap files
# see Python 2 documentation for mmap
def __init__(self,filepath=None):
self.mm = None
self.f = None
if filepath!=None:
self.open(filepath)
#end if
#end def __init__
def open(self,filepath):
if not os.path.exists(filepath):
self.error('cannot open non-existent file: {0}'.format(filepath))
#end if
f = open(filepath,'r')
fno = f.fileno()
#fno = os.open(filepath,os.O_RDONLY)
self.f = f
self.mm = mmap.mmap(fno,0,prot=mmap.PROT_READ)
#end def open
def __iter__(self):
for line in self.f:
yield line
#end for
#end def __iter__
def __getitem__(self,slc):
return self.mm[slc]
#end def __getitem__
def lines(self):
return self.read().splitlines()
#end def lines
def tokens(self):
return self.read().split()
#end def tokens
def readtokens(self,s=None):
return self.readline(s).split()
#end def readtokens
def readtokensf(self,s=None,*formats):
if s!=None:
self.seek(s)
#end if
self.mm.readline()
line = self.mm.readline()
stokens = line.split()
all_same = False
if len(formats)==1 and len(stokens)>1:
format = formats[0]
all_same = True
elif len(formats)>len(stokens):
self.error('formatted line read failed\nnumber of tokens and provided number of formats do not match\nline: {0}\nnumber of tokens: {1}\nnumber of formats provided: {2}'.format(line,len(stokens),len(formats)))
#end if
tokens = []
if all_same:
for stoken in stokens:
tokens.append(format(stoken))
#end for
else:
for i in xrange(len(formats)):
tokens.append(formats[i](stokens[i]))
#end for
#end if
if len(tokens)==1:
return tokens[0]
else:
return tokens
#end if
#end def readtokensf
# extended mmap interface below
def close(self):
r = self.mm.close()
self.f.close()
return r
#end def close
def seek(self,pos,whence=0,start=None,end=None):
if isinstance(pos,str):
if whence!=2 and start is None:
if whence==0:
start = 0
elif whence==1:
start = self.mm.tell()
else:
self.error('relative positioning must be either 0 (begin), 1 (current), or 2 (end)\nyou provided: {0}'.format(whence))
#end if
#end if
if whence!=2:
if end!=None:
pos = self.mm.find(pos,start,end)
else:
pos = self.mm.find(pos,start)
#end if
else:
if end!=None:
pos = self.mm.rfind(pos,start,end)
else:
pos = self.mm.rfind(pos,start)
#end if
#end if
if pos!=-1:
return self.mm.seek(pos,0)
else:
return -1
#end if
else:
return self.mm.seek(pos,whence)
#end if
#end def seek
def readline(self,s=None):
if s!=None:
self.seek(s)
#end if
return self.mm.readline()
#end def readline
def read(self,num=None):
if num is None:
return self.mm[:]
else:
return self.mm.read(num)
#end if
#end def read
# unchanged mmap interface below
def find(self,*a,**kw):
return self.mm.find(*a,**kw)
#end def find
def flush(self,*a,**kw):
return self.mm.flush(*a,**kw)
#end def flush
def move(self,dest,src,count):
return self.mm.move(dest,src,count)
#end def move
def read_byte(self):
return self.mm.read_byte()
#end def read_byte
def resize(self,newsize):
return self.mm.resize(newsize)
#end def resize
def rfind(self,*a,**kw):
return self.mm.rfind(*a,**kw)
#end def rfind
def size(self):
return self.mm.size()
#end def size
def tell(self):
return self.mm.tell()
#end def tell
def write(self,string):
return self.mm.write(string)
#end def write
def write_byte(self,byte):
return self.mm.write_byte(byte)
#end def write_byte
#end class TextFile
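# Hedged usage sketch (not part of the original module): jump to a keyword in a
# large output file via the mmap-backed seek() and parse a value from the
# matching line. The filename and keyword are hypothetical.
def example_textfile_usage():
    tf = TextFile('results.out')
    value = None
    if tf.seek('total energy')!=-1:
        line = tf.readline()             # remainder of the matching line
        value = float(line.split()[-1])  # assumes the value is the last token
    #end if
    tf.close()
    return value
#end def example_textfile_usage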
class StandardFile(DevBase):
sftype = ''
def __init__(self,filepath=None):
if filepath is None:
pass
elif isinstance(filepath,str):
self.read(filepath)
else:
self.error('unsupported input: {0}'.format(filepath))
#end if
#end def __init__
def read(self,filepath):
if not os.path.exists(filepath):
self.error('read failed\nfile does not exist: {0}'.format(filepath))
#end if
self.read_text(open(filepath,'r').read())
self.check_valid('read failed')
#end def read
def write(self,filepath=None):
self.check_valid('write failed')
text = self.write_text()
if filepath!=None:
open(filepath,'w').write(text)
#end if
return text
#end def write
def is_valid(self):
return len(self.validity_checks())==0
#end def is_valid
def check_valid(self,header=None):
messages = self.validity_checks()
if len(messages)>0:
msg = ''
if header is not None:
msg += header+'\n'
#end if
msg += 'not a valid {0} file, see below for details\n'.format(self.sftype)
for m in messages:
msg+=m+'\n'
#end for
self.error(msg)
#end if
#end def check_valid
def validity_checks(self):
messages = []
return messages
#end def validity_checks
def read_text(self,text):
self.not_implemented()
#end def read_text
def write_text(self):
self.not_implemented()
#end def write_text
#end class StandardFile
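# Hedged sketch (not part of the original module): a minimal StandardFile
# subclass illustrating the read_text/write_text contract for a toy
# 'key = value' format. Validation is inherited and trivially passes here.
class ExampleKeyValueFile(StandardFile):
    sftype = 'keyval'
    def read_text(self,text):
        self.pairs = {}
        for line in text.splitlines():
            if '=' in line:
                k,v = line.split('=',1)
                self.pairs[k.strip()] = v.strip()
            #end if
        #end for
    #end def read_text
    def write_text(self):
        return ''.join('{0} = {1}\n'.format(k,v) for k,v in self.pairs.items())
    #end def write_text
#end class ExampleKeyValueFile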
class XsfFile(StandardFile):
sftype = 'xsf'
filetypes = set(['xsf','axsf','bxsf'])
periodicities = set(['molecule','polymer','slab','crystal'])
dimensions = obj(molecule=0,polymer=1,slab=2,crystal=3)
# ATOMS are in units of Angstrom, only provided for 'molecule'
# forces are in units of Hatree/Angstrom
# each section should be followed by a blank line
def __init__(self,filepath):
self.filetype = None
self.periodicity = None
StandardFile.__init__(self,filepath)
#end def __init__
def add_to_image(self,image,name,value):
if image is None:
self[name] = value
else:
if 'images' not in self:
self.images = obj()
#end if
if not image in self.images:
self.images[image] = obj()
#end if
self.images[image][name] = value
#end if
#end def add_to_image
def read_text(self,text):
lines = text.splitlines()
i=0
self.filetype = 'xsf'
while(i<len(lines)):
line = lines[i].strip().lower()
if len(line)>0 and line[0]!='#':
tokens = line.split()
keyword = tokens[0]
image = None
if len(tokens)==2:
image = int(tokens[1])
#end if
if keyword in self.periodicities:
self.periodicity = keyword
elif keyword=='animsteps':
self.animsteps = int(tokens[1])
self.filetype = 'axsf'
elif keyword=='primvec':
primvec = array((lines[i+1]+' '+
lines[i+2]+' '+
lines[i+3]).split(),dtype=float)
primvec.shape = 3,3
self.add_to_image(image,'primvec',primvec)
i+=3
elif keyword=='convvec':
convvec = array((lines[i+1]+' '+
lines[i+2]+' '+
lines[i+3]).split(),dtype=float)
convvec.shape = 3,3
self.add_to_image(image,'convvec',convvec)
i+=3
elif keyword=='atoms':
if self.periodicity is None:
self.periodicity='molecule'
#end if
i+=1
tokens = lines[i].strip().split()
elem = []
pos = []
force = []
natoms = 0
while len(tokens)==4 or len(tokens)==7:
natoms+=1
elem.append(tokens[0])
pos.extend(tokens[1:4])
if len(tokens)==7:
force.extend(tokens[4:7])
#end if
i+=1
tokens = lines[i].strip().split()
#end while
elem = array(elem,dtype=int)
pos = array(pos,dtype=float)
pos.shape = natoms,3
self.add_to_image(image,'elem',elem)
self.add_to_image(image,'pos',pos)
if len(force)>0:
force = array(force,dtype=float)
force.shape = natoms,3
self.add_to_image(image,'force',force)
#end if
i-=1
elif keyword=='primcoord':
natoms = int(lines[i+1].split()[0])
elem = []
pos = []
force = []
for iat in range(natoms):
tokens = lines[i+2+iat].split()
elem.append(tokens[0])
pos.extend(tokens[1:4])
if len(tokens)==7:
force.extend(tokens[4:7])
#end if
#end for
try:
elem = array(elem,dtype=int)
except:
elem = array(elem,dtype=str)
#end try
pos = array(pos,dtype=float)
pos.shape = natoms,3
self.add_to_image(image,'elem',elem)
self.add_to_image(image,'pos',pos)
if len(force)>0:
force = array(force,dtype=float)
force.shape = natoms,3
self.add_to_image(image,'force',force)
#end if
i+=natoms+1
elif keyword.startswith('begin_block_datagrid'):
if keyword.endswith('2d'):
d=2
elif keyword.endswith('3d'):
d=3
else:
self.error('dimension of datagrid could not be identified: '+line)
#end if
i+=1
block_identifier = lines[i].strip().lower()
if not 'data' in self:
self.data = obj()
#end if
if not d in self.data:
self.data[d] = obj()
#end if
if not block_identifier in self.data[d]:
self.data[d][block_identifier]=obj()
#end if
data = self.data[d][block_identifier]
line = ''
while not line.startswith('end_block_datagrid'):
line = lines[i].strip().lower()
if line.startswith('begin_datagrid') or line.startswith('datagrid_'):
grid_identifier = line.replace('begin_datagrid_{0}d_'.format(d),'')
grid = array(lines[i+1].split(),dtype=int)[:d]
corner = array(lines[i+2].split(),dtype=float)
if d==2:
cell = array((lines[i+3]+' '+
lines[i+4]).split(),dtype=float)
i+=5
elif d==3:
cell = array((lines[i+3]+' '+
lines[i+4]+' '+
lines[i+5]).split(),dtype=float)
i+=6
#end if
cell.shape = d,3
dtokens = []
line = lines[i].strip().lower()
while not line.startswith('end_datagrid'):
dtokens.extend(line.split())
i+=1
line = lines[i].strip().lower()
#end while
grid_data = array(dtokens,dtype=float)
grid_data.shape = tuple(grid)
data[grid_identifier] = obj(
grid = grid,
corner = corner,
cell = cell,
values = grid_data
)
#end if
i+=1
#end while
elif keyword=='begin_info':
self.info = obj()
while line.lower()!='end_info':
line = lines[i].strip()
if len(line)>0 and line[0]!='#' and ':' in line:
k,v = line.split(':')
self.info[k.strip()] = v.strip()
#end if
i+=1
#end while
elif keyword.startswith('begin_block_bandgrid'):
self.filetype = 'bxsf'
if keyword.endswith('2d'):
d=2
elif keyword.endswith('3d'):
d=3
else:
self.error('dimension of bandgrid could not be identified: '+line)
#end if
i+=1
block_identifier = lines[i].strip().lower()
if not 'band' in self:
self.band = obj()
#end if
if not d in self.band:
self.band[d] = obj()
#end if
if not block_identifier in self.band[d]:
self.band[d][block_identifier]=obj()
#end if
band = self.band[d][block_identifier]
line = ''
while not line.startswith('end_block_bandgrid'):
line = lines[i].strip().lower()
if line.startswith('begin_bandgrid'):
grid_identifier = line.replace('begin_bandgrid_{0}d_'.format(d),'')
nbands = int(lines[i+1].strip())
grid = array(lines[i+2].split(),dtype=int)[:d]
corner = array(lines[i+3].split(),dtype=float)
if d==2:
cell = array((lines[i+4]+' '+
lines[i+5]).split(),dtype=float)
i+=6
elif d==3:
cell = array((lines[i+4]+' '+
lines[i+5]+' '+
lines[i+6]).split(),dtype=float)
i+=7
#end if
cell.shape = d,3
bands = obj()
line = lines[i].strip().lower()
while not line.startswith('end_bandgrid'):
if line.startswith('band'):
band_index = int(line.split(':')[1].strip())
bands[band_index] = []
else:
bands[band_index].extend(line.split())
#end if
i+=1
line = lines[i].strip().lower()
#end while
for bi,bv in bands.iteritems():
bands[bi] = array(bv,dtype=float)
bands[bi].shape = tuple(grid)
#end for
band[grid_identifier] = obj(
grid = grid,
corner = corner,
cell = cell,
bands = bands
)
#end if
i+=1
#end while
else:
self.error('invalid keyword encountered: {0}'.format(keyword))
#end if
#end if
i+=1
#end while
#end def read_text
def write_text(self):
c=''
if self.filetype=='xsf': # only write structure/datagrid if present
if self.periodicity=='molecule' and 'elem' in self:
c += self.write_coord()
elif 'primvec' in self:
c += ' {0}\n'.format(self.periodicity.upper())
c += self.write_vec('primvec',self.primvec)
if 'convvec' in self:
c += self.write_vec('convvec',self.convvec)
#end if
if 'elem' in self:
c+= self.write_coord()
#end if
#end if
if 'data' in self:
c += self.write_data()
#end if
elif self.filetype=='axsf': # only write image structures
c += ' ANIMSTEPS {0}\n'.format(self.animsteps)
if self.periodicity!='molecule':
c += ' {0}\n'.format(self.periodicity.upper())
#end if
if 'primvec' in self:
c += self.write_vec('primvec',self.primvec)
#end if
if 'convvec' in self:
c += self.write_vec('convvec',self.convvec)
#end if
for i in range(1,len(self.images)+1):
image = self.images[i]
if 'primvec' in image:
c += self.write_vec('primvec',image.primvec,i)
#end if
if 'convvec' in image:
c += self.write_vec('convvec',image.convvec,i)
#end if
c += self.write_coord(image,i)
#end for
elif self.filetype=='bxsf': # only write bandgrid
c += self.write_band()
#end if
return c
#end def write_text
def write_coord(self,image=None,index=''):
if image is None:
s = self
else:
s = image
#end if
c = ''
if self.periodicity=='molecule':
c += ' ATOMS {0}\n'.format(index)
else:
c += ' PRIMCOORD {0}\n'.format(index)
c += ' {0} 1\n'.format(len(s.elem))
if not 'force' in s:
for i in range(len(s.elem)):
r = s.pos[i]
c += ' {0:>3} {1:12.8f} {2:12.8f} {3:12.8f}\n'.format(s.elem[i],r[0],r[1],r[2])
#end for
else:
for i in range(len(s.elem)):
r = s.pos[i]
f = s.force[i]
c += ' {0:>3} {1:12.8f} {2:12.8f} {3:12.8f} {4:12.8f} {5:12.8f} {6:12.8f}\n'.format(s.elem[i],r[0],r[1],r[2],f[0],f[1],f[2])
#end for
#end if
return c
#end def write_coord
def write_vec(self,name,vec,index=''):
c = ' {0} {1}\n'.format(name.upper(),index)
for v in vec:
c += ' {0:12.8f} {1:12.8f} {2:12.8f}\n'.format(v[0],v[1],v[2])
#end for
return c
#end def write_vec
def write_data(self):
c = ''
ncols = 4
data = self.data
for d in sorted(data.keys()):
bdg_xd = data[d] # all block datagrids 2 or 3 D
for bdgk in sorted(bdg_xd.keys()):
c += ' BEGIN_BLOCK_DATAGRID_{0}D\n'.format(d)
c += ' {0}\n'.format(bdgk)
bdg = bdg_xd[bdgk] # single named block data grid
for dgk in sorted(bdg.keys()):
c += ' BEGIN_DATAGRID_{0}D_{1}\n'.format(d,dgk)
dg = bdg[dgk] # single named data grid
if d==2:
c += ' {0} {1}\n'.format(*dg.grid)
elif d==3:
c += ' {0} {1} {2}\n'.format(*dg.grid)
#end if
c += ' {0:12.8f} {1:12.8f} {2:12.8f}\n'.format(*dg.corner)
for v in dg.cell:
c += ' {0:12.8f} {1:12.8f} {2:12.8f}\n'.format(*v)
#end for
c = c[:-1]
n=0
for v in dg.values.ravel():
if n%ncols==0:
c += '\n '
#end if
c += ' {0:12.8f}'.format(v)
n+=1
#end for
c += '\n END_DATAGRID_{0}D_{1}\n'.format(d,dgk)
#end for
c += ' END_BLOCK_DATAGRID_{0}D\n'.format(d)
#end for
#end for
return c
#end def write_data
def write_band(self):
c = ''
ncols = 4
band = self.band
for d in sorted(band.keys()):
bdg_xd = band[d] # all block bandgrids 2 or 3 D
for bdgk in sorted(bdg_xd.keys()):
c += ' BEGIN_BLOCK_BANDGRID_{0}D\n'.format(d)
c += ' {0}\n'.format(bdgk)
bdg = bdg_xd[bdgk] # single named block band grid
for dgk in sorted(bdg.keys()):
c += ' BEGIN_BANDGRID_{0}D_{1}\n'.format(d,dgk)
dg = bdg[dgk] # single named band grid
if d==2:
c += ' {0} {1}\n'.format(*dg.grid)
elif d==3:
c += ' {0} {1} {2}\n'.format(*dg.grid)
#end if
c += ' {0:12.8f} {1:12.8f} {2:12.8f}\n'.format(*dg.corner)
for v in dg.cell:
c += ' {0:12.8f} {1:12.8f} {2:12.8f}\n'.format(*v)
#end for
for bi in sorted(dg.bands.keys()):
c += ' BAND: {0}'.format(bi)
n=0
for v in dg.bands[bi].ravel():
if n%ncols==0:
c += '\n '
#end if
c += ' {0:12.8f}'.format(v)
n+=1
#end for
c += '\n'
#end for
c += ' END_BANDGRID_{0}D_{1}\n'.format(d,dgk)
#end for
c += ' END_BLOCK_BANDGRID_{0}D\n'.format(d)
#end for
#end for
return c
#end def write_band
def dimension(self):
if self.periodicity in self.dimensions:
return self.dimensions[self.periodicity]
else:
return None
#end if
#end def dimension
def initialized(self):
return self.filetype!=None
#end def initialized
def has_animation(self):
return self.filetype=='axsf' and 'animsteps' in self
#end def has_animation
def has_bands(self):
return self.filetype=='bxsf' and 'band' in self and 'info' in self
#end def has_bands
def has_structure(self):
hs = self.filetype=='xsf'
hs &= 'elem' in self and 'pos' in self
d = self.dimension()
if d!=0:
hs &= 'primvec' in self
#end if
return hs
#end def has_structure
def has_data(self):
return self.filetype=='xsf' and 'data' in self
#end def has_data
def validity_checks(self):
ha = self.has_animation()
hb = self.has_bands()
hs = self.has_structure()
hd = self.has_data()
v = ha or hb or hs or hd
if v:
return []
else:
return ['xsf file must have animation, bands, structure, or data\nthe current file is missing all of these']
#end if
#end def validity_checks
def incorporate_structure(self,structure):
s = structure.copy()
s.change_units('A')
s.recenter()
elem = []
for e in s.elem:
ne = len(e)
if ne>1:
if ne==2 and not e[1].isalpha():
e = e[0]
elif ne>2:
e = e[0:2]
#end if
#end if
elem.append(ptable.elements[e].atomic_number)
#end for
self.filetype = 'xsf'
self.periodicity = 'crystal' # assumed
self.primvec = s.axes
self.elem = array(elem,dtype=int)
self.pos = s.pos
#end def incorporate_structure
def add_density(self,cell,density,name='density',corner=None,grid=None,centered=False,add_ghost=False,transpose=False):
if corner is None:
corner = zeros((3,),dtype=float)
#end if
if grid is None:
grid = density.shape
#end if
grid = array(grid,dtype=int)
corner = array(corner,dtype=float)
cell = array(cell ,dtype=float)
density = array(density,dtype=float)
density.shape = tuple(grid)
if centered: # shift corner by half a grid cell to center it
dc = 0.5/grid
dc = dot(dc,cell)
corner += dc
#end if
if add_ghost: # add ghost points to make a 'general' xsf grid
g = grid # this is an extra shell of points in PBC
d = density
grid = g+1
density = zeros(tuple(grid),dtype=float)
density[:g[0],:g[1],:g[2]] = d[:,:,:] # volume copy
density[ -1,:g[1],:g[2]] = d[0,:,:] # face copies
density[:g[0], -1,:g[2]] = d[:,0,:]
density[:g[0],:g[1], -1] = d[:,:,0]
density[ -1, -1,:g[2]] = d[0,0,:] # edge copies
density[ -1,:g[1], -1] = d[0,:,0]
density[:g[0], -1, -1] = d[:,0,0]
density[ -1, -1, -1] = d[0,0,0] # corner copy
#end if
if transpose: # shift from row major to column major
g = grid
d = density
density = zeros((d.size,))
n = 0
for k in xrange(g[2]):
for j in xrange(g[1]):
for i in xrange(g[0]):
density[n] = d[i,j,k]
n+=1
#end for
#end for
#end for
density.shape = tuple(grid)
#end if
self.data = obj()
self.data[3] = obj()
self.data[3][name] = obj()
self.data[3][name][name] = obj(
grid = grid,
corner = corner,
cell = cell,
values = density
)
#end def add_density
def get_density(self):
return self.data.first().first().first()
#end def get_density
def change_units(self,in_unit,out_unit):
fac = 1.0/convert(1.0,in_unit,out_unit)**3
density = self.get_density()
density.values *= fac
if 'values_noghost' in density:
density.values_noghost *= fac
#end if
#end def change_units
def remove_ghost(self,density=None,transpose=True):
if density is None:
density = self.get_density()
#end if
if 'values_noghost' in density:
return density.values_noghost
#end if
data = density.values
if transpose: # switch from column major to row major
g = data.shape
d = data.ravel()
data = zeros(g,dtype=float)
n = 0
for k in xrange(g[2]):
for j in xrange(g[1]):
for i in xrange(g[0]):
data[i,j,k] = d[n]
n+=1
#end for
#end for
#end for
#end if
# remove the ghost cells
d = data
g = array(d.shape,dtype=int)-1
data = zeros(tuple(g),dtype=float)
data[:,:,:] = d[:g[0],:g[1],:g[2]]
density.values_noghost = data
return data
#end def remove_ghost
def norm(self,density=None,vnorm=True):
if density is None:
density = self.get_density()
#end if
if 'values_noghost' not in density:
self.remove_ghost(density)
#end if
data = density.values_noghost
if vnorm:
dV = det(density.cell)/data.size
else:
dV = 1.0
#end if
return data.ravel().sum()*dV
#end def norm
def line_data(self,dim,density=None):
if density is None:
density = self.get_density()
#end if
if 'values_noghost' not in density:
self.remove_ghost(density)
#end if
data = density.values_noghost
dV = det(density.cell)/data.size
dr = norm(density.cell[dim])/data.shape[dim]
ndim = 3
permute = dim!=0
if permute:
r = range(0,ndim)
r.pop(dim)
permutation = tuple([dim]+r)
data = data.transpose(permutation)
#end if
s = data.shape
data.shape = s[0],s[1]*s[2]
line_data = data.sum(1)*dV/dr
r_data = density.corner[dim] + dr*arange(len(line_data),dtype=float)
return r_data,line_data
#end def line_data
def line_plot(self,dim,filepath):
r,d = self.line_data(dim)
savetxt(filepath,array(zip(r,d)))
#end def line_plot
#end class XsfFile
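# Hedged usage sketch (not part of the original module): read an existing XSF
# density file, report the integrated charge, and take a planar-averaged line
# cut along the first lattice direction. The filename is hypothetical.
def example_xsf_density(path='charge_density.xsf'):
    xsf = XsfFile(path)       # parses and validates on construction
    total = xsf.norm()        # integral of the density over the cell
    r,d = xsf.line_data(0)    # profile along the first axis
    return total,r,d
#end def example_xsf_density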
class PoscarFile(StandardFile):
sftype = 'POSCAR'
def __init__(self,filepath=None):
self.description = None
self.scale = None
self.axes = None
self.elem = None
self.elem_count = None
self.coord = None
self.pos = None
self.dynamic = None
self.vel_coord = None
self.vel = None
StandardFile.__init__(self,filepath)
#end def __init__
def assign_defaults(self):
if self.description is None:
self.description = 'System cell and coordinates'
#end if
#end def assign_defaults
def validity_checks(self):
msgs = []
if self.description is None:
msgs.append('description is missing')
elif not isinstance(self.description,str):
msgs.append('description must be text')
#end if
if self.scale is None:
msgs.append('scale is missing')
elif not isinstance(self.scale,(float,int)):
msgs.append('scale must be a real number')
elif self.scale<0:
msgs.append('scale must be greater than zero')
#end if
if self.axes is None:
msgs.append('axes is missing')
elif not isinstance(self.axes,ndarray):
msgs.append('axes must be an array')
elif self.axes.shape!=(3,3):
msgs.append('axes must be a 3x3 array, shape provided is {0}'.format(self.axes.shape))
elif not isinstance(self.axes[0,0],float):
msgs.append('axes must be an array of real numbers')
#end if
natoms = -1
if self.elem_count is None:
msgs.append('elem_count is missing')
elif not isinstance(self.elem_count,ndarray):
msgs.append('elem_count must be an array')
elif len(self.elem_count)==0:
msgs.append('elem_count array must contain at least one entry')
elif not isinstance(self.elem_count[0],int):
msgs.append('elem_count must be an array of integers')
else:
if (self.elem_count<1).sum()>0:
msgs.append('all elem_count entries must be greater than zero')
#end if
natoms = self.elem_count.sum()
#end if
if self.elem is not None: # presence depends on vasp version
if not isinstance(self.elem,ndarray):
msgs.append('elem must be an array')
elif isinstance(self.elem_count,ndarray) and len(self.elem)!=len(self.elem_count):
msgs.append('elem and elem_count arrays must be the same length')
elif not isinstance(self.elem[0],str):
msgs.append('elem must be an array of text')
else:
for e in self.elem:
iselem,symbol = is_element(e,symbol=True)
if not iselem:
msgs.append('elem entry "{0}" is not an element'.format(e))
#end if
#end for
            #end if
#end if
if self.coord is None:
msgs.append('coord is missing')
elif not isinstance(self.coord,str):
msgs.append('coord must be text')
#end if
if self.pos is None:
msgs.append('pos is missing')
elif not isinstance(self.pos,ndarray):
msgs.append('pos must be an array')
elif natoms>0 and self.pos.shape!=(natoms,3):
            msgs.append('pos must be a {0}x3 array, shape provided is {1}'.format(natoms,self.pos.shape))
elif natoms>0 and not isinstance(self.pos[0,0],float):
msgs.append('pos must be an array of real numbers')
#end if
if self.dynamic is not None: # dynamic is optional
if not isinstance(self.dynamic,ndarray):
msgs.append('dynamic must be an array')
elif natoms>0 and self.dynamic.shape!=(natoms,3):
                msgs.append('dynamic must be a {0}x3 array, shape provided is {1}'.format(natoms,self.dynamic.shape))
elif natoms>0 and not isinstance(self.dynamic[0,0],bool):
msgs.append('dynamic must be an array of booleans (true/false)')
#end if
#end if
if self.vel_coord is not None: # velocities are optional
if not isinstance(self.vel_coord,str):
msgs.append('vel_coord must be text')
#end if
#end if
if self.vel is not None: # velocities are optional
if not isinstance(self.vel,ndarray):
msgs.append('vel must be an array')
elif natoms>0 and self.vel.shape!=(natoms,3):
                msgs.append('vel must be a {0}x3 array, shape provided is {1}'.format(natoms,self.vel.shape))
elif natoms>0 and not isinstance(self.vel[0,0],float):
msgs.append('vel must be an array of real numbers')
#end if
#end if
return msgs
#end def validity_checks
def read_text(self,text):
read_poscar_chgcar(self,text)
#end def read_text
def write_text(self):
text = ''
if self.description is None:
text += 'System cell and coordinates\n'
else:
text += self.description+'\n'
#end if
text += ' {0}\n'.format(self.scale)
for a in self.axes:
text += ' {0:20.14f} {1:20.14f} {2:20.14f}\n'.format(*a)
#end for
if self.elem is not None:
for e in self.elem:
iselem,symbol = is_element(e,symbol=True)
if not iselem:
self.error('{0} is not an element'.format(e))
#end if
text += symbol+' '
#end for
text += '\n'
#end if
for ec in self.elem_count:
text += ' {0}'.format(ec)
#end for
text += '\n'
        if self.dynamic is not None:
text += 'selective dynamics\n'
#end if
text += self.coord+'\n'
if self.dynamic is None:
for p in self.pos:
text += ' {0:20.14f} {1:20.14f} {2:20.14f}\n'.format(*p)
#end for
else:
bm = self.bool_map
for i in xrange(len(self.pos)):
p = self.pos[i]
d = self.dynamic[i]
text += ' {0:20.14f} {1:20.14f} {2:20.14f} {3} {4} {5}\n'.format(p[0],p[1],p[2],bm[d[0]],bm[d[1]],bm[d[2]])
#end for
#end if
        if self.vel is not None:
text += self.vel_coord+'\n'
for v in self.vel:
text += ' {0:20.14f} {1:20.14f} {2:20.14f}\n'.format(*v)
#end for
#end if
return text
#end def write_text
def incorporate_xsf(self,xsf):
if 'primvec' in xsf:
axes = xsf.primvec.copy()
#end if
if 'convvec' in xsf:
axes = xsf.convvec.copy()
#end if
elem = xsf.elem.copy()
pos = xsf.pos.copy()
species = []
species_counts = []
elem_indices = []
spec_set = set()
for i in xrange(len(elem)):
e = elem[i]
if not e in spec_set:
spec_set.add(e)
species.append(e)
species_counts.append(0)
elem_indices.append([])
#end if
sindex = species.index(e)
species_counts[sindex] += 1
elem_indices[sindex].append(i)
#end for
elem_order = []
for elem_inds in elem_indices:
elem_order.extend(elem_inds)
#end for
pos = pos[elem_order]
species_ind = species
species = []
for i in species_ind:
species.append(ptable.simple_elements[i].symbol)
#end for
self.scale = 1.0
self.axes = axes
self.elem = array(species,dtype=str)
self.elem_count = array(species_counts,dtype=int)
self.coord = 'cartesian'
self.pos = pos
self.assign_defaults()
#end def incorporate_xsf
#end class PoscarFile
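# Illustrative sketch (added; not part of the original module): assembling a minimal POSCAR
# by hand and rendering it with write_text(). All field values are arbitrary example data.
def _example_poscar_write():
    p = PoscarFile()
    p.description = 'Example cubic cell'
    p.scale       = 1.0
    p.axes        = array([[5.0,0.0,0.0],
                           [0.0,5.0,0.0],
                           [0.0,0.0,5.0]],dtype=float)
    p.elem        = array(['Si','C'],dtype=str)
    p.elem_count  = array([1,1],dtype=int)
    p.coord       = 'direct'
    p.pos         = array([[0.00,0.00,0.00],
                           [0.25,0.25,0.25]],dtype=float)
    return p.write_text()
#end def _example_poscar_write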
class ChgcarFile(StandardFile):
sftype = 'CHGCAR'
def __init__(self,filepath=None):
self.poscar = None
self.grid = None
self.charge_density = None
self.spin_density = None
StandardFile.__init__(self,filepath)
#end def __init__
def validity_checks(self):
msgs = []
if self.poscar is None:
msgs.append('poscar elements are missing')
elif not isinstance(self.poscar,PoscarFile):
msgs.append('poscar is not an instance of PoscarFile')
else:
msgs.extend(self.poscar.validity_checks())
#end if
if self.grid is None:
msgs.append('grid is missing')
elif not isinstance(self.grid,ndarray):
msgs.append('grid must be an array')
elif len(self.grid)!=3 or self.grid.size!=3:
msgs.append('grid must have 3 entries')
elif not isinstance(self.grid[0],int):
msgs.append('grid must be an array of integers')
elif (self.grid<1).sum()>0:
msgs.append('all grid entries must be greater than zero')
#end if
ng = self.grid.prod()
if self.charge_density is None:
msgs.append('charge_density is missing')
elif not isinstance(self.charge_density,ndarray):
msgs.append('charge_density must be an array')
elif len(self.charge_density)!=ng:
msgs.append('charge_density must have {0} entries ({1} present by length)'.format(ng,len(self.charge_density)))
elif self.charge_density.size!=ng:
msgs.append('charge_density must have {0} entries ({1} present by size)'.format(ng,self.charge_density.size))
elif not isinstance(self.charge_density[0],float):
msgs.append('charge_density must be an array of real numbers')
#end if
if self.spin_density is not None: # spin density is optional
if not isinstance(self.spin_density,ndarray):
msgs.append('spin_density must be an array')
elif len(self.spin_density)!=ng:
msgs.append('spin_density must have {0} entries ({1} present)'.format(ng,len(self.spin_density)))
elif self.spin_density.size!=ng and self.spin_density.shape!=(ng,3):
msgs.append('non-collinear spin_density must be a {0}x3 array, shape provided: {1}'.format(ng,self.spin_density.shape))
elif not isinstance(self.spin_density.ravel()[0],float):
msgs.append('spin_density must be an array of real numbers')
#end if
#end if
return msgs
#end def validity_checks
def read_text(self,text):
read_poscar_chgcar(self,text)
#end def read_text
def write_text(self):
text = self.poscar.write_text()
text+= '\n {0} {1} {2}\n'.format(*self.grid)
densities = [self.charge_density]
if self.spin_density is not None:
if self.spin_density.size==self.charge_density.size:
densities.append(self.spin_density)
else:
for i in range(3):
densities.append(self.spin_density[:,i])
#end for
#end if
#end if
n=0
for dens in densities:
for d in dens:
text += '{0:20.12E}'.format(d)
n+=1
if n%5==0:
text+='\n'
#end if
#end for
#end for
return text
#end def write_text
def incorporate_xsf(self,xsf):
poscar = PoscarFile()
poscar.incorporate_xsf(xsf)
density = xsf.remove_ghost()
self.poscar = poscar
self.grid = array(density.shape,dtype=int)
self.charge_density = density.ravel()
self.check_valid()
#end def incorporate_xsf
#end class ChgcarFile
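# Illustrative sketch (added; not part of the original module): converting an XSF density
# file into CHGCAR-formatted text via the incorporate_xsf() hooks above. The input path is
# a placeholder and the XSF file is assumed to contain both a structure and a density grid.
def _example_xsf_to_chgcar(xsf_path='density.xsf'):
    xsf    = XsfFile(xsf_path)
    chgcar = ChgcarFile()
    chgcar.incorporate_xsf(xsf)    # builds the POSCAR block and copies the density grid
    return chgcar.write_text()     # CHGCAR-formatted text, ready to be written to disk
#end def _example_xsf_to_chgcar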
def read_poscar_chgcar(host,text):
is_poscar = isinstance(host,PoscarFile)
is_chgcar = isinstance(host,ChgcarFile)
if not is_poscar and not is_chgcar:
error('read_poscar_chgcar must be used in conjunction with PoscarFile or ChgcarFile objects only\nencountered object of type: {0}'.format(host.__class__.__name__))
#end if
# read lines and remove fortran comments
raw_lines = text.splitlines()
lines = []
for line in raw_lines:
# remove fortran comments
cloc1 = line.find('!')
cloc2 = line.find('#')
has1 = cloc1!=-1
has2 = cloc2!=-1
if has1 or has2:
if has1 and has2:
cloc = min(cloc1,cloc2)
elif has1:
cloc = cloc1
else:
cloc = cloc2
#end if
line = line[:cloc]
#end if
lines.append(line.strip())
#end for
# extract file information
nlines = len(lines)
min_lines = 8
if nlines<min_lines:
        host.error('file {0} must have at least {1} lines\nonly {2} lines found'.format(host.filepath,min_lines,nlines))
#end if
description = lines[0]
dim = 3
scale = float(lines[1].strip())
axes = empty((dim,dim))
axes[0] = array(lines[2].split(),dtype=float)
axes[1] = array(lines[3].split(),dtype=float)
axes[2] = array(lines[4].split(),dtype=float)
tokens = lines[5].split()
if tokens[0].isdigit():
counts = array(tokens,dtype=int)
elem = None
lcur = 6
else:
elem = array(tokens,dtype=str)
counts = array(lines[6].split(),dtype=int)
lcur = 7
#end if
if lcur<len(lines) and len(lines[lcur])>0:
c = lines[lcur].lower()[0]
lcur+=1
else:
        host.error('file {0} is incomplete (missing positions)'.format(host.filepath))
#end if
selective_dynamics = c=='s'
if selective_dynamics: # Selective dynamics
if lcur<len(lines) and len(lines[lcur])>0:
c = lines[lcur].lower()[0]
lcur+=1
else:
            host.error('file {0} is incomplete (missing positions)'.format(host.filepath))
#end if
#end if
cartesian = c=='c' or c=='k'
if cartesian:
coord = 'cartesian'
else:
coord = 'direct'
#end if
npos = counts.sum()
if lcur+npos>len(lines):
        host.error('file {0} is incomplete (missing positions)'.format(host.filepath))
#end if
spos = []
for i in range(npos):
spos.append(lines[lcur+i].split())
#end for
lcur += npos
spos = array(spos)
pos = array(spos[:,0:3],dtype=float)
if selective_dynamics:
dynamic = array(spos[:,3:6],dtype=str)
dynamic = dynamic=='T'
else:
dynamic = None
#end if
def is_empty(lines,start=None,end=None):
if start is None:
start = 0
#end if
if end is None:
end = len(lines)
#end if
is_empty = True
for line in lines[start:end]:
is_empty &= len(line)==0
#end for
return is_empty
#end def is_empty
# velocities may be present for poscar
# assume they are not for chgcar
if is_poscar and lcur<len(lines) and not is_empty(lines,lcur):
cline = lines[lcur].lower()
lcur+=1
if lcur+npos>len(lines):
            host.error('file {0} is incomplete (missing velocities)'.format(host.filepath))
#end if
cartesian = len(cline)>0 and (cline[0]=='c' or cline[0]=='k')
if cartesian:
vel_coord = 'cartesian'
else:
vel_coord = 'direct'
#end if
svel = []
for i in range(npos):
svel.append(lines[lcur+i].split())
#end for
lcur += npos
vel = array(svel,dtype=float)
else:
vel_coord = None
vel = None
#end if
# grid data is present for chgcar
if is_chgcar:
lcur+=1
if lcur<len(lines) and len(lines[lcur])>0:
grid = array(lines[lcur].split(),dtype=int)
lcur+=1
else:
            host.error('file {0} is incomplete (missing grid)'.format(host.filepath))
#end if
if lcur<len(lines):
ng = grid.prod()
density = []
for line in lines[lcur:]:
density.extend(line.split())
#end for
if len(density)>0:
def is_float(val):
try:
v = float(val)
return True
except:
return False
#end try
#end def is_float
# remove anything but the densities (e.g. augmentation charges)
n=0
while is_float(density[n]) and n+ng<len(density):
n+=ng
#end while
density = array(density[:n],dtype=float)
else:
                host.error('file {0} is incomplete (missing density)'.format(host.filepath))
#end if
if density.size%ng!=0:
host.error('number of density data entries is not a multiple of the grid\ngrid shape: {0}\ngrid size: {1}\ndensity size: {2}'.format(grid,ng,density.size))
#end if
            ndens = density.size//ng  # integer division: number of density blocks present
if ndens==1:
charge_density = density
spin_density = None
elif ndens==2:
charge_density = density[:ng]
spin_density = density[ng:]
elif ndens==4:
charge_density = density[:ng]
spin_density = empty((ng,3),dtype=float)
for i in range(3):
spin_density[:,i] = density[(i+1)*ng:(i+2)*ng]
#end for
else:
host.error('density data must be present for one of the following situations\n 1) charge density only (1 density)\n2) charge and collinear spin densities (2 densities)\n 3) charge and non-collinear spin densities (4 densities)\nnumber of densities found: {0}'.format(ndens))
#end if
else:
            host.error('file {0} is incomplete (missing density)'.format(host.filepath))
#end if
#end if
if is_poscar:
poscar = host
elif is_chgcar:
poscar = PoscarFile()
#end if
poscar.set(
description = description,
scale = scale,
axes = axes,
elem = elem,
elem_count = counts,
coord = coord,
pos = pos,
dynamic = dynamic,
vel_coord = vel_coord,
vel = vel
)
if is_chgcar:
host.set(
poscar = poscar,
grid = grid,
charge_density = charge_density,
spin_density = spin_density,
)
#end if
#end def read_poscar_chgcar
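# Illustrative sketch (added; not part of the original module): read_poscar_chgcar() is
# normally reached through PoscarFile.read_text() or ChgcarFile.read_text(). The literal
# below is a minimal, hypothetical POSCAR used only to show the layout the parser expects.
_example_poscar_text = '''Example cubic cell
1.0
  5.0 0.0 0.0
  0.0 5.0 0.0
  0.0 0.0 5.0
Si C
1 1
Direct
  0.00 0.00 0.00
  0.25 0.25 0.25
'''
def _example_poscar_parse():
    p = PoscarFile()
    p.read_text(_example_poscar_text)  # fills description, scale, axes, elem, elem_count, coord, pos
    return p
#end def _example_poscar_parse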
|
<reponame>glader/airflow-clickhouse-plugin
from unittest import TestCase, mock
from clickhouse_driver.errors import ServerException, ErrorCodes
from tests.util import LocalClickHouseHook
class ClientFromUrlTestCase(TestCase):
def test_temp_table(self):
hook = LocalClickHouseHook()
temp_table_name = 'test_temp_table'
result = hook.run((
f'CREATE TEMPORARY TABLE {temp_table_name} (test_field UInt8)',
f'INSERT INTO {temp_table_name} '
f'SELECT number FROM system.numbers WHERE number < 5 LIMIT 5',
f'SELECT SUM(test_field) FROM {temp_table_name}',
))
self.assertListEqual([(10,)], result)
try:
# a new connection is created
hook.run(f'SELECT * FROM {temp_table_name}')
except ServerException as err:
self.assertEqual(ErrorCodes.UNKNOWN_TABLE, err.code)
else:
raise AssertionError('server did not raise an error')
class HookLogQueryTestCase(TestCase):
def setUp(self) -> None:
self.hook = LocalClickHouseHook()
def test_log_params_dict(self):
self.assertEqual('{}', self.hook._log_params({}))
self.assertEqual('{1: 1}', self.hook._log_params({1: 1}))
self.assertEqual('{1: 1}', self.hook._log_params({1: 1}, limit=1))
self.assertEqual(
'{1: 1 … and 1 more parameters}',
self.hook._log_params({1: 1, 2: 2}, limit=1),
)
self.assertEqual(
'{1: 1, 2: 2}',
self.hook._log_params({1: 1, 2: 2}),
)
self.assertEqual(
'{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9 …'
' and 1 more parameters}',
self.hook._log_params({k: k for k in range(11)}),
)
self.assertEqual(
'{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9 …'
' and 10 more parameters}',
self.hook._log_params({k: k for k in range(20)}),
)
self.assertEqual(
'{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9 …'
' and 10 more parameters}',
self.hook._log_params({k: k for k in range(20)}, limit=10),
)
def test_log_params_generator(self):
def gen():
yield
g = gen()
self.assertEqual(str(g), self.hook._log_params(g))
def test_log_params_tuple(self):
self.assertEqual('()', self.hook._log_params(()))
self.assertEqual('(1,)', self.hook._log_params((1, )))
self.assertEqual('(1,)', self.hook._log_params((1, ), limit=1))
self.assertEqual(
'(1, … and 1 more parameters)',
self.hook._log_params((1, 2), limit=1),
)
self.assertEqual(
'(1, 2)',
self.hook._log_params((1, 2)),
)
self.assertEqual(
'(0, 1, 2, 3, 4, 5, 6, 7, 8, 9 … and 1 more parameters)',
self.hook._log_params(tuple(range(11))),
)
self.assertEqual(
'(0, 1, 2, 3, 4, 5, 6, 7, 8, 9 … and 10 more parameters)',
self.hook._log_params(tuple(range(20))),
)
self.assertEqual(
'(0, 1, 2, 3, 4, 5, 6, 7, 8, 9 … and 10 more parameters)',
self.hook._log_params(tuple(range(20)), limit=10),
)
def test_log_params_list(self):
self.assertEqual('[]', self.hook._log_params([]))
self.assertEqual('[1]', self.hook._log_params([1]))
self.assertEqual('[1]', self.hook._log_params([1], limit=1))
self.assertEqual(
'[1 … and 1 more parameters]',
self.hook._log_params([1, 2], limit=1),
)
self.assertEqual(
'[1, 2]',
self.hook._log_params([1, 2]),
)
self.assertEqual(
'[0, 1, 2, 3, 4, 5, 6, 7, 8, 9 … and 1 more parameters]',
self.hook._log_params(list(range(11))),
)
self.assertEqual(
'[0, 1, 2, 3, 4, 5, 6, 7, 8, 9 … and 10 more parameters]',
self.hook._log_params(list(range(20))),
)
self.assertEqual(
'[0, 1, 2, 3, 4, 5, 6, 7, 8, 9 … and 10 more parameters]',
self.hook._log_params(list(range(20)), limit=10),
)
def test_log_query(self):
_ = self.hook.log # to initialize .log property
with mock.patch.object(self.hook, '_log') as patched:
self.hook._log_query('SELECT 1', {})
patched.info.assert_called_with('%s%s', 'SELECT 1', '')
self.hook._log_query('SELECT 1', {1: 1})
patched.info.assert_called_with('%s%s', 'SELECT 1', ' with {1: 1}')
self.hook._log_query('SELECT 1', [1])
patched.info.assert_called_with('%s%s', 'SELECT 1', ' with [1]')
class HookGetAsPandasTestCase(TestCase):
def test_get_pandas_df(self):
import pandas as pd
hook = LocalClickHouseHook()
for sql, expected in (
(
'''
SELECT
number,
concat('result: ', toString(number + number)) AS n_sum
FROM system.numbers
WHERE number < 4
LIMIT 3
''',
pd.DataFrame.from_dict({
'number': (0, 1, 2),
'n_sum': ('result: 0', 'result: 2', 'result: 4'),
})
),
# empty df
(
'''
SELECT
number,
concat('result: ', toString(number + number)) AS n_sum
FROM (
SELECT number
FROM system.numbers
WHERE number < 4
LIMIT 3
)
WHERE number > 4
''',
pd.DataFrame(columns=['number', 'n_sum'])
)
):
df = hook.get_pandas_df(sql)
self.assertListEqual(list(df.columns), list(expected.columns))
self.assertListEqual(
df.to_dict('records'),
expected.to_dict('records'),
)
|
<filename>ubiops_cli/src/deployments.py
import ubiops as api
import os
from time import sleep
from ubiops_cli.utils import init_client, read_yaml, write_yaml, zip_dir, get_current_project, \
set_dict_default, write_blob, default_version_zip_name, parse_json
from ubiops_cli.src.helpers.deployment_helpers import define_deployment_version, update_deployment_file, \
update_existing_deployment_version, DEPLOYMENT_VERSION_FIELDS
from ubiops_cli.src.helpers.helpers import get_label_filter
from ubiops_cli.src.helpers.formatting import print_list, print_item, format_yaml, format_requests_reference, \
format_requests_oneline, format_json, parse_datetime, format_datetime
from ubiops_cli.src.helpers.options import *
from ubiops_cli.constants import STATUS_UNAVAILABLE, STRUCTURED_TYPE, DEFAULT_IGNORE_FILE, UPDATE_TIME
LIST_ITEMS = ['last_updated', 'name', 'labels']
REQUEST_LIST_ITEMS = ['id', 'status', 'success', 'time_created']
@click.group(["deployments", "dpl"], short_help="Manage your deployments")
def commands():
"""
Manage your deployments.
"""
pass
@commands.command("list", short_help="List deployments")
@LABELS_FILTER
@LIST_FORMATS
def deployments_list(labels, format_):
"""
List all your deployments in your project.
The `<labels>` option can be used to filter on specific labels.
"""
label_filter = get_label_filter(labels)
project_name = get_current_project()
if project_name:
client = init_client()
deployments = client.deployments_list(project_name=project_name, labels=label_filter)
client.api_client.close()
print_list(items=deployments, attrs=LIST_ITEMS, sorting_col=1, fmt=format_)
@commands.command("get", short_help="Get details of a deployment")
@DEPLOYMENT_NAME_ARGUMENT
@DEPLOYMENT_YAML_OUTPUT
@QUIET
@GET_FORMATS
def deployments_get(deployment_name, output_path, quiet, format_):
"""
    Get the deployment settings, such as input_type and output_type.
If you specify the `<output_path>` option, this location will be used to store the
deployment settings in a yaml file. You can either specify the `<output_path>` as file or
directory. If the specified `<output_path>` is a directory, the settings will be
stored in `deployment.yaml`.
"""
project_name = get_current_project(error=True)
client = init_client()
deployment = client.deployments_get(project_name=project_name, deployment_name=deployment_name)
client.api_client.close()
if output_path is not None:
dictionary = format_yaml(
item=deployment,
required_front=['name', 'description', 'labels', 'input_type', 'output_type'],
optional=['input_fields', 'output_fields'],
rename={'name': 'deployment_name', 'description': 'deployment_description', 'labels': 'deployment_labels'},
as_str=False
)
yaml_file = write_yaml(output_path, dictionary, default_file_name="deployment.yaml")
if not quiet:
click.echo('Deployment file is stored in: %s' % yaml_file)
else:
print_item(
item=deployment,
row_attrs=LIST_ITEMS,
rename={'name': 'deployment_name', 'description': 'deployment_description', 'labels': 'deployment_labels'},
fmt=format_
)
@commands.command("create", short_help="Create a deployment")
@DEPLOYMENT_NAME_OVERRULE
@DEPLOYMENT_YAML_FILE
@CREATE_FORMATS
def deployments_create(deployment_name, yaml_file, format_):
"""
Create a new deployment.
\b
Define the deployment parameters using a yaml file.
For example:
```
deployment_name: my-deployment-name
deployment_description: Deployment created via command line.
deployment_labels:
my-key-1: my-label-1
my-key-2: my-label-2
input_type: structured
input_fields:
- name: param1
data_type: int
- name: param2
data_type: string
output_type: plain
```
The deployment name can either be passed as argument or specified inside the yaml
file. If it is both passed as argument and specified inside the yaml file, the value
passed as argument is used.
Possible input/output types: [structured, plain]. Possible data_types: [blob, int,
string, double, bool, array_string, array_int, array_double].
"""
project_name = get_current_project(error=True)
yaml_content = read_yaml(yaml_file, required_fields=DEPLOYMENT_REQUIRED_FIELDS)
client = init_client()
assert 'deployment_name' in yaml_content or deployment_name, 'Please, specify the deployment name in either the ' \
'yaml file or as a command argument'
deployment_name = set_dict_default(deployment_name, yaml_content, 'deployment_name')
description = set_dict_default(None, yaml_content, 'deployment_description')
if 'input_fields' in yaml_content and isinstance(yaml_content['input_fields'], list):
input_fields = [api.DeploymentInputFieldCreate(name=item['name'], data_type=item['data_type'])
for item in yaml_content['input_fields']]
else:
input_fields = None
if 'output_fields' in yaml_content and isinstance(yaml_content['output_fields'], list):
output_fields = [api.DeploymentInputFieldCreate(name=item['name'], data_type=item['data_type'])
for item in yaml_content['output_fields']]
else:
output_fields = None
if 'deployment_labels' in yaml_content:
labels = yaml_content['deployment_labels']
else:
labels = {}
deployment = api.DeploymentCreate(
name=deployment_name,
description=description,
input_type=yaml_content['input_type'],
output_type=yaml_content['output_type'],
input_fields=input_fields,
output_fields=output_fields,
labels=labels
)
response = client.deployments_create(project_name=project_name, data=deployment)
client.api_client.close()
print_item(
item=response,
row_attrs=LIST_ITEMS,
rename={'name': 'deployment_name', 'description': 'deployment_description', 'labels': 'deployment_labels'},
fmt=format_
)
@commands.command("update", short_help="Update a deployment")
@DEPLOYMENT_NAME_ARGUMENT
@DEPLOYMENT_NAME_UPDATE
@VERSION_DEFAULT_UPDATE
@DEPLOYMENT_YAML_FILE_OPTIONAL
@QUIET
def deployments_update(deployment_name, new_name, default_version, yaml_file, quiet):
"""
Update a deployment.
If you only want to update the name of the deployment or the default deployment version,
use the options `<new_name>` and `<default_version>`.
If you want to update the deployment input/output fields, description or labels, please use a yaml file to define
the new deployment.
\b
For example:
```
deployment_description: Deployment created via command line.
deployment_labels:
my-key-1: my-label-1
my-key-2: my-label-2
input_fields:
- name: param1
data_type: int
- name: param2
data_type: string
output_fields:
- name: param1
data_type: int
- name: param2
data_type: string
```
"""
project_name = get_current_project(error=True)
yaml_content = read_yaml(yaml_file)
deployment = api.DeploymentUpdate(name=new_name, default_version=default_version)
if 'deployment_description' in yaml_content:
deployment.description = yaml_content['deployment_description']
if 'deployment_labels' in yaml_content:
deployment.labels = yaml_content['deployment_labels']
if 'input_fields' in yaml_content and isinstance(yaml_content['input_fields'], list):
deployment.input_fields = [
api.DeploymentInputFieldCreate(name=item['name'], data_type=item['data_type'])
for item in yaml_content['input_fields']
]
if 'output_fields' in yaml_content and isinstance(yaml_content['output_fields'], list):
deployment.output_fields = [
api.DeploymentInputFieldCreate(name=item['name'], data_type=item['data_type'])
for item in yaml_content['output_fields']
]
client = init_client()
client.deployments_update(project_name=project_name, deployment_name=deployment_name, data=deployment)
client.api_client.close()
if not quiet:
click.echo("Deployment was successfully updated")
@commands.command("delete", short_help="Delete a deployment")
@DEPLOYMENT_NAME_ARGUMENT
@ASSUME_YES
@QUIET
def deployments_delete(deployment_name, assume_yes, quiet):
"""
Delete a deployment.
"""
project_name = get_current_project(error=True)
if assume_yes or click.confirm("Are you sure you want to delete deployment <%s> "
"of project <%s>?" % (deployment_name, project_name)):
client = init_client()
client.deployments_delete(project_name=project_name, deployment_name=deployment_name)
client.api_client.close()
if not quiet:
click.echo("Deployment was successfully deleted")
@commands.command("package", short_help="Package deployment code")
@DEPLOYMENT_NAME_ZIP
@VERSION_NAME_ZIP
@PACKAGE_DIR
@ZIP_OUTPUT
@IGNORE_FILE
@ASSUME_YES
@QUIET
def deployments_package(deployment_name, version_name, directory, output_path, ignore_file, assume_yes, quiet):
"""
Package code to ZIP file which is ready to be deployed.
Please, specify the code `<directory>` that should be deployed. The files in this directory
will be zipped and uploaded. Subdirectories and files that shouldn't be contained in
the ZIP can be specified in an ignore file, which is by default '.ubiops-ignore'. The structure of
    this file is assumed to be equal to the well-known '.gitignore' file.
Use the `<output_path>` option to specify the output location of the zip file. If not specified,
the current directory will be used. If the `<output_path>` is a directory, the zip will be saved in
`[deployment_name]_[deployment_version]_[datetime.now()].zip`. Use the `<assume_yes>` option to overwrite
    without confirmation if the file specified in `<output_path>` already exists.
"""
ignore_file = DEFAULT_IGNORE_FILE if ignore_file is None else ignore_file
zip_path = zip_dir(
directory=directory,
output_path=output_path,
ignore_filename=ignore_file,
deployment_name=deployment_name,
version_name=version_name,
force=assume_yes
)
if not quiet:
click.echo("Created zip: %s" % zip_path)
@commands.command("upload", short_help="Upload a deployment package")
@DEPLOYMENT_NAME_ARGUMENT
@VERSION_NAME_OPTION
@ZIP_FILE
@OVERWRITE
@QUIET
def deployments_upload(deployment_name, version_name, zip_path, overwrite, quiet):
"""
Upload ZIP to a version of a deployment.
Please, specify the deployment package `<zip_path>` that should be uploaded.
Use the `<overwrite>` option to overwrite the deployment package on UbiOps if one already exists for this version.
"""
project_name = get_current_project(error=True)
client = init_client()
current_version = client.deployment_versions_get(
project_name=project_name, deployment_name=deployment_name, version=version_name
)
if overwrite or current_version.status == STATUS_UNAVAILABLE:
client.revisions_file_upload(
project_name=project_name, deployment_name=deployment_name, version=version_name, file=zip_path
)
client.api_client.close()
if not quiet:
click.echo("Deployment was successfully uploaded")
else:
client.api_client.close()
raise Exception("A deployment package already exists for this deployment version")
@commands.command("download", short_help="Download a deployment package")
@DEPLOYMENT_NAME_ARGUMENT
@VERSION_NAME_OPTION
@ZIP_OUTPUT
@QUIET
def deployments_download(deployment_name, version_name, output_path, quiet):
"""
Get the version of a deployment.
The `<output_path>` option will be used as output location of the zip file. If not specified,
the current directory will be used. If the `<output_path>` is a directory, the zip will be
saved in `[deployment_name]_[deployment_version]_[datetime.now()].zip`.
"""
project_name = get_current_project(error=True)
client = init_client()
version = client.deployment_versions_get(
project_name=project_name, deployment_name=deployment_name, version=version_name
)
if not version.active_revision:
raise Exception("No active revision available for this deployment")
with client.revisions_file_download(project_name=project_name, deployment_name=deployment_name,
version=version_name, revision_id=version.active_revision) as response:
filename = default_version_zip_name(deployment_name, version_name)
output_path = write_blob(response.read(), output_path, filename)
client.api_client.close()
if not quiet:
click.echo("Zip stored in: %s" % output_path)
@commands.command("deploy", short_help="Deploy a new version of a deployment")
@DEPLOYMENT_NAME_OVERRULE
@VERSION_NAME_OPTIONAL
@PACKAGE_DIR
@DEPLOYMENT_FILE
@IGNORE_FILE
@ZIP_OUTPUT_STORE
@VERSION_YAML_FILE
@LANGUAGE
@INSTANCE_TYPE
@MEMORY_ALLOCATION
@MIN_INSTANCES
@MAX_INSTANCES
@MAX_IDLE_TIME
@DEPLOYMENT_MODE
@RETENTION_MODE
@RETENTION_TIME
@VERSION_LABELS
@VERSION_DESCRIPTION
@OVERWRITE
@ASSUME_YES
@QUIET
def deployments_deploy(deployment_name, version_name, directory, output_path, yaml_file, overwrite, assume_yes, quiet,
**kwargs):
"""
Deploy a new version of a deployment.
Please, specify the code `<directory>` that should be deployed. The files in this directory
will be zipped and uploaded. Subdirectories and files that shouldn't be contained in the
ZIP can be specified in an ignore file, which is by default '.ubiops-ignore'. The structure of this
    file is assumed to be equal to the well-known '.gitignore' file.
If you want to store a local copy of the uploaded zip file, please use the `<output_path>` option.
The `<output_path>` option will be used as output location of the zip file. If the `<output_path>` is a
directory, the zip will be saved in `[deployment_name]_[deployment_version]_[datetime.now()].zip`. Use
    the `<assume_yes>` option to overwrite without confirmation if the file specified in `<output_path>` already exists.
Provide either deployment mode 'express' or 'batch', default is 'express'.
\b
It is possible to define the parameters using a yaml file.
For example:
```
deployment_name: my-deployment-name
version_name: my-deployment-version
version_description: Version created via command line.
version_labels:
my-key-1: my-label-1
my-key-2: my-label-2
language: python3.7
instance_type: 2048mb
minimum_instances: 0
maximum_instances: 1
maximum_idle_time: 300
request_retention_mode: none
request_retention_time: 604800
deployment_mode: express
```
Those parameters can also be provided as command options. If both a `<yaml_file>` is set and options are given,
the options defined by `<yaml_file>` will be overwritten by the specified command options. The deployment name can
either be passed as command argument or specified inside the yaml file using `<deployment_name>`.
It's not possible to update the programming language and deployment mode of an existing deployment version.
"""
if output_path is None:
store_zip = False
output_path = ''
else:
store_zip = True
project_name = get_current_project(error=True)
client = init_client()
yaml_content = read_yaml(yaml_file, required_fields=[])
assert 'deployment_name' in yaml_content or deployment_name, 'Please, specify the deployment name in either the ' \
'yaml file or as a command argument'
assert 'version_name' in yaml_content or version_name, 'Please, specify the version name in either the yaml ' \
'file or as a command option'
deployment_name = set_dict_default(deployment_name, yaml_content, 'deployment_name')
version_name = set_dict_default(version_name, yaml_content, 'version_name')
existing_version = None
if overwrite:
try:
existing_version = client.deployment_versions_get(
project_name=project_name, deployment_name=deployment_name, version=version_name
)
except api.exceptions.ApiException:
# Do nothing if version doesn't exist
pass
kwargs = define_deployment_version(
kwargs, yaml_content, extra_yaml_fields=['deployment_file', 'ignore_file']
)
kwargs['ignore_file'] = DEFAULT_IGNORE_FILE if kwargs['ignore_file'] is None else kwargs['ignore_file']
if not quiet and kwargs['memory_allocation'] and not kwargs['instance_type']:
click.secho(
"Deprecation warning: parameter 'memory_allocation' is deprecated, use 'instance_type' instead",
fg='red'
)
zip_path = zip_dir(
directory=directory,
output_path=output_path,
ignore_filename=kwargs['ignore_file'],
deployment_name=deployment_name,
version_name=version_name,
force=assume_yes
)
try:
has_uploaded_zips = False
has_changed_fields = False
if not (overwrite and existing_version):
version = api.DeploymentVersionCreate(
version=version_name, **{k: kwargs[k] for k in DEPLOYMENT_VERSION_FIELDS}
)
client.deployment_versions_create(project_name=project_name, deployment_name=deployment_name, data=version)
else:
revisions = client.revisions_list(
project_name=project_name, deployment_name=deployment_name, version=version_name
)
has_uploaded_zips = len(revisions) > 0
if overwrite and existing_version:
has_changed_fields = update_existing_deployment_version(
client, project_name, deployment_name, version_name, existing_version, kwargs
)
has_changed_env_vars = update_deployment_file(
client, project_name, deployment_name, version_name, kwargs['deployment_file']
)
if has_uploaded_zips and (has_changed_fields or has_changed_env_vars):
# Wait for changes being applied
click.echo("Waiting for changes to take effect... This takes %d seconds." % UPDATE_TIME)
sleep(UPDATE_TIME)
client.revisions_file_upload(
project_name=project_name, deployment_name=deployment_name, version=version_name, file=zip_path
)
client.api_client.close()
except Exception as e:
if os.path.isfile(zip_path) and not store_zip:
os.remove(zip_path)
client.api_client.close()
raise e
if os.path.isfile(zip_path):
if store_zip:
if not quiet:
click.echo("Created zip: %s" % zip_path)
else:
os.remove(zip_path)
if not quiet:
click.echo("Deployment was successfully deployed")
@commands.group("requests", short_help="Manage your deployment requests")
def requests():
"""
Manage your deployment requests.
"""
pass
@requests.command("create", short_help="Create deployment request")
@DEPLOYMENT_NAME_ARGUMENT
@VERSION_NAME_OPTIONAL
@REQUEST_BATCH
@REQUEST_DATA_MULTI
@REQUEST_TIMEOUT
@REQUESTS_FORMATS
def requests_create(deployment_name, version_name, batch, data, timeout, format_):
"""
Create a deployment request and retrieve request IDs to collect the results later.
Use the option `timeout` to specify the timeout of the request. The minimum value is 10 seconds. The maximum value
is 3600 (1 hour) for express deployments and 345600 (96 hours) for batch deployments. The default value is 300
(5 minutes) for express deployments and 14400 (4 hours) for batch deployments.
Use the version option to make a request to a specific deployment version:
`ubiops deployments requests create <my-deployment> -v <my-version> --data <input>`
If not specified, a request is made to the default version:
`ubiops deployments requests create <my-deployment> --data <input>`
Use `--batch` to make an asynchronous batch request:
`ubiops deployments requests create <my-deployment> --batch --data <input>`
    Multiple data inputs can be specified at once and sent as a batch by using the '--data' option multiple times:
`ubiops deployments requests create <my-deployment> --batch --data <input-1> --data <input-2> --data <input-3>`
For structured input, specify data input as JSON formatted string. For example:
`ubiops deployments requests create <my-deployment> --data "{\\"param1\\": 1, \\"param2\\": \\"two\\"}"`
"""
data = list(data)
project_name = get_current_project(error=True)
client = init_client()
deployment = client.deployments_get(project_name=project_name, deployment_name=deployment_name)
if deployment.input_type == STRUCTURED_TYPE:
input_data = []
for d in data:
input_data.append(parse_json(d))
else:
input_data = data
if version_name is not None:
if batch:
response = client.batch_deployment_version_requests_create(
project_name=project_name, deployment_name=deployment_name,
version=version_name, data=input_data, timeout=timeout
)
else:
response = [client.deployment_version_requests_create(
project_name=project_name, deployment_name=deployment_name,
version=version_name, data=input_data, timeout=timeout
)]
else:
if batch:
response = client.batch_deployment_requests_create(
project_name=project_name, deployment_name=deployment_name, data=input_data, timeout=timeout
)
else:
response = [client.deployment_requests_create(
project_name=project_name, deployment_name=deployment_name, data=input_data, timeout=timeout
)]
client.api_client.close()
if format_ == 'reference':
click.echo(format_requests_reference(response))
elif format_ == 'oneline':
click.echo(format_requests_oneline(response))
elif format_ == 'json':
click.echo(format_json(response))
else:
click.echo(format_requests_reference(response))
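# Illustrative helper (added; not part of the original CLI): for deployments with structured
# input, the value passed to '--data' is simply a JSON object serialized to a string, as in
# the docstring above. The field names used here are placeholders.
def _example_structured_data_argument():
    import json
    return json.dumps({"param1": 1, "param2": "two"})  # -> '{"param1": 1, "param2": "two"}'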
@requests.command("get", short_help="Get deployment request")
@DEPLOYMENT_NAME_ARGUMENT
@VERSION_NAME_OPTIONAL
@REQUEST_ID_MULTI
@REQUESTS_FORMATS
def requests_get(deployment_name, version_name, request_id, format_):
"""
Get one or more stored deployment requests.
Deployment requests are only stored for deployment versions with `request_retention_mode` 'full' or 'metadata'.
Use the version option to get a request for a specific deployment version.
If not specified, the request is retrieved for the default version.
    Multiple request ids can be specified at once by using the '-id' option multiple times:
`ubiops deployments requests get <my-deployment> -v <my-version> -id <id-1> -id <id-2> -id <id-3>`
"""
request_ids = list(request_id)
project_name = get_current_project(error=True)
client = init_client()
if version_name is not None:
response = client.deployment_version_requests_batch_get(
project_name=project_name, deployment_name=deployment_name, version=version_name, data=request_ids
)
else:
response = client.deployment_requests_batch_get(
project_name=project_name, deployment_name=deployment_name, data=request_ids
)
client.api_client.close()
if format_ == 'reference':
click.echo(format_requests_reference(response))
elif format_ == 'oneline':
click.echo(format_requests_oneline(response))
elif format_ == 'json':
click.echo(format_json(response))
else:
click.echo(format_requests_reference(response))
@requests.command("list", short_help="List deployment requests")
@DEPLOYMENT_NAME_ARGUMENT
@VERSION_NAME_OPTIONAL
@OFFSET
@REQUEST_LIMIT
@REQUEST_SORT
@REQUEST_FILTER_DEPLOYMENT_STATUS
@REQUEST_FILTER_SUCCESS
@REQUEST_FILTER_START_DATE
@REQUEST_FILTER_END_DATE
@REQUEST_FILTER_SEARCH_ID
@REQUEST_FILTER_IN_PIPELINE
@LIST_FORMATS
def requests_list(deployment_name, version_name, limit, format_, **kwargs):
"""
List stored deployment requests.
Deployment requests are only stored for deployment versions with `request_retention_mode` 'full' or 'metadata'.
Use the version option to list the requests for a specific deployment version.
If not specified, the requests are listed for the default version.
"""
project_name = get_current_project(error=True)
if 'start_date' in kwargs and kwargs['start_date']:
try:
kwargs['start_date'] = format_datetime(parse_datetime(kwargs['start_date']), fmt='%Y-%m-%dT%H:%M:%SZ')
except ValueError:
raise Exception("Failed to parse start_date. Please use iso-format, "
"for example, '2020-01-01T00:00:00.000000Z'")
if 'end_date' in kwargs and kwargs['end_date']:
try:
kwargs['end_date'] = format_datetime(parse_datetime(kwargs['end_date']), fmt='%Y-%m-%dT%H:%M:%SZ')
except ValueError:
raise Exception("Failed to parse end_date. Please use iso-format, "
"for example, '2020-01-01T00:00:00.000000Z'")
client = init_client()
if version_name is not None:
response = client.deployment_version_requests_list(
project_name=project_name, deployment_name=deployment_name, version=version_name, limit=limit, **kwargs
)
else:
response = client.deployment_requests_list(
project_name=project_name, deployment_name=deployment_name, limit=limit, **kwargs
)
client.api_client.close()
print_list(response, REQUEST_LIST_ITEMS, fmt=format_)
if len(response) == limit:
click.echo("\n(Use the <offset> and <limit> options to load more)")
@commands.command("request", short_help="[DEPRECATED] Create deployment direct requests")
@DEPLOYMENT_NAME_ARGUMENT
@VERSION_NAME_OPTIONAL
@REQUEST_DATA
@REQUEST_DEPLOYMENT_TIMEOUT_DEPRECATED
@REQUESTS_FORMATS
def deprecated_deployments_request(deployment_name, version_name, data, timeout, format_):
"""
[DEPRECATED] Create a deployment request and retrieve the result.
Use the version option to make a request to a specific deployment version:
`ubiops deployments request <my-deployment> -v <my-version> --data <input>`
If not specified, a request is made to the default version:
`ubiops deployments request <my-deployment> --data <input>`
For structured input, specify the data as JSON formatted string. For example:
`ubiops deployments request <my-deployment> --data "{\\"param1\\": 1, \\"param2\\": \\"two\\"}"`
"""
if format_ != 'json':
click.secho(
"Deprecation warning: 'request' is deprecated, use 'requests create' instead",
fg='red'
)
project_name = get_current_project(error=True)
client = init_client()
deployment = client.deployments_get(project_name=project_name, deployment_name=deployment_name)
if deployment.input_type == STRUCTURED_TYPE:
data = parse_json(data)
if version_name is not None:
response = client.deployment_version_requests_create(
project_name=project_name, deployment_name=deployment_name, version=version_name, data=data, timeout=timeout
)
else:
response = client.deployment_requests_create(
project_name=project_name, deployment_name=deployment_name, data=data, timeout=timeout
)
client.api_client.close()
if format_ == 'reference':
click.echo(format_requests_reference([response]))
elif format_ == 'oneline':
click.echo(format_requests_oneline([response]))
elif format_ == 'json':
click.echo(format_json(response))
else:
click.echo(format_requests_reference([response]))
@commands.group("batch_requests", short_help="[DEPRECATED] Manage your deployment batch requests")
def deprecated_batch_requests():
"""
[DEPRECATED] Manage your deployment batch requests.
"""
pass
@deprecated_batch_requests.command("create", short_help="[DEPRECATED] Create deployment batch request")
@DEPLOYMENT_NAME_ARGUMENT
@VERSION_NAME_OPTIONAL
@REQUEST_DATA_MULTI
@REQUEST_TIMEOUT
@REQUESTS_FORMATS
def deprecated_batch_requests_create(deployment_name, version_name, data, timeout, format_):
"""
[DEPRECATED] Create a deployment batch request and retrieve request IDs to collect the results later.
Deployment requests are only stored for deployment versions with `request_retention_mode` 'full' or 'metadata'.
Use the option `timeout` to specify the timeout of the request. The minimum value is 10 seconds. The maximum value
is 345600 (96 hours). The default value is 14400 (4 hours).
Use the version option to make a batch request to a specific deployment version:
`ubiops deployments batch_requests create <my-deployment> -v <my-version> --data <input>`
If not specified, a batch request is made to the default version:
`ubiops deployments batch_requests create <my-deployment> --data <input>`
    Multiple data inputs can be specified at once by using the '--data' option multiple times:
`ubiops deployments batch_requests create <my-deployment> --data <input-1> --data <input-2> --data <input-3>`
For structured input, specify each data input as JSON formatted string. For example:
`ubiops deployments batch_requests create <my-deployment> --data "{\\"param1\\": 1, \\"param2\\": \\"two\\"}"`
"""
if format_ != 'json':
click.secho(
"Deprecation warning: 'batch_requests create' is deprecated, use 'requests create --batch' instead",
fg='red'
)
data = list(data)
project_name = get_current_project(error=True)
client = init_client()
deployment = client.deployments_get(project_name=project_name, deployment_name=deployment_name)
if deployment.input_type == STRUCTURED_TYPE:
input_data = []
for d in data:
input_data.append(parse_json(d))
else:
input_data = data
if version_name is not None:
response = client.batch_deployment_version_requests_create(
project_name=project_name, deployment_name=deployment_name, version=version_name, data=input_data,
timeout=timeout
)
else:
response = client.batch_deployment_requests_create(
project_name=project_name, deployment_name=deployment_name, data=input_data, timeout=timeout
)
client.api_client.close()
if format_ == 'reference':
click.echo(format_requests_reference(response))
elif format_ == 'oneline':
click.echo(format_requests_oneline(response))
elif format_ == 'json':
click.echo(format_json(response))
else:
click.echo(format_requests_reference(response))
@deprecated_batch_requests.command("get", short_help="[DEPRECATED] Get deployment batch request")
@DEPLOYMENT_NAME_ARGUMENT
@VERSION_NAME_OPTIONAL
@REQUEST_ID_MULTI
@REQUESTS_FORMATS
def deprecated_batch_requests_get(deployment_name, version_name, request_id, format_):
"""
[DEPRECATED] Get the results of one or more deployment batch requests.
Deployment requests are only stored for deployment versions with `request_retention_mode` 'full' or 'metadata'.
Use the version option to get a batch request for a specific deployment version.
If not specified, the batch request is retrieved for the default version.
    Multiple request ids can be specified at once by using the '-id' option multiple times:
`ubiops deployments batch_requests get <my-deployment> -v <my-version> -id <id-1> -id <id-2> -id <id-3>`
"""
if format_ != 'json':
click.secho(
"Deprecation warning: 'batch_requests get' is deprecated, use 'requests get' instead",
fg='red'
)
request_ids = list(request_id)
project_name = get_current_project(error=True)
client = init_client()
if version_name is not None:
response = client.deployment_version_requests_batch_get(
project_name=project_name, deployment_name=deployment_name, version=version_name, data=request_ids
)
else:
response = client.deployment_requests_batch_get(
project_name=project_name, deployment_name=deployment_name, data=request_ids
)
client.api_client.close()
if format_ == 'reference':
click.echo(format_requests_reference(response))
elif format_ == 'oneline':
click.echo(format_requests_oneline(response))
elif format_ == 'json':
click.echo(format_json(response))
else:
click.echo(format_requests_reference(response))
@deprecated_batch_requests.command("list", short_help="[DEPRECATED] List deployment batch requests")
@DEPLOYMENT_NAME_ARGUMENT
@VERSION_NAME_OPTIONAL
@OFFSET
@REQUEST_LIMIT
@LIST_FORMATS
def deprecated_batch_requests_list(deployment_name, version_name, offset, limit, format_):
"""
[DEPRECATED] List deployment batch requests.
Deployment requests are only stored for deployment versions with `request_retention_mode` 'full' or 'metadata'.
Use the version option to list the batch requests for a specific deployment version.
If not specified, the batch requests are listed for the default version.
"""
if format_ != 'json':
click.secho(
"Deprecation warning: 'batch_requests list' is deprecated, use 'requests list' instead",
fg='red'
)
project_name = get_current_project(error=True)
client = init_client()
if version_name is not None:
response = client.deployment_version_requests_list(
project_name=project_name, deployment_name=deployment_name, version=version_name, limit=limit, offset=offset
)
else:
response = client.deployment_requests_list(
project_name=project_name, deployment_name=deployment_name, limit=limit, offset=offset
)
client.api_client.close()
print_list(response, REQUEST_LIST_ITEMS, fmt=format_)
if len(response) == limit:
click.echo("\n(Use the <offset> and <limit> options to load more)")
|
from restclients_core import models
# Create your models here.
class Status(models.Model):
STATUS_TYPE_BOOKED_SPACE = -14
STATUS_TYPE_WAIT = -13
STATUS_TYPE_CANCEL = -12
STATUS_TYPE_INFO_ONLY = -11
STATUS_TYPE_CHOICES = (
(STATUS_TYPE_BOOKED_SPACE, 'Booked Space'),
(STATUS_TYPE_WAIT, 'Wait'),
(STATUS_TYPE_CANCEL, 'Cancel'),
(STATUS_TYPE_INFO_ONLY, 'Info Only'),
)
description = models.CharField(max_length=30)
id = models.PositiveIntegerField(primary_key=True)
status_type_id = models.SmallIntegerField(choices=STATUS_TYPE_CHOICES)
display_on_web = models.BooleanField(default=None)
def __str__(self):
return self.description
class EventType(models.Model):
description = models.CharField(max_length=30)
id = models.PositiveIntegerField(primary_key=True)
display_on_web = models.BooleanField(default=None)
def __str__(self):
return self.description
class Building(models.Model):
description = models.CharField(max_length=50)
building_code = models.CharField(max_length=20, null=True)
id = models.PositiveIntegerField(primary_key=True)
time_zone_description = models.CharField(max_length=255)
time_zone_abbreviation = models.CharField(max_length=10)
def __str__(self):
return self.description
class Room(models.Model):
room = models.CharField(max_length=20)
description = models.CharField(max_length=50)
dv_building = models.CharField(max_length=50)
active = models.BooleanField()
building = models.ForeignKey(Building, on_delete=models.PROTECT)
id = models.PositiveIntegerField(primary_key=True)
external_reference = models.CharField(max_length=255, null=True)
def __str__(self):
return self.description
class Booking(models.Model):
booking_date = models.DateField()
room_description = models.CharField(max_length=75)
time_event_start = models.DateTimeField()
time_event_end = models.DateTimeField()
group_name = models.CharField(max_length=50)
event_name = models.CharField(max_length=255)
reservation_id = models.PositiveIntegerField()
event_type_description = models.CharField(max_length=30)
contact = models.CharField(max_length=113)
id = models.PositiveIntegerField(primary_key=True)
building = models.ForeignKey(Building, on_delete=models.PROTECT)
time_booking_start = models.DateTimeField()
time_booking_end = models.DateTimeField()
time_zone = models.CharField(max_length=10)
building_code = models.CharField(max_length=20)
dv_building = models.CharField(max_length=50)
room_code = models.CharField(max_length=20)
dv_room = models.CharField(max_length=50)
room = models.ForeignKey(Room, on_delete=models.PROTECT)
status = models.ForeignKey(Status, on_delete=models.PROTECT)
status_type_id = models.SmallIntegerField(
choices=Status.STATUS_TYPE_CHOICES)
date_added = models.DateTimeField(null=True)
date_changed = models.DateTimeField(null=True)
contact_email_address = models.CharField(max_length=75, null=True)
class ServiceOrderDetail(models.Model):
booking_date = models.DateField()
service_order_start_time = models.TimeField(null=True)
service_order_end_time = models.TimeField(null=True)
resource_description = models.CharField(max_length=50)
resource_external_reference = models.CharField(max_length=255, blank=True)
service_order_id = models.PositiveIntegerField()
booking = models.ForeignKey(Booking, on_delete=models.PROTECT)
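# Illustrative sketch (added; not part of the original module): restclients_core models are
# Django-style declarations, so instances can be built directly in memory for tests, assuming
# the base Model accepts field keyword arguments the way Django models do.
def _example_status():
    status = Status(
        id=1,
        description='Confirmed booking',
        status_type_id=Status.STATUS_TYPE_BOOKED_SPACE,
        display_on_web=True,
    )
    return str(status)  # __str__ returns the description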
|
<gh_stars>1-10
from Simulation import MMCC
from Event import Event
from pylab import *
from math import factorial
def blockingProbability(num_servers: int, arrival_rate: float, departure_rate: float) -> float:
""" Static function to be able to analytical determine the expected blocking
probability for a simulation of MMCC given its parameters.
Params:
        - num_servers: The number of servers in the simulation
- arrival_rate: The exponential mean of arrival for the clients
- departure_rate: the exponential mean of the departure of the clients
Return:
- float: Blocking probability of the system
"""
    # Erlang B formula: B = (A**C / C!) / sum_{k=0}^{C} A**k / k!, with A = arrival_rate/departure_rate
    traffic = arrival_rate/departure_rate
    numerator = double((traffic**num_servers)/double(factorial(num_servers)))
    denominator = sum([(traffic**k)/double(factorial(k)) for k in range(num_servers + 1)])
    return numerator/denominator
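# Worked example (added for illustration): with C = 2 servers and offered traffic
# A = arrival_rate/departure_rate = 1 Erlang, the Erlang B formula gives
#   B = (1**2/2!) / (1 + 1 + 1/2!) = 0.5/2.5 = 0.2,
# so blockingProbability(2, 0.01, 0.01) should return 0.2 (to floating point accuracy).
def _erlang_b_sanity_check():
    return abs(blockingProbability(2, 0.01, 0.01) - 0.2) < 1e-9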
def serverUtilisation(arrival_rate: float, departure_rate: float) -> float:
""" Calculate the expected server Utilisation of the system with the given
parameters.
Params:
- arrival_rate: The exponential mean of arrival for the clients
- departure_rate: the exponential mean of the departure of the clients
Returns:
- float: Value representing the server utilisation value
"""
return arrival_rate/departure_rate
if __name__ == "__main__":
matplotlib.pyplot.show() # Allow plotting package to display figures
machine = MMCC() # Generate simulation machine
# Initialise investigation parameters
servers = 16 # Number of servers in the simulation
    arrival_range = logspace(-2,-1,50) # Range of arrival values to test
departure_rate = 0.01 # Departure rate of simulation events
clients = 10000 # Number of client arrivals
    # Data structures to hold observations
prob_blocking = [] # Probability of blocking evaluated from the simulation
pred_blocking = [] # Predicted probability of blocking from an analytical model
utilisation = [] # Recorded utilisation value from the simulation
pred_utilisation = [] # Predicted utilisation value from an analytical model
# Begin investigation
index = 0
for i, arrival_rate in enumerate(arrival_range):
Event.arrivalRate = arrival_rate # Set the arrival rate value
# run the simulation
machine.run( total_servers = servers, total_arrival = clients )
# Collect the statistical information from the simulation
probability = machine.blockingProbability()
# Record simulation that yielded a blocking probability less than 0.01
if probability < 0.01: index = i
# Add information into data structures
prob_blocking.append(probability)
pred_blocking.append(blockingProbability(servers, arrival_rate, departure_rate))
utilisation.append(machine.serverUtilisation())
pred_utilisation.append(serverUtilisation(arrival_rate, departure_rate))
# Display best simulation values
print("Values for re-run on best arrival rate:")
Event.arrivalRate = arrival_range[index]
machine.run(total_servers = servers, total_arrival = clients)
machine.report()
print()
# Statistical information about the progress of the investigation
difference = [i-j for i, j in zip(prob_blocking, pred_blocking)]
print("For blocking rate lower than 0.01:")
print("\tArrival rate:", arrival_range[index])
print("\tSimulation blocking probability:", prob_blocking[index])
print("\tSimulations variance from predictions:", sum(difference)/len(difference) )
# Plot the findings of the investigation for blocking probability
figure()
plot(arrival_range, prob_blocking, "b*", label="Simulation blocking percentage")
plot(arrival_range, pred_blocking, "r--", label="Analytic blocking percentage")
plot([0.01, arrival_range[index]], [prob_blocking[index]]*2, "y--", label="Setup with probability under 0.01")
plot([arrival_range[index]]*2, [-0.0005, prob_blocking[index]], "y--")
legend()
ylabel("Blocking Probability")
xlabel("Arrival rate")
xlim(0.01, 0.1)
ylim(-0.0005,0.025)
show(block=True)
# Plot the findings of the investigation for server utility
figure()
plot(arrival_range, utilisation, "b*", label="Utilisation of servers")
plot(arrival_range, pred_utilisation, "r--", label="Predicted utilisation of servers")
ylabel("Server Utility")
xlabel("Arrival rate")
legend()
xlim(0.01, 0.1)
    show(block=True)
|
<reponame>onlyyou2023/buildtools
#!/usr/bin/env python
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that files include headers from allowed directories.
Checks DEPS files in the source tree for rules, and applies those rules to
"#include" and "import" directives in the .cpp, .java, and .proto source files.
Any source file including something not permitted by the DEPS files will fail.
See README.md for a detailed description of the DEPS format.
"""
import os
import optparse
import re
import sys
import proto_checker
import cpp_checker
import java_checker
import results
from builddeps import DepsBuilder
from rules import Rule, Rules
def _IsTestFile(filename):
"""Does a rudimentary check to try to skip test files; this could be
improved but is good enough for now.
"""
return re.match('(test|mock|dummy)_.*|.*_[a-z]*test\.(cc|mm|java)', filename)
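# Illustrative examples (added; not part of the original tool) of filenames the helper above
# treats as test files; kept as plain data so importing this module stays side-effect free.
_IS_TEST_FILE_EXAMPLES = (
    ('test_foo.cc', True),       # 'test_' prefix
    ('mock_widget.java', True),  # 'mock_' prefix
    ('foo_unittest.cc', True),   # '_...test.<ext>' suffix
    ('foo.cc', False),           # ordinary source file
)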
class DepsChecker(DepsBuilder):
"""Parses include_rules from DEPS files and verifies files in the
source tree against them.
"""
def __init__(self,
base_directory=None,
extra_repos=[],
verbose=False,
being_tested=False,
ignore_temp_rules=False,
skip_tests=False,
resolve_dotdot=True):
"""Creates a new DepsChecker.
Args:
base_directory: OS-compatible path to root of checkout, e.g. C:\chr\src.
verbose: Set to true for debug output.
being_tested: Set to true to ignore the DEPS file at tools/checkdeps/DEPS.
ignore_temp_rules: Ignore rules that start with Rule.TEMP_ALLOW ("!").
"""
DepsBuilder.__init__(
self, base_directory, extra_repos, verbose, being_tested,
ignore_temp_rules)
self._skip_tests = skip_tests
self._resolve_dotdot = resolve_dotdot
self.results_formatter = results.NormalResultsFormatter(verbose)
def Report(self):
"""Prints a report of results, and returns an exit code for the process."""
if self.results_formatter.GetResults():
self.results_formatter.PrintResults()
return 1
print '\nSUCCESS\n'
return 0
def CheckDirectory(self, start_dir):
"""Checks all relevant source files in the specified directory and
its subdirectories for compliance with DEPS rules throughout the
tree (starting at |self.base_directory|). |start_dir| must be a
subdirectory of |self.base_directory|.
On completion, self.results_formatter has the results of
processing, and calling Report() will print a report of results.
"""
java = java_checker.JavaChecker(self.base_directory, self.verbose)
cpp = cpp_checker.CppChecker(
self.verbose, self._resolve_dotdot, self.base_directory)
proto = proto_checker.ProtoChecker(
self.verbose, self._resolve_dotdot, self.base_directory)
checkers = dict(
(extension, checker)
for checker in [java, cpp, proto] for extension in checker.EXTENSIONS)
for rules, file_paths in self.GetAllRulesAndFiles(start_dir):
for full_name in file_paths:
if self._skip_tests and _IsTestFile(os.path.basename(full_name)):
continue
file_extension = os.path.splitext(full_name)[1]
if not file_extension in checkers:
continue
checker = checkers[file_extension]
file_status = checker.CheckFile(rules, full_name)
if file_status.HasViolations():
self.results_formatter.AddError(file_status)
def CheckIncludesAndImports(self, added_lines, checker):
"""Check new import/#include statements added in the change
being presubmit checked.
Args:
added_lines: ((file_path, (changed_line, changed_line, ...), ...)
checker: CppChecker/JavaChecker/ProtoChecker checker instance
Return:
A list of tuples, (bad_file_path, rule_type, rule_description)
where rule_type is one of Rule.DISALLOW or Rule.TEMP_ALLOW and
rule_description is human-readable. Empty if no problems.
"""
problems = []
for file_path, changed_lines in added_lines:
if not checker.ShouldCheck(file_path):
continue
rules_for_file = self.GetDirectoryRules(os.path.dirname(file_path))
if not rules_for_file:
continue
for line in changed_lines:
is_include, violation = checker.CheckLine(
rules_for_file, line, file_path, True)
if not violation:
continue
rule_type = violation.violated_rule.allow
if rule_type == Rule.ALLOW:
continue
violation_text = results.NormalResultsFormatter.FormatViolation(
violation, self.verbose)
problems.append((file_path, rule_type, violation_text))
return problems
def CheckAddedCppIncludes(self, added_includes):
"""This is used from PRESUBMIT.py to check new #include statements added in
the change being presubmit checked.
Args:
added_includes: ((file_path, (include_line, include_line, ...), ...)
Return:
A list of tuples, (bad_file_path, rule_type, rule_description)
where rule_type is one of Rule.DISALLOW or Rule.TEMP_ALLOW and
rule_description is human-readable. Empty if no problems.
"""
return self.CheckIncludesAndImports(
added_includes, cpp_checker.CppChecker(self.verbose))
def CheckAddedJavaImports(self, added_imports):
"""This is used from PRESUBMIT.py to check new import statements added in
the change being presubmit checked.
Args:
added_imports: ((file_path, (import_line, import_line, ...), ...)
Return:
A list of tuples, (bad_file_path, rule_type, rule_description)
where rule_type is one of Rule.DISALLOW or Rule.TEMP_ALLOW and
rule_description is human-readable. Empty if no problems.
"""
return self.CheckIncludesAndImports(
added_imports,
java_checker.JavaChecker(self.base_directory, self.verbose,
added_imports))
def CheckAddedProtoImports(self, added_imports):
"""This is used from PRESUBMIT.py to check new #import statements added in
the change being presubmit checked.
Args:
added_imports : ((file_path, (import_line, import_line, ...), ...)
Return:
A list of tuples, (bad_file_path, rule_type, rule_description)
where rule_type is one of Rule.DISALLOW or Rule.TEMP_ALLOW and
rule_description is human-readable. Empty if no problems.
"""
return self.CheckIncludesAndImports(
added_imports, proto_checker.ProtoChecker(
verbose=self.verbose, root_dir=self.base_directory))
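# Illustrative sketch only (not part of the original tool): a minimal, assumed example of
# how a presubmit hook could feed newly added includes into the checker above. The file
# path and include line are made up for demonstration.
def _example_presubmit_usage():
  checker = DepsChecker(base_directory='.')
  added_includes = [('chrome/browser/foo.cc', ['#include "base/logging.h"'])]
  # Returns (bad_file_path, rule_type, rule_description) tuples; empty if nothing is wrong.
  return checker.CheckAddedCppIncludes(added_includes)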
def PrintUsage():
print """Usage: python checkdeps.py [--root <root>] [tocheck]
--root ROOT Specifies the repository root. This defaults to "../../.."
relative to the script file. This will be correct given the
normal location of the script in "<root>/tools/checkdeps".
--(others) There are a few lesser-used options; run with --help to show them.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checkdeps.py
python checkdeps.py --root c:\\source chrome"""
def main():
option_parser = optparse.OptionParser()
option_parser.add_option(
'', '--root',
default='', dest='base_directory',
help='Specifies the repository root. This defaults '
'to "../../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option(
'', '--extra-repos',
action='append', dest='extra_repos', default=[],
help='Specifies extra repositories relative to root repository.')
option_parser.add_option(
'', '--ignore-temp-rules',
action='store_true', dest='ignore_temp_rules', default=False,
help='Ignore !-prefixed (temporary) rules.')
option_parser.add_option(
'', '--generate-temp-rules',
action='store_true', dest='generate_temp_rules', default=False,
help='Print rules to temporarily allow files that fail '
'dependency checking.')
option_parser.add_option(
'', '--count-violations',
action='store_true', dest='count_violations', default=False,
help='Count #includes in violation of intended rules.')
option_parser.add_option(
'', '--skip-tests',
action='store_true', dest='skip_tests', default=False,
help='Skip checking test files (best effort).')
option_parser.add_option(
'-v', '--verbose',
action='store_true', default=False,
help='Print debug logging')
option_parser.add_option(
'', '--json',
help='Path to JSON output file')
option_parser.add_option(
'', '--no-resolve-dotdot',
action='store_false', dest='resolve_dotdot', default=True,
      help='resolve leading ../ in include directive paths relative '
           'to the file performing the inclusion.')
options, args = option_parser.parse_args()
deps_checker = DepsChecker(options.base_directory,
extra_repos=options.extra_repos,
verbose=options.verbose,
ignore_temp_rules=options.ignore_temp_rules,
skip_tests=options.skip_tests,
resolve_dotdot=options.resolve_dotdot)
base_directory = deps_checker.base_directory # Default if needed, normalized
# Figure out which directory we have to check.
start_dir = base_directory
if len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(base_directory, args[0]))
elif len(args) >= 2 or (options.generate_temp_rules and
options.count_violations):
# More than one argument, or incompatible flags, we don't handle this.
PrintUsage()
return 1
if not start_dir.startswith(deps_checker.base_directory):
print 'Directory to check must be a subdirectory of the base directory,'
print 'but %s is not a subdirectory of %s' % (start_dir, base_directory)
return 1
print 'Using base directory:', base_directory
print 'Checking:', start_dir
if options.generate_temp_rules:
deps_checker.results_formatter = results.TemporaryRulesFormatter()
elif options.count_violations:
deps_checker.results_formatter = results.CountViolationsFormatter()
if options.json:
deps_checker.results_formatter = results.JSONResultsFormatter(
options.json, deps_checker.results_formatter)
deps_checker.CheckDirectory(start_dir)
return deps_checker.Report()
if '__main__' == __name__:
sys.exit(main())
|
from datetime import datetime
from datetime import timedelta
from yelp_beans.matching.match import generate_meetings
from yelp_beans.matching.match_utils import get_counts_for_pairs
from yelp_beans.matching.match_utils import get_previous_meetings
from yelp_beans.matching.match_utils import save_meetings
from yelp_beans.models import Meeting
from yelp_beans.models import MeetingParticipant
from yelp_beans.models import MeetingRequest
from yelp_beans.models import MeetingSpec
from yelp_beans.models import MeetingSubscription
from yelp_beans.models import SubscriptionDateTime
from yelp_beans.models import User
from yelp_beans.models import UserSubscriptionPreferences
MEETING_COOLDOWN_WEEKS = 10
def test_pair_to_counts():
pairs = [('user1', 'user2'), ('user1', 'user2'), ('user2', 'user3')]
counts = get_counts_for_pairs(pairs)
    assert counts[('user2', 'user3')] == 1
    assert counts[('user1', 'user2')] == 2
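# Illustrative note (not part of the original tests): get_counts_for_pairs is expected to
# behave like collections.Counter over the pair tuples, i.e. the pairs above map to
# {('user1', 'user2'): 2, ('user2', 'user3'): 1}.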
def test_generate_save_meetings(session, subscription):
pref_1 = SubscriptionDateTime(datetime=datetime.now() - timedelta(weeks=MEETING_COOLDOWN_WEEKS - 1))
subscription = MeetingSubscription(title='all engineering weekly', datetime=[pref_1])
user_pref = UserSubscriptionPreferences(preference=pref_1, subscription=subscription)
user1 = User(email='<EMAIL>', meta_data={'department': 'dept'}, subscription_preferences=[user_pref])
user2 = User(email='<EMAIL>', meta_data={'department': 'dept2'}, subscription_preferences=[user_pref])
meeting_spec = MeetingSpec(meeting_subscription=subscription, datetime=pref_1.datetime)
mr1 = MeetingRequest(user=user1, meeting_spec=meeting_spec)
mr2 = MeetingRequest(user=user2, meeting_spec=meeting_spec)
session.add(pref_1)
session.add(subscription)
session.add(user_pref)
session.add(user1)
session.add(user2)
session.add(meeting_spec)
session.add(mr1)
session.add(mr2)
session.commit()
matches, unmatched = generate_meetings([user1, user2], meeting_spec)
save_meetings(matches, meeting_spec)
assert unmatched == []
participants = [
participant.user
for participant in MeetingParticipant.query.all()
]
assert participants == [user1, user2]
def test_get_previous_meetings(session):
pref_1 = SubscriptionDateTime(datetime=datetime.now() - timedelta(weeks=MEETING_COOLDOWN_WEEKS - 1))
subscription = MeetingSubscription(title='all engineering weekly', datetime=[pref_1])
user_pref = UserSubscriptionPreferences(preference=pref_1, subscription=subscription)
user1 = User(email='<EMAIL>', meta_data={'department': 'dept'}, subscription_preferences=[user_pref])
user2 = User(email='<EMAIL>', meta_data={'department': 'dept2'}, subscription_preferences=[user_pref])
meeting_spec = MeetingSpec(meeting_subscription=subscription, datetime=pref_1.datetime)
meeting = Meeting(meeting_spec=meeting_spec, cancelled=False)
mp1 = MeetingParticipant(meeting=meeting, user=user2)
mp2 = MeetingParticipant(meeting=meeting, user=user1)
session.add(pref_1)
session.add(subscription)
session.add(user_pref)
session.add(user1)
session.add(user2)
session.add(meeting_spec)
session.add(meeting)
session.add(mp1)
session.add(mp2)
session.commit()
assert get_previous_meetings(subscription) == set([(user1.id, user2.id)])
def test_get_previous_meetings_multi_subscription(session):
pref_1 = SubscriptionDateTime(datetime=datetime.now() - timedelta(weeks=MEETING_COOLDOWN_WEEKS - 1))
subscription1 = MeetingSubscription(title='all engineering weekly', datetime=[pref_1])
subscription2 = MeetingSubscription(title='all sales weekly', datetime=[pref_1])
user_pref1 = UserSubscriptionPreferences(preference=pref_1, subscription=subscription1)
user_pref2 = UserSubscriptionPreferences(preference=pref_1, subscription=subscription2)
user1 = User(email='<EMAIL>', meta_data={'department': 'dept'}, subscription_preferences=[user_pref1, user_pref2])
user2 = User(email='<EMAIL>', meta_data={'department': 'dept2'}, subscription_preferences=[user_pref1, user_pref2])
meeting_spec1 = MeetingSpec(meeting_subscription=subscription1, datetime=pref_1.datetime)
meeting = Meeting(meeting_spec=meeting_spec1, cancelled=False)
mp1 = MeetingParticipant(meeting=meeting, user=user2)
mp2 = MeetingParticipant(meeting=meeting, user=user1)
session.add(pref_1)
session.add(subscription1)
session.add(subscription2)
session.add(user_pref1)
session.add(user_pref2)
session.add(user1)
session.add(user2)
session.add(meeting_spec1)
session.add(meeting)
session.add(mp1)
session.add(mp2)
session.commit()
assert get_previous_meetings(subscription1) == set([(user1.id, user2.id)])
assert get_previous_meetings(subscription2) == set([])
def test_get_previous_multi_meetings(session):
pref_1 = SubscriptionDateTime(datetime=datetime.now() - timedelta(weeks=MEETING_COOLDOWN_WEEKS - 1))
subscription = MeetingSubscription(title='all engineering weekly', datetime=[pref_1])
user_pref = UserSubscriptionPreferences(preference=pref_1, subscription=subscription)
user1 = User(email='<EMAIL>', meta_data={'department': 'dept'}, subscription_preferences=[user_pref])
user2 = User(email='<EMAIL>', meta_data={'department': 'dept2'}, subscription_preferences=[user_pref])
meeting_spec = MeetingSpec(meeting_subscription=subscription, datetime=pref_1.datetime)
meeting1 = Meeting(meeting_spec=meeting_spec, cancelled=False)
meeting2 = Meeting(meeting_spec=meeting_spec, cancelled=False)
mp1 = MeetingParticipant(meeting=meeting1, user=user2)
mp2 = MeetingParticipant(meeting=meeting1, user=user1)
mp3 = MeetingParticipant(meeting=meeting2, user=user2)
mp4 = MeetingParticipant(meeting=meeting2, user=user1)
session.add(pref_1)
session.add(subscription)
session.add(user_pref)
session.add(user1)
session.add(user2)
session.add(meeting_spec)
session.add(meeting1)
session.add(meeting2)
session.add(mp1)
session.add(mp2)
session.add(mp3)
session.add(mp4)
session.commit()
assert get_previous_meetings(subscription) == set([(user1.id, user2.id), (user1.id, user2.id)])
def test_get_previous_meetings_no_specs(database_no_specs, session):
pref_1 = SubscriptionDateTime(datetime=datetime.now() - timedelta(weeks=MEETING_COOLDOWN_WEEKS + 1))
subscription = MeetingSubscription(title='all engineering weekly', datetime=[pref_1])
user_pref = UserSubscriptionPreferences(preference=pref_1, subscription=subscription)
user1 = User(email='<EMAIL>', meta_data={'department': 'dept'}, subscription_preferences=[user_pref])
user2 = User(email='<EMAIL>', meta_data={'department': 'dept2'}, subscription_preferences=[user_pref])
meeting_spec = MeetingSpec(meeting_subscription=subscription, datetime=pref_1.datetime)
meeting = Meeting(meeting_spec=meeting_spec, cancelled=False)
mp1 = MeetingParticipant(meeting=meeting, user=user2)
mp2 = MeetingParticipant(meeting=meeting, user=user1)
session.add(pref_1)
session.add(subscription)
session.add(user_pref)
session.add(user1)
session.add(user2)
session.add(meeting_spec)
session.add(meeting)
session.add(mp1)
session.add(mp2)
session.commit()
assert get_previous_meetings(subscription) == set([])
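# Illustrative note (not part of the original file): these tests are written for pytest;
# the `session`, `subscription`, and `database_no_specs` arguments are assumed to be
# fixtures supplied by the project's conftest.py, so the suite would typically be run
# with a plain `pytest` invocation from the repository root.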
|
<filename>utils/optimize_thresholds.py
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import h5py
import math
import time
import logging
import sklearn
import pickle
from sklearn import metrics
import matplotlib.pyplot as plt
from autoth.core import HyperParamsOptimizer
from utilities import (get_filename, create_folder,
frame_prediction_to_event_prediction, write_submission, official_evaluate)
from calculate_metrics import calculate_precision_recall_f1, calculate_metrics
import config
class AudioTaggingScoreCalculator(object):
def __init__(self, prediction_path):
"""Used to calculate score (such as F1) given prediction, target and hyper parameters.
"""
self.output_dict = pickle.load(open(prediction_path, 'rb'))
def __call__(self, params):
"""Use hyper parameters to threshold prediction to obtain output.
Then, the scores are calculated between output and target.
"""
(precision, recall, f1) = calculate_precision_recall_f1(
self.output_dict['target'], self.output_dict['clipwise_output'],
thresholds=params)
return f1
class SoundEventDetectionScoreCalculator(object):
def __init__(self, prediction_path, reference_csv_path, submission_path, classes_num):
"""Used to calculate score (such as F1) given prediction, target and hyper parameters.
"""
self.output_dict = pickle.load(open(prediction_path, 'rb'))
self.reference_csv_path = reference_csv_path
self.submission_path = submission_path
self.classes_num = classes_num
def params_dict_to_params_list(self, sed_params_dict):
params = sed_params_dict['audio_tagging_threshold'] + \
sed_params_dict['sed_high_threshold'] + \
sed_params_dict['sed_low_threshold']
return params
def params_list_to_params_dict(self, params):
sed_params_dict = {
'audio_tagging_threshold': params[0 : self.classes_num],
'sed_high_threshold': params[self.classes_num : 2 * self.classes_num],
'sed_low_threshold': params[2 * self.classes_num :],
'n_smooth': 10,
'n_salt': 10
}
return sed_params_dict
def __call__(self, params):
"""Use hyper parameters to threshold prediction to obtain output.
Then, the scores are calculated between output and target.
"""
params_dict = self.params_list_to_params_dict(params)
# params_dict['n_smooth'] = 1
# params_dict['n_salt'] = 1
predict_event_list = frame_prediction_to_event_prediction(
self.output_dict, params_dict)
# Write predicted events to submission file
write_submission(predict_event_list, self.submission_path)
# SED with official tool
results = official_evaluate(self.reference_csv_path, self.submission_path)
f1 = results['overall']['f_measure']['f_measure']
return f1
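# Illustrative aside (not part of the original script): the two helper methods above
# convert between the flat list consumed by the optimizer and the dict consumed by
# frame_prediction_to_event_prediction (the dict also carries fixed n_smooth/n_salt
# values). For example, with classes_num == 2 (made-up numbers):
#   [0.5, 0.5, 0.3, 0.3, 0.1, 0.1]  ->  {'audio_tagging_threshold': [0.5, 0.5],
#                                         'sed_high_threshold': [0.3, 0.3],
#                                         'sed_low_threshold': [0.1, 0.1],
#                                         'n_smooth': 10, 'n_salt': 10}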
def optimize_at_thresholds(args):
"""Calculate audio tagging metrics with optimized thresholds.
Args:
dataset_dir: str
workspace: str
filename: str
holdout_fold: '1'
model_type: str, e.g., 'Cnn_9layers_Gru_FrameAtt'
loss_type: str, e.g., 'clip_bce'
augmentation: str, e.g., 'mixup'
batch_size: int
iteration: int
"""
    # Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
filename = args.filename
holdout_fold = args.holdout_fold
model_type = args.model_type
loss_type = args.loss_type
augmentation = args.augmentation
batch_size = args.batch_size
iteration = args.iteration
data_type = 'test'
classes_num = config.classes_num
# Paths
if data_type == 'test':
reference_csv_path = os.path.join(dataset_dir, 'metadata',
'groundtruth_strong_label_testing_set.csv')
prediction_path = os.path.join(workspace, 'predictions',
'{}'.format(filename), 'holdout_fold={}'.format(holdout_fold),
'model_type={}'.format(model_type), 'loss_type={}'.format(loss_type),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'{}_iterations.prediction.{}.pkl'.format(iteration, data_type))
tmp_submission_path = os.path.join(workspace, '_tmp_submission',
'{}'.format(filename), 'holdout_fold={}'.format(holdout_fold),
'model_type={}'.format(model_type), 'loss_type={}'.format(loss_type),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'_submission.csv')
opt_thresholds_path = os.path.join(workspace, 'opt_thresholds',
'{}'.format(filename), 'holdout_fold={}'.format(holdout_fold),
'model_type={}'.format(model_type), 'loss_type={}'.format(loss_type),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'{}_iterations.at.{}.pkl'.format(iteration, data_type))
create_folder(os.path.dirname(opt_thresholds_path))
# Score calculator
score_calculator = AudioTaggingScoreCalculator(prediction_path)
# Thresholds optimizer
hyper_params_opt = HyperParamsOptimizer(score_calculator, learning_rate=1e-2, epochs=100)
# Initialize thresholds
init_params = [0.3] * classes_num
score_no_opt = score_calculator(init_params)
# Optimize thresholds
(opt_score, opt_params) = hyper_params_opt.do_optimize(init_params=init_params)
print('\n------ Optimized thresholds ------')
print(np.around(opt_params, decimals=4))
print('\n------ Without optimized thresholds ------')
print('Score: {:.3f}'.format(score_no_opt))
print('\n------ With optimized thresholds ------')
print('Score: {:.3f}'.format(opt_score))
# Write out optimized thresholds
pickle.dump(opt_params, open(opt_thresholds_path, 'wb'))
print('\nSave optimized thresholds to {}'.format(opt_thresholds_path))
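# Illustrative aside (not part of the original pipeline): the dumped thresholds can later
# be reloaded and applied the same way as above, e.g. (assumed usage):
#   opt_thresholds = pickle.load(open(opt_thresholds_path, 'rb'))
#   calculate_precision_recall_f1(output_dict['target'], output_dict['clipwise_output'],
#                                 thresholds=opt_thresholds)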
def optimize_sed_thresholds(args):
"""Calculate sound event detection metrics with optimized thresholds.
Args:
dataset_dir: str
workspace: str
filename: str
holdout_fold: '1'
model_type: str, e.g., 'Cnn_9layers_Gru_FrameAtt'
loss_type: str, e.g., 'clip_bce'
augmentation: str, e.g., 'mixup'
batch_size: int
iteration: int
"""
    # Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
filename = args.filename
holdout_fold = args.holdout_fold
model_type = args.model_type
loss_type = args.loss_type
augmentation = args.augmentation
batch_size = args.batch_size
iteration = args.iteration
data_type = 'test'
classes_num = config.classes_num
# Paths
if data_type == 'test':
reference_csv_path = os.path.join(dataset_dir, 'metadata',
'groundtruth_strong_label_testing_set.csv')
prediction_path = os.path.join(workspace, 'predictions',
'{}'.format(filename), 'holdout_fold={}'.format(holdout_fold),
'model_type={}'.format(model_type), 'loss_type={}'.format(loss_type),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'{}_iterations.prediction.{}.pkl'.format(iteration, data_type))
tmp_submission_path = os.path.join(workspace, '_tmp_submission',
'{}'.format(filename), 'holdout_fold={}'.format(holdout_fold),
'model_type={}'.format(model_type), 'loss_type={}'.format(loss_type),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'_submission.csv')
opt_thresholds_path = os.path.join(workspace, 'opt_thresholds',
'{}'.format(filename), 'holdout_fold={}'.format(holdout_fold),
'model_type={}'.format(model_type), 'loss_type={}'.format(loss_type),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'{}_iterations.sed.{}.pkl'.format(iteration, data_type))
create_folder(os.path.dirname(opt_thresholds_path))
# Score calculator
score_calculator = SoundEventDetectionScoreCalculator(
prediction_path=prediction_path, reference_csv_path=reference_csv_path,
submission_path=tmp_submission_path, classes_num=classes_num)
# Thresholds optimizer
hyper_params_opt = HyperParamsOptimizer(score_calculator,
learning_rate=1e-2, epochs=50, step=0.02, max_search=5)
# Initialize thresholds
sed_params_dict = {
'audio_tagging_threshold': [0.5] * classes_num,
'sed_high_threshold': [0.3] * classes_num,
'sed_low_threshold': [0.1] * classes_num}
init_params = score_calculator.params_dict_to_params_list(sed_params_dict)
score_no_opt = score_calculator(init_params)
# Optimize thresholds
(opt_score, opt_params) = hyper_params_opt.do_optimize(init_params=init_params)
opt_params = score_calculator.params_list_to_params_dict(opt_params)
print('\n------ Optimized thresholds ------')
print(opt_params)
print('\n------ Without optimized thresholds ------')
print('Score: {:.3f}'.format(score_no_opt))
print('\n------ With optimized thresholds ------')
print('Score: {:.3f}'.format(opt_score))
# Write out optimized thresholds
pickle.dump(opt_params, open(opt_thresholds_path, 'wb'))
print('\nSave optimized thresholds to {}'.format(opt_thresholds_path))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Optimize thresholds for audio tagging and sound event detection.')
subparsers = parser.add_subparsers(dest='mode')
parser_optimize_at_thresholds = subparsers.add_parser('optimize_at_thresholds')
parser_optimize_at_thresholds.add_argument('--dataset_dir', type=str, required=True)
parser_optimize_at_thresholds.add_argument('--workspace', type=str, required=True)
parser_optimize_at_thresholds.add_argument('--filename', type=str, required=True)
parser_optimize_at_thresholds.add_argument('--holdout_fold', type=str, choices=['1', 'none'], required=True)
parser_optimize_at_thresholds.add_argument('--model_type', type=str, required=True)
parser_optimize_at_thresholds.add_argument('--loss_type', type=str, required=True)
parser_optimize_at_thresholds.add_argument('--augmentation', type=str, choices=['none', 'mixup'], required=True)
parser_optimize_at_thresholds.add_argument('--batch_size', type=int, required=True)
parser_optimize_at_thresholds.add_argument('--iteration', type=int, required=True)
parser_optimize_sed_thresholds = subparsers.add_parser('optimize_sed_thresholds')
parser_optimize_sed_thresholds.add_argument('--dataset_dir', type=str, required=True)
parser_optimize_sed_thresholds.add_argument('--workspace', type=str, required=True)
parser_optimize_sed_thresholds.add_argument('--filename', type=str, required=True)
parser_optimize_sed_thresholds.add_argument('--holdout_fold', type=str, choices=['1', 'none'], required=True)
parser_optimize_sed_thresholds.add_argument('--model_type', type=str, required=True)
parser_optimize_sed_thresholds.add_argument('--loss_type', type=str, required=True)
parser_optimize_sed_thresholds.add_argument('--augmentation', type=str, choices=['none', 'mixup'], required=True)
parser_optimize_sed_thresholds.add_argument('--batch_size', type=int, required=True)
parser_optimize_sed_thresholds.add_argument('--iteration', type=int, required=True)
args = parser.parse_args()
if args.mode == 'optimize_at_thresholds':
optimize_at_thresholds(args)
elif args.mode == 'optimize_sed_thresholds':
optimize_sed_thresholds(args)
else:
        raise Exception('Unknown mode: {}'.format(args.mode))
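# Illustrative invocation (not part of the original script); all paths and settings below
# are made-up placeholders:
#   python utils/optimize_thresholds.py optimize_sed_thresholds \
#       --dataset_dir=/path/to/dataset --workspace=/path/to/workspace \
#       --filename=main --holdout_fold=1 --model_type=Cnn_9layers_Gru_FrameAtt \
#       --loss_type=clip_bce --augmentation=mixup --batch_size=32 --iteration=10000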
|
<gh_stars>0
from __future__ import unicode_literals
import os
import sys
import unittest
import yaml
from aws_okta_keyman.config import Config
if sys.version_info[0] < 3: # Python 2
import mock
else:
from unittest import mock
class ConfigTest(unittest.TestCase):
def test_full_app_url(self):
config = Config(['aws_okta_keyman.py'])
config.org = 'example'
config.appid = 'some/thing'
ret = config.full_app_url()
self.assertEqual(ret, 'https://example.okta.com/some/thing')
def test_full_app_url_preview(self):
config = Config(['aws_okta_keyman.py'])
config.org = 'example'
config.appid = 'some/thing'
config.oktapreview = True
ret = config.full_app_url()
self.assertEqual(ret, 'https://example.oktapreview.com/some/thing')
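    # Illustrative note (not part of the original tests): taken together, the two cases
    # above pin down the expected URL shape, i.e. full_app_url() presumably renders
    # 'https://<org>.okta.com/<appid>' and switches to 'oktapreview' when that flag is set.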
@mock.patch('aws_okta_keyman.config.sys.exit')
@mock.patch('aws_okta_keyman.config.Config.interactive_config')
def test_start_interactive_config(self, int_mock, exit_mock):
Config(['aws_okta_keyman.py', 'config'])
assert int_mock.called
assert exit_mock.called
@mock.patch('aws_okta_keyman.config.Config.parse_config')
@mock.patch('aws_okta_keyman.config.os.path.isfile')
def test_set_appid_from_account_id(self, isfile_mock, parse_mock):
isfile_mock.return_value = True
parse_mock.return_value = None
config = Config(['aws_okta_keyman.py'])
config.accounts = [{'appid': 'A123'}]
config.set_appid_from_account_id(0)
self.assertEqual(config.appid, 'A123')
def test_validate_good_with_accounts(self):
config = Config(['aws_okta_keyman.py'])
config.accounts = [{'appid': 'A123'}]
config.org = 'example'
config.username = '<EMAIL>'
self.assertEqual(config.validate(), None)
def test_validate_good_with_appid(self):
config = Config(['aws_okta_keyman.py'])
config.appid = 'A123'
config.org = 'example'
config.username = '<EMAIL>'
self.assertEqual(config.validate(), None)
def test_validate_missing_org(self):
config = Config(['aws_okta_keyman.py'])
config.username = '<EMAIL>'
with self.assertRaises(ValueError):
config.validate()
@mock.patch('aws_okta_keyman.config.getpass')
def test_validate_automatic_username_from_none(self, getpass_mock):
getpass_mock.getuser.return_value = 'user'
config = Config(['aws_okta_keyman.py'])
config.org = 'example'
config.validate()
self.assertEqual(config.username, 'user')
@mock.patch('aws_okta_keyman.config.getpass')
def test_validate_automatic_username_from_partial_config(self,
getpass_mock):
getpass_mock.getuser.return_value = 'user'
config = Config(['aws_okta_keyman.py'])
config.accounts = [{'appid': 'A123'}]
config.org = 'example'
config.username = 'automatic-username'
config.validate()
self.assertEqual(config.username, 'user')
@mock.patch('aws_okta_keyman.config.getpass')
def test_validate_automatic_username_from_full_config(self, getpass_mock):
getpass_mock.getuser.return_value = 'user'
config = Config(['aws_okta_keyman.py'])
config.accounts = [{'appid': 'A123'}]
config.org = 'example'
config.username = '<EMAIL>'
config.validate()
self.assertEqual(config.username, '<EMAIL>')
def test_validate_short_duration(self):
config = Config(['aws_okta_keyman.py'])
config.org = 'example'
config.duration = 1
with self.assertRaises(ValueError):
config.validate()
def test_validate_long_duration(self):
config = Config(['aws_okta_keyman.py'])
config.org = 'example'
config.duration = 100000000
with self.assertRaises(ValueError):
config.validate()
@mock.patch('aws_okta_keyman.config.Config.validate')
@mock.patch('aws_okta_keyman.config.Config.parse_args')
@mock.patch('aws_okta_keyman.config.os.path.isfile')
def test_get_config_args_only(self, isfile_mock, parse_mock, valid_mock):
isfile_mock.return_value = False
parse_mock.return_value = None
valid_mock.return_value = None
argv = [
'aws_okta_keyman.py',
'-a', 'app/id',
'-o', 'foobar',
'-u', 'test'
]
config = Config(argv)
config.get_config()
parse_mock.assert_has_calls([
mock.call(),
])
@mock.patch('aws_okta_keyman.config.os.path.expanduser')
@mock.patch('aws_okta_keyman.config.Config.parse_config')
@mock.patch('aws_okta_keyman.config.Config.validate')
@mock.patch('aws_okta_keyman.config.Config.parse_args')
@mock.patch('aws_okta_keyman.config.os.path.isfile')
def test_get_config_auto_config_only(self, isfile_mock, parse_mock,
valid_mock, config_mock,
expuser_mock):
isfile_mock.return_value = True
parse_mock.return_value = None
valid_mock.return_value = None
config_mock.return_value = None
expuser_mock.return_value = ''
config = Config(['aws_okta_keyman.py'])
config.get_config()
parse_mock.assert_has_calls([
mock.call(main_required=False),
])
config_mock.assert_has_calls([
mock.call('/.config/aws_okta_keyman.yml'),
])
@mock.patch('aws_okta_keyman.config.Config.parse_args')
@mock.patch('aws_okta_keyman.config.os.path.expanduser')
@mock.patch('aws_okta_keyman.config.Config.parse_config')
@mock.patch('aws_okta_keyman.config.Config.validate')
@mock.patch('aws_okta_keyman.config.os.path.isfile')
def test_get_config_specified_config_only(self, isfile_mock, valid_mock,
config_mock, expuser_mock,
_parse_mock):
isfile_mock.return_value = True
valid_mock.return_value = None
config_mock.return_value = None
expuser_mock.return_value = ''
config = Config(['aws_okta_keyman.py', '-c'])
config.config = '/.config/aws_okta_keyman.yml'
config.get_config()
config_mock.assert_has_calls([
mock.call('/.config/aws_okta_keyman.yml'),
])
@mock.patch('aws_okta_keyman.config.Config.write_config')
@mock.patch('aws_okta_keyman.config.os.path.expanduser')
@mock.patch('aws_okta_keyman.config.Config.validate')
@mock.patch('aws_okta_keyman.config.Config.parse_args')
@mock.patch('aws_okta_keyman.config.os.path.isfile')
def test_get_config_write_mixed_config(self, isfile_mock, _parse_mock,
valid_mock, expuser_mock,
write_mock):
isfile_mock.return_value = True
valid_mock.return_value = None
write_mock.return_value = None
expuser_mock.return_value = ''
config = Config(['aws_okta_keyman.py', '-w'])
config.get_config()
config.write = './.config/aws_okta_keyman.yml'
self.assertEqual(config.write, './.config/aws_okta_keyman.yml')
write_mock.assert_has_calls([
mock.call(),
])
def test_parse_args_no_req_main(self):
argv = [
'aws_okta_keyman.py',
'-D'
]
config = Config(argv)
config.parse_args(main_required=False)
# Should succeed without throwing due to missing args
self.assertEqual(config.debug, True)
@mock.patch('argparse.ArgumentParser._print_message', mock.MagicMock())
def test_parse_args_req_main_missing(self):
argv = [
'aws_okta_keyman.py',
'-D'
]
config = Config(argv)
# Main required but not passed, should raise
with self.assertRaises(SystemExit):
config.parse_args(main_required=True)
def test_parse_args_req_main_present(self):
argv = [
'aws_okta_keyman.py',
'-a', 'app/id',
'-o', 'foobar',
'-u', 'test'
]
config = Config(argv)
config.parse_args(main_required=True)
# Should succeed without throwing due to missing args
self.assertEqual(config.appid, 'app/id')
self.assertEqual(config.org, 'foobar')
self.assertEqual(config.username, 'test')
def test_parse_args_verify_all_parsed_short(self):
argv = [
'aws_okta_keyman.py',
'-a', 'app/id',
'-o', 'foobar',
'-u', 'test',
'-n', 'profilename',
'-c', 'config_file_path',
'-w', 'write_file_path',
'-d', 'push',
'-D', '-r', '-p'
]
config = Config(argv)
config.parse_args(main_required=True)
self.assertEqual(config.appid, 'app/id')
self.assertEqual(config.org, 'foobar')
self.assertEqual(config.username, 'test')
self.assertEqual(config.name, 'profilename')
self.assertEqual(config.config, 'config_file_path')
self.assertEqual(config.writepath, 'write_file_path')
self.assertEqual(config.duo_factor, 'push')
self.assertEqual(config.debug, True)
self.assertEqual(config.reup, True)
self.assertEqual(config.oktapreview, True)
def test_parse_args_verify_all_parsed_full(self):
argv = [
'aws_okta_keyman.py',
'--appid', 'app/id',
'--org', 'foobar',
'--username', 'test',
'--name', 'profilename',
'--config', 'config_file_path',
'--writepath', 'write_file_path',
'--duo_factor', 'push',
'--debug', '--reup'
]
config = Config(argv)
config.parse_args(main_required=True)
self.assertEqual(config.appid, 'app/id')
self.assertEqual(config.org, 'foobar')
self.assertEqual(config.username, 'test')
self.assertEqual(config.name, 'profilename')
self.assertEqual(config.config, 'config_file_path')
self.assertEqual(config.writepath, 'write_file_path')
self.assertEqual(config.duo_factor, 'push')
self.assertEqual(config.debug, True)
self.assertEqual(config.reup, True)
@mock.patch('aws_okta_keyman.config.os.path.isfile')
def test_read_yaml(self, isfile_mock):
isfile_mock.return_value = True
yml = ("username: <EMAIL>\n"
"org: example\n"
"appid: app/id\n")
m = mock.mock_open(read_data=yml)
with mock.patch('aws_okta_keyman.config.open', m):
ret = Config.read_yaml('./.config/aws_okta_keyman.yml')
expected = {
'username': '<EMAIL>', 'org': 'example', 'appid': 'app/id'
}
self.assertEqual(ret, expected)
@mock.patch('aws_okta_keyman.config.os.path.isfile')
def test_read_yaml_file_missing_no_raise(self, isfile_mock):
isfile_mock.return_value = False
ret = Config.read_yaml('./.config/aws_okta_keyman.yml')
self.assertEqual(ret, {})
@mock.patch('aws_okta_keyman.config.os.path.isfile')
def test_read_yaml_file_missing_with_raise(self, isfile_mock):
isfile_mock.return_value = False
with self.assertRaises(IOError):
Config.read_yaml('./.config/aws_okta_keyman.yml',
raise_on_error=True)
@mock.patch('aws_okta_keyman.config.os.path.isfile')
def test_read_yaml_parse_error_no_raise(self, isfile_mock):
isfile_mock.return_value = True
yml = ("username: <EMAIL>\n"
"org: example\n"
"- appid: foo\n")
m = mock.mock_open(read_data=yml)
with mock.patch('aws_okta_keyman.config.open', m):
ret = Config.read_yaml('./.config/aws_okta_keyman.yml')
self.assertEqual(ret, {})
@mock.patch('aws_okta_keyman.config.os.path.isfile')
def test_read_yaml_parse_error_with_raise(self, isfile_mock):
isfile_mock.return_value = True
yml = ("username: <EMAIL>\n"
"org: example\n"
"- appid: foo\n")
m = mock.mock_open(read_data=yml)
with mock.patch('aws_okta_keyman.config.open', m):
with self.assertRaises(yaml.parser.ParserError):
Config.read_yaml('./.config/aws_okta_keyman.yml',
raise_on_error=True)
@mock.patch('aws_okta_keyman.config.os.path.isfile')
def test_read_yaml_scan_error_no_raise(self, isfile_mock):
isfile_mock.return_value = True
yml = ("username: <EMAIL>\n"
"org: example\n"
"appid app/id\n")
m = mock.mock_open(read_data=yml)
with mock.patch('aws_okta_keyman.config.open', m):
ret = Config.read_yaml('./.config/aws_okta_keyman.yml')
self.assertEqual(ret, {})
@mock.patch('aws_okta_keyman.config.os.path.isfile')
def test_read_yaml_scan_error_with_raise(self, isfile_mock):
isfile_mock.return_value = True
yml = ("username: <EMAIL>.<EMAIL>\n"
"org: example\n"
"appid app/id\n")
m = mock.mock_open(read_data=yml)
with mock.patch('aws_okta_keyman.config.open', m):
with self.assertRaises(yaml.scanner.ScannerError):
Config.read_yaml('./.config/aws_okta_keyman.yml',
raise_on_error=True)
def test_parse_config(self):
config = Config(['aws_okta_keyman.py'])
config.read_yaml = mock.MagicMock()
config.read_yaml.return_value = {
'username': '<EMAIL>',
'org': 'example',
'appid': 'app/id',
}
config.parse_config('./.config/aws_okta_keyman.yml')
self.assertEqual(config.appid, 'app/id')
self.assertEqual(config.org, 'example')
self.assertEqual(config.username, '<EMAIL>')
def test_parse_config_args_preferred(self):
config = Config(['aws_okta_keyman.py'])
config.appid = 'mysupercoolapp/id'
config.org = 'foobar'
config.username = 'test'
config.read_yaml = mock.MagicMock()
config.read_yaml.return_value = {
'username': '<EMAIL>',
'org': 'example',
'appid': 'app/id',
}
config.parse_config('./.config/aws_okta_keyman.yml')
# Make sure we're getting the args not the config values
self.assertEqual(config.appid, 'mysupercoolapp/id')
self.assertEqual(config.org, 'foobar')
self.assertEqual(config.username, 'test')
def test_write_config(self):
config = Config(['aws_okta_keyman.py'])
config.clean_config_for_write = mock.MagicMock()
config_clean = {
'accounts': [{'name': 'Dev', 'appid': 'A123/123'}],
'org': 'example',
'reup': None,
'username': '<EMAIL>',
}
config.clean_config_for_write.return_value = config_clean
config.writepath = './.config/aws_okta_keyman.yml'
config.username = '<EMAIL>'
config.read_yaml = mock.MagicMock()
config.read_yaml.return_value = {
'username': '<EMAIL>',
'org': 'example',
'appid': 'app/id',
}
m = mock.mock_open()
with mock.patch('aws_okta_keyman.config.open', m):
config.write_config()
m.assert_has_calls([
mock.call(u'./.config/aws_okta_keyman.yml', 'w'),
])
m.assert_has_calls([
mock.call().write('accounts'),
mock.call().write(':'),
mock.call().write('\n'),
mock.call().write('-'),
mock.call().write(' '),
mock.call().write('appid'),
mock.call().write(':'),
mock.call().write(' '),
mock.call().write('A123/123'),
mock.call().write('\n'),
mock.call().write(' '),
mock.call().write('name'),
mock.call().write(':'),
mock.call().write(' '),
mock.call().write('Dev'),
mock.call().write('\n'),
mock.call().write('org'),
mock.call().write(':'),
mock.call().write(' '),
mock.call().write('example'),
mock.call().write('\n'),
mock.call().write('reup'),
mock.call().write(':'),
mock.call().write(' '),
mock.call().write('null'),
mock.call().write('\n'),
mock.call().write('username'),
mock.call().write(':'),
mock.call().write(' '),
mock.call().write('<EMAIL>'),
mock.call().write('\n'),
mock.call().flush(),
mock.call().flush(),
mock.call().__exit__(None, None, None)
])
def test_write_config_new_file(self):
config = Config(['aws_okta_keyman.py'])
config.clean_config_for_write = mock.MagicMock()
config_clean = {
'org': 'example',
'reup': None,
'username': '<EMAIL>',
}
config.clean_config_for_write.return_value = config_clean
config.writepath = './.config/aws_okta_keyman.yml'
config.username = '<EMAIL>'
config.appid = 'app/id'
config.org = 'example'
config.read_yaml = mock.MagicMock()
config.read_yaml.return_value = {}
m = mock.mock_open()
with mock.patch('aws_okta_keyman.config.open', m):
config.write_config()
m.assert_has_calls([
mock.call().write('org'),
mock.call().write(':'),
mock.call().write(' '),
mock.call().write('example'),
mock.call().write('\n'),
mock.call().write('reup'),
mock.call().write(':'),
mock.call().write(' '),
mock.call().write('null'),
mock.call().write('\n'),
mock.call().write('username'),
mock.call().write(':'),
mock.call().write(' '),
mock.call().write('<EMAIL>'),
mock.call().write('\n'),
mock.call().flush(),
mock.call().flush(),
mock.call().__exit__(None, None, None)
])
def test_write_config_path_expansion(self):
config = Config(['aws_okta_keyman.py'])
config.clean_config_for_write = mock.MagicMock()
config.clean_config_for_write.return_value = {}
config.writepath = '~/.config/aws_okta_keyman.yml'
config.username = '<EMAIL>'
config.appid = 'app/id'
config.org = 'example'
config.read_yaml = mock.MagicMock()
config.read_yaml.return_value = {}
expected_path = os.path.expanduser(config.writepath)
m = mock.mock_open()
with mock.patch('aws_okta_keyman.config.open', m):
config.write_config()
m.assert_has_calls([mock.call(expected_path, 'w')])
@mock.patch('aws_okta_keyman.config.os')
def test_write_config_path_create_when_missing(self, os_mock):
config = Config(['aws_okta_keyman.py'])
config.clean_config_for_write = mock.MagicMock()
config.clean_config_for_write.return_value = {}
config.read_yaml = mock.MagicMock()
config.read_yaml.return_value = {}
folderpath = '/home/user/.config/'
os_mock.path.dirname.return_value = folderpath
os_mock.path.exists.return_value = False
m = mock.mock_open()
with mock.patch('aws_okta_keyman.config.open', m):
config.write_config()
os_mock.assert_has_calls([
mock.call.makedirs(folderpath)
])
def test_clean_config_for_write(self):
config_in = {
'name': 'foo',
'appid': 'foo',
'argv': 'foo',
'writepath': 'foo',
'config': 'foo',
'debug': 'foo',
'oktapreview': 'foo',
'accounts': None,
'shouldstillbehere': 'woohoo',
'password_reset': True,
'command': None
}
config_out = {
'shouldstillbehere': 'woohoo'
}
ret = Config.clean_config_for_write(config_in)
self.assertEqual(ret, config_out)
def test_clean_config_for_write_with_accounts(self):
accounts = [
{'name': 'Account 1', 'appid': 'ABC123'},
{'name': 'Account 2', 'appid': 'XYZ890'}
]
config_in = {
'name': 'foo',
'appid': 'foo',
'argv': 'foo',
'writepath': 'foo',
'config': 'foo',
'debug': 'foo',
'oktapreview': 'foo',
'accounts': accounts,
'shouldstillbehere': 'woohoo',
'password_reset': True,
'command': None
}
config_out = {
'accounts': accounts,
'shouldstillbehere': 'woohoo'
}
ret = Config.clean_config_for_write(config_in)
self.assertEqual(ret, config_out)
@mock.patch('aws_okta_keyman.config.input')
def test_user_input(self, input_mock):
input_mock.return_value = ' test '
self.assertEqual('test', Config.user_input('input test'))
@mock.patch('aws_okta_keyman.config.getpass')
@mock.patch('aws_okta_keyman.config.input')
def test_interactive_config(self, input_mock, getpass_mock):
input_mock.side_effect = ['org', 'user', 'appid', 'test', '']
getpass_mock.return_value = 'fakeuser'
config = Config(['aws_okta_keyman.py'])
config.write_config = mock.MagicMock()
config.interactive_config()
self.assertEqual(config.org, 'org')
self.assertEqual(config.username, 'user')
self.assertEqual(config.accounts, [{'name': 'test', 'appid': 'appid'}])
config.write_config.assert_has_calls([mock.call()])
@mock.patch('aws_okta_keyman.config.getpass')
@mock.patch('aws_okta_keyman.config.input')
def test_interactive_config_auto_user(self, input_mock, getpass_mock):
input_mock.side_effect = ['org', '', 'appid', 'test', '']
getpass_mock.return_value = 'fakeuser'
config = Config(['aws_okta_keyman.py'])
config.write_config = mock.MagicMock()
config.interactive_config()
self.assertEqual(config.username, 'automatic-username')
@mock.patch('aws_okta_keyman.config.getpass')
@mock.patch('aws_okta_keyman.config.input')
def test_interactive_config_auto_account(self, input_mock, _getpass_mock):
input_mock.side_effect = ['org', 'user', '']
config = Config(['aws_okta_keyman.py'])
config.write_config = mock.MagicMock()
config.interactive_config()
self.assertEqual(config.accounts, None)
@mock.patch('aws_okta_keyman.config.getpass')
@mock.patch('aws_okta_keyman.config.input')
def test_interactive_config_keyboardexit(self, input_mock, getpass_mock):
input_mock.side_effect = ['org', 'user', KeyboardInterrupt]
getpass_mock.return_value = 'fakeuser'
config = Config(['aws_okta_keyman.py'])
config.write_config = mock.MagicMock()
ret = config.interactive_config()
self.assertEqual(ret, None)
assert not config.write_config.called
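# Illustrative note (not part of the original file): these cases run under the standard
# unittest runner on both Python 2 and 3 (see the mock import shim at the top), e.g. with
# `python -m unittest discover` from the project root (exact layout assumed).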
|
<filename>eyelab/main.py<gh_stars>0
import logging
import os
import sys
from functools import partial
from pathlib import Path
import eyepy as ep
import requests
from packaging import version
from PySide6 import QtWidgets
from PySide6.QtCore import QCoreApplication, QSize
from PySide6.QtGui import QIcon
from PySide6.QtWidgets import QFileDialog, QMessageBox
import eyelab as el
from eyelab.commands import get_undo_stack
from eyelab.config import EYELAB_FOLDER
from eyelab.dialogs.help import (
AreaAnnotationHelp,
IntroductionHelp,
LayerAnnotationHelp,
ShortcutHelp,
)
from eyelab.views.ui.ui_main_window import Ui_MainWindow
from eyelab.views.workspace import Workspace
class eyelab(QtWidgets.QMainWindow, Ui_MainWindow):
"""Create the main window that stores all of the widgets necessary for the application."""
def __init__(self, parent=None):
"""Initialize the components of the main window."""
super().__init__(parent)
self.setupUi(self)
self.save_path = None
self.action_import_vol.triggered.connect(
partial(self.import_data, method=ep.import_heyex_vol, format="file")
)
self.action_import_hexml.triggered.connect(
partial(self.import_data, method=ep.import_heyex_xml, format="folder")
)
self.action_import_retouch.triggered.connect(
partial(self.import_data, method=ep.import_retouch, format="folder")
)
self.action_import_duke.triggered.connect(
partial(self.import_data, method=ep.import_duke_mat, format="file")
)
self.action_import_bsfolder.triggered.connect(
partial(self.import_data, method=ep.import_bscan_folder, format="folder")
)
# self.action_save_annotations.triggered.connect(self.save_annotations)
# self.action_save_annotations_as.triggered.connect(
# partial(self.save_annotations, save_as=True)
# )
# self.action_load_annotations.triggered.connect(self.load_annotations)
self.action_open.triggered.connect(self.load)
self.action_save.triggered.connect(self.save)
self.action_save_as.triggered.connect(partial(self.save, save_as=True))
self.action_shortcut_sheet.triggered.connect(
lambda: self.open_help("shortcuts")
)
self.action_area_annotation_guide.triggered.connect(
lambda: self.open_help("area_annotation")
)
self.action_layer_annotation_guide.triggered.connect(
lambda: self.open_help("layer_annotation")
)
self.action_registration_guide.triggered.connect(
lambda: self.open_help("registration")
)
self.action_introduction.triggered.connect(
lambda: self.open_help("introduction")
)
self.workspace = Workspace(parent=self)
self.workspace.undo_stack.cleanChanged.connect(self.toggle_save)
self._edit_menu_setup()
self.setCentralWidget(self.workspace)
self.check_version()
# hide options for loading/saving annotations only
self.menuAnnotations.deleteLater()
# from eyepy.data import load
# ev = load("drusen_patient")
# ev.add_voxel_annotation(
# ep.drusen(ev.layers["RPE"], ev.layers["BM"], ev.shape), name="Drusen"
# )
# self.workspace.set_data(ev)
# self.statusBar().showMessage("Ready")
def toggle_save(self, clean):
if clean:
self.action_save.setEnabled(False)
self.action_save_as.setEnabled(False)
self.setWindowTitle(self.windowTitle().rstrip("*"))
else:
self.action_save.setEnabled(True)
self.action_save_as.setEnabled(True)
self.setWindowTitle(self.windowTitle().rstrip("*") + "*")
def _edit_menu_setup(self):
self.action_undo = self.workspace.undo_stack.createUndoAction(self)
self.action_redo = self.workspace.undo_stack.createRedoAction(self)
undo_icon = QIcon()
undo_icon.addFile(
":/icons/icons/baseline-undo-24px.svg", QSize(), QIcon.Normal, QIcon.Off
)
self.action_undo.setIcon(undo_icon)
redo_icon = QIcon()
redo_icon.addFile(
":/icons/icons/baseline-redo-24px.svg", QSize(), QIcon.Normal, QIcon.Off
)
self.action_redo.setIcon(redo_icon)
self.action_undo.setShortcut(
QCoreApplication.translate("MainWindow", "Ctrl+Z", None)
)
self.action_redo.setShortcut(
QCoreApplication.translate("MainWindow", "Ctrl+Y", None)
)
self.menuEdit.addAction(self.action_undo)
self.menuEdit.addAction(self.action_redo)
def check_version(self):
latest_url = "https://github.com/MedVisBonn/eyelab/releases/latest"
current_version = f"v{el.__version__}"
try:
latest_version = (requests.get(latest_url).url).split("/")[-1]
except requests.ConnectionError:
return
if version.parse(current_version) < version.parse(latest_version):
msgBox = QMessageBox()
msgBox.setText("A new version of EyeLab is available.")
msgBox.setInformativeText(
f"You are using version {current_version}. The latest version is {latest_version} and can be found <a href='{latest_url}'>here</a>."
)
msgBox.setStandardButtons(QMessageBox.Ok)
msgBox.setDefaultButton(QMessageBox.Ok)
            msgBox.exec()
@property
def start_dir(self):
if self.save_path:
return str(Path(self.save_path).parent)
else:
return str(Path.home())
def import_data(self, method, format):
        if self.workspace.data is not None:
message = "The import replaces all data you have in your workspace. Do you want to proceed?"
ret = QMessageBox.question(
self, "EyeLab", message, QMessageBox.Ok | QMessageBox.Cancel
)
if ret == QMessageBox.Cancel:
return
if format == "file":
path = QFileDialog.getOpenFileName(dir=self.start_dir)[0]
elif format == "folder":
path = QFileDialog.getExistingDirectory(dir=self.start_dir)
self.save_path = None
self.statusBar().showMessage("Import Data...")
self.workspace.set_data(method(path))
self.statusBar().showMessage("Done")
def open_help(self, topic):
if topic == "introduction":
dialog = IntroductionHelp(self)
elif topic == "shortcuts":
dialog = ShortcutHelp(self)
elif topic == "area_annotation":
dialog = AreaAnnotationHelp(self)
elif topic == "layer_annotation":
dialog = LayerAnnotationHelp(self)
# elif topic == "registration":
# dialog = RegistrationHelp(self)
else:
raise ValueError("topic not available")
if dialog.exec() == QtWidgets.QDialog.Rejected:
pass
def get_save_location(self):
if "PYCHARM_HOSTED" in os.environ:
options = QFileDialog.DontUseNativeDialog
else:
options = QFileDialog.Options()
save_path, filter = QFileDialog.getSaveFileName(
parent=self, caption="Save", filter="Eye files (*.eye)", options=options
)
if save_path:
if not save_path.endswith(".eye"):
save_path = save_path + ".eye"
self.save_path = save_path
return self.save_path
def save(self, save_as=False):
if self.save_path is None or save_as is True:
self.get_save_location()
if self.save_path:
self.statusBar().showMessage("Saving...")
self.workspace.data.save(self.save_path)
get_undo_stack("main").setClean()
self.statusBar().showMessage("Done")
def load(self):
if self.workspace.data is not None:
message = "Loading data replaces the current data. Do you want to proceed?"
ret = QMessageBox.question(
self, "EyeLab", message, QMessageBox.Ok | QMessageBox.Cancel
)
if ret == QMessageBox.Cancel:
return
path = QFileDialog.getOpenFileName(
parent=self,
caption="Load",
dir=self.start_dir,
filter="Eye files (*.eye)",
)[0]
if path == "":
return
if not path.startswith("/run/user"):
self.save_path = path
else:
self.save_path = None
self.statusBar().showMessage("Loading...")
ev = ep.EyeVolume.load(path)
self.workspace.set_data(ev)
self.statusBar().showMessage("Done")
def save_annotations(self, save_as=False):
if self.save_path is None or save_as is True:
path = self.get_save_location()
if path == "":
return
self.workspace.data.save_annotations(self.save_path)
def load_annotations(self):
if self.workspace.data is None:
message = (
"Annotations can only be loaded after importing the corresponding data."
)
QMessageBox.information(self, "EyeLab", message, QMessageBox.Ok)
return
message = "Loading annotations replaces all current annotations. Do you want to proceed?"
ret = QMessageBox.question(
self, "EyeLab", message, QMessageBox.Ok | QMessageBox.Cancel
)
if ret == QMessageBox.Cancel:
return
path = QFileDialog.getOpenFileName()[0]
self.workspace.data.load_annotations(path)
def main(log_level=logging.DEBUG):
# create logger for "oat" application
logger = logging.getLogger("eyelab")
logger.setLevel(logging.DEBUG)
# create file handler which logs debug messages
fh = logging.FileHandler(EYELAB_FOLDER / "eyelab.log")
fh.setLevel(logging.DEBUG)
file_formatter = logging.Formatter(
"%(asctime)s - %(levelname)s - %(name)s - %(message)s"
)
fh.setFormatter(file_formatter)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(log_level)
cmd_formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
ch.setFormatter(cmd_formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
logger.info("Starting Application.")
application = QtWidgets.QApplication(sys.argv)
window = eyelab()
# desktop = QtGui.QScreen().availableGeometry()
# width = (desktop.width() - window.width()) / 2
# height = (desktop.height() - window.height()) / 2
# window.show()
# window.move(width, height)
window.showMaximized()
sys.exit(application.exec())
if __name__ == "__main__":
main()
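# Illustrative note (not part of the original file): main() takes the console log level as
# a parameter, so a quieter launch could be `main(log_level=logging.INFO)` (assumed usage).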
|
# author: <NAME>, <NAME>
# version: 3.0
import cx_Oracle
import PySimpleGUI as sg
from Edit_Students import run_program as edit
from input_checker import check_string as check_string
old_class = '' # variable, holds the current name of the class
old_period_number = 0 # variable, holds the current period number of the class
def run_program(name, period, year):  # opens the Edit Courses window for the given class and handles its events
con = cx_Oracle.connect('EOM/[email protected]/xe') # connects to the database
cur = con.cursor(scrollable=True) # object, used to execute SQL commands in python
student_numbers = [] # list, the student_numbers of the students in the given class
    def get_number(course_code):  # returns the number of students in the given class and fills the student_numbers list
cur.execute("select * from EOM_STUDENTS")
v_row = 0
for row in cur: # goes through the students table
if row[1] == course_code: # check if this student is in the given class
v_row += 1
student_numbers.append(row[0])
return v_row
global old_class
global old_period_number
old_class = str(name + '/' + year)
old_period_number = int(period)
layout = [[sg.Text('Edit Classes - ' + name, size=(30, 2), justification='center', font=("Helvetica", 25))],
              # where the GUI is put together; each inner [] is one row of the window
[sg.Text(' Course Code', size=(50, 1), justification='center', font=("Helvetica", 15))],
[sg.Input(name, size=(20, 2), pad=((215, 150), 10))],
[sg.Text(' Period Number', size=(50, 1), justification='center', font=("Helvetica", 15))],
[sg.Input(period, size=(20, 2), pad=((215, 150), 10))],
[sg.Text('Year', size=(50, 1), justification='center', font=("Helvetica", 15))],
[sg.DropDown((2016, 2017, 2018, 2019), size=(18, 2), pad=((214, 150), 10), default_value=int(year))],
[sg.Button('Edit Course', key='edit_courses_button', size=(20, 2), pad=((205, 150), 10), )],
[sg.Button('Go to Edit Students', key='edit_student_key', size=(20, 2), pad=((205, 150), 10), )]
]
window = sg.Window('Edit Courses', default_element_size=(40, 2)).Layout(layout) # used to open up a window and display everything
while True: # runs as long as the window is open, similar to an action listener
event, values = window.Read() # the pysimplegui equivalent of an action listener
if event is None or event == 'Exit':
break
if event == 'edit_courses_button': # checks if it was the edit classes button that was pressed
if check_string(values[0], 'str', 8) and check_string(values[1], 'int', 4) \
and check_string(values[2], 'int', 2025): # check if the inputs are valid
cur.execute("UPDATE EOM_CLASS SET PERIOD_NUM = :v_period_num WHERE CLASS = :old_course", v_period_num=values[1],
old_course=old_class)
cur.execute("UPDATE EOM_CLASS SET CLASS = :v_class WHERE CLASS = :old_course", v_class=values[0] + '/' + values[2],
old_course=old_class)
                for x in range(get_number(old_class)):  # runs once for every student in the old_class to change their class to the new class
cur.execute("UPDATE EOM_STUDENTS SET CLASS = :new_class WHERE STUDENT_ID = :other_stuff",
new_class=values[0] + '/' + values[2],
other_stuff=student_numbers[x])
con.commit()
break
else:
sg.Popup('Invalid input')
if event == 'edit_student_key': # checks if it was the edit students button that was pressed
edit(old_class)
window.Close()
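# Illustrative usage (not part of the original file); the course code, period and year
# below are made-up values:
#   run_program('ICS4U1', 2, '2018')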
|
import datetime
import logging
import sys
from nltk.stem import WordNetLemmatizer
import projection
def crossLexiconOnRaw(corpusPath, crossedCorpusPath, csvFilePath, fieldNum=1):
time = datetime.datetime.now()
    # Read the association scores and the MWE lexicon, then scan the lemmatized corpus
scoreDic = readScores(csvFilePath, fieldNum)
lexicon = readLexicon()
with open(corpusPath, 'r') as lemmatizedCorpus:
result = ''
for line in lemmatizedCorpus:
lemmas = tokenize(line[:-1])
lemmaText = line[:-1]
labels = ['0'] * len(lemmas)
mweIdx = 1
for entry in lexicon:
entryScore = 0
if entry in scoreDic:
entryScore = scoreDic[entry]
if set(tokenize(entry)).issubset(set(lemmas)):
entryTokens = tokenize(entry)
if entry in lemmaText:
idxs = projection.getContinousIdxs(entryTokens, lemmas)
if idxs:
labels = annotate(labels, idxs, entryScore)
mweIdx += 1
continue
idxs = projection.entryInLine(entryTokens, lemmas)
hasLegalCont, inOneDirec = False, False
if idxs:
if len(entryTokens) < 3:
hasLegalCont = projection.hasLegalContinuty(idxs, windowsSize=1)
else:
hasLegalCont = projection.hasLegalContinuty(idxs, windowsSize=2)
if idxs and hasLegalCont:
inOneDirec = projection.inOneDirection(idxs, entry, lemmas)
if idxs and hasLegalCont and inOneDirec:
labels = annotate(labels, idxs, entryScore)
mweIdx += 1
for i in range(len(lemmas)):
result += '{0}\t{1}\n'.format(lemmas[i], labels[i])
result += '\n'
with open(crossedCorpusPath, 'w') as corpus:
corpus.write(result)
    logging.warning(
        'The file {0} has been written. It took {1}'.format(crossedCorpusPath, datetime.datetime.now() - time))
def annotate(labels, idxs, score):
    # Only the last (highest) token index of the matched entry receives the
    # score label; an existing label at that position is extended with ';'.
    for idx in sorted(idxs):
        if idx == max(idxs):
if labels[idx] == '0':
labels[idx] = str(score)
else:
labels[idx] += ';' + str(score)
return labels
def readLexicon():
wordnet_lemmatizer = WordNetLemmatizer()
lexicon = {}
with open('../Corpora/LPP/mweLEX.txt', 'r') as scoresF:
for line in scoresF:
if line:
entry = line[:-1].lower().strip()
tokens = entry.split(' ')
lemmas = []
for token in tokens:
lemmas.append(wordnet_lemmatizer.lemmatize(token))
lexicon[' '.join(lemmas)] = True
return lexicon
def readScores(csvFilePath, fieldNum=1):
lexicon = {}
with open(csvFilePath, 'r') as scoresF:
for line in scoresF:
if line:
parts = line.split(',')
lexicon[parts[0].lower().strip()] = parts[fieldNum]
return lexicon
def tokenize(line):
tokens = line.lower().split(' ')
realTokens = []
for token in tokens:
if token:
realTokens.append(token)
if realTokens:
return realTokens
return None
if __name__ == '__main__':
reload(sys)
sys.setdefaultencoding('utf8')
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    rawCorpusPath = '../Corpora/LPP/lemmatized.txt'
lppLexiconPath = '../Corpora/LPP/mweLEX.txt'
resultFile = '../AnalysisFormat/Compositionality.txt'
# csvFilePath = '../AssociationMeasures/CBT-scores/candidates-features.csv'
csvFilePath = '../Word2Vec/CBOW/com.csv'
    crossLexiconOnRaw(rawCorpusPath, resultFile, csvFilePath, fieldNum=1)
|
from abc import ABC
from typing import List, TYPE_CHECKING
from avalanche.core import StrategyCallbacks
if TYPE_CHECKING:
from avalanche.evaluation.metric_results import MetricValue
from avalanche.training.strategies import BaseStrategy
class StrategyLogger(StrategyCallbacks[None], ABC):
"""
The base class for the strategy loggers.
Strategy loggers will receive events, under the form of callback calls,
from the :class:`EvaluationPlugin` carrying a reference to the strategy
as well as the values emitted by the metrics.
    Each child class should implement the `log_single_metric` method, which
    specifies how to report to the user the metrics gathered during
    training and evaluation flows. The `log_metric` method is invoked
    by default on each callback and dispatches each value to
    `log_single_metric`.
In addition, child classes may override the desired callbacks
to customize the logger behavior.
Make sure, when overriding callbacks, to call
the proper `super` method.
"""
def __init__(self):
super().__init__()
def log_single_metric(self, name, value, x_plot):
"""
This abstract method will have to be implemented by each subclass.
This method takes a metric name, a metric value and a x value and
decides how to show the metric value.
:param name: str, metric name
:param value: the metric value, will be ignored if
not supported by the logger
:param x_plot: an integer representing the x value
associated to the metric value
"""
pass
def log_metric(self, metric_value: 'MetricValue', callback: str) -> None:
"""
This method will be invoked on each callback.
The `callback` parameter describes the callback from which the metric
value is coming from.
:param metric_value: The value to be logged.
:param callback: The name of the callback (event) from which the
metric value was obtained.
:return: None
"""
name = metric_value.name
value = metric_value.value
x_plot = metric_value.x_plot
if isinstance(value, dict):
for k, v in value.items():
n = f"{name}/{k}"
self.log_single_metric(n, v, x_plot)
else:
self.log_single_metric(name, value, x_plot)
def before_training(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'before_training')
def before_training_exp(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'before_training_exp')
def after_train_dataset_adaptation(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'],
**kwargs):
for val in metric_values:
self.log_metric(val, 'adapt_train_dataset')
def before_training_epoch(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'before_training_epoch')
def before_training_iteration(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'],
**kwargs):
for val in metric_values:
self.log_metric(val, 'before_training_iteration')
def before_forward(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'before_forward')
def after_forward(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'after_forward')
def before_backward(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'before_backward')
def after_backward(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'after_backward')
def after_training_iteration(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'after_training_iteration')
def before_update(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'before_update')
def after_update(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'after_update')
def after_training_epoch(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'after_training_epoch')
def after_training_exp(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'after_training_exp')
def after_training(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'after_training')
def before_eval(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'before_eval')
def after_eval_dataset_adaptation(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'],
**kwargs):
for val in metric_values:
self.log_metric(val, 'adapt_eval_dataset')
def before_eval_exp(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'before_eval_exp')
def after_eval_exp(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'after_eval_exp')
def after_eval(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'after_eval')
def before_eval_iteration(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'before_eval_iteration')
def before_eval_forward(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'before_eval_forward')
def after_eval_forward(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'after_eval_forward')
def after_eval_iteration(self, strategy: 'BaseStrategy',
metric_values: List['MetricValue'], **kwargs):
for val in metric_values:
self.log_metric(val, 'after_eval_iteration')
__all__ = [
'StrategyLogger'
]
|
# coding: utf-8
from __future__ import with_statement
from operator import add
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import pytest
from chef.interpreter import Interpreter
from chef.datastructures import Ingredients, Ingredient, IngredientProperties
from chef.errors.runtime import InvalidInputError, UndefinedIngredientError,\
InvalidContainerIDError, NonExistingContainerError,\
EmptyContainerError, MissingLoopEndError
def test_interpreter_init():
interpreter = Interpreter()
assert interpreter.global_ingredients == Ingredients()
assert interpreter.mixing_bowls == [Ingredients()]
class TestInterpreterProperties(object):
def setup_method(self, method):
first_mixing_bowl = Ingredients([
Ingredient('apple', IngredientProperties(5, True, False)),
Ingredient('water', IngredientProperties(100, False, True))])
second_mixing_bowl = Ingredients([
Ingredient('oil', IngredientProperties(2, True, False)),
Ingredient('salmon', IngredientProperties(4, False, True))])
self.interpreter = Interpreter(mixing_bowls=[
first_mixing_bowl, second_mixing_bowl])
def test_first_mixing_bowl(self):
first_mixing_bowl = self.interpreter.first_mixing_bowl
assert first_mixing_bowl == Ingredients([
Ingredient('apple', IngredientProperties(5, True, False)),
Ingredient('water', IngredientProperties(100, False, True))])
def test_last_mixing_bowl(self):
last_mixing_bowl = self.interpreter.last_mixing_bowl
assert last_mixing_bowl == Ingredients([
Ingredient('oil', IngredientProperties(2, True, False)),
Ingredient('salmon', IngredientProperties(4, False, True))])
class TestInterpreterGetNthIngredients(object):
params = {
'test_lt_one': [{'id': -1}, {'id': 0}],
'test_too_high': [{'id': 2}, {'id': 3}]}
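    # `params` follows the pytest_generate_tests parametrization convention:
    # each key names a test method of this class and maps to the keyword
    # arguments it is invoked with (the generating hook itself presumably
    # lives in the project's conftest.py).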
def setup_method(self, method):
self.interpreter = Interpreter()
def test_lt_one(self, id):
with pytest.raises(InvalidContainerIDError):
self.interpreter.get_nth_container(id)
def test_too_high(self, id):
with pytest.raises(NonExistingContainerError) as e:
self.interpreter.get_nth_container(id)
assert e.value.id == id
def test_accessible(self):
mixing_bowl = self.interpreter.get_nth_container(1)
assert mixing_bowl == Ingredients()
class TestInterpreterTake(object):
def setup_method(self, method):
self.interpreter = Interpreter()
def test_nonexisting(self):
with pytest.raises(UndefinedIngredientError) as e:
self.interpreter.take('milk', stdin=StringIO('23\n'))
assert e.value.ingredient == 'milk'
def test_overwriting(self):
self.interpreter.global_ingredients['sausage'] = IngredientProperties(
42, True, False)
self.interpreter.take('sausage', stdin=StringIO('57\n'))
sausage = self.interpreter.global_ingredients['sausage']
assert sausage == Ingredient(
'sausage', IngredientProperties(57, True, False))
def test_invalid_num_without_lineno(self):
with pytest.raises(InvalidInputError) as e:
self.interpreter.take(
'sausage', stdin=StringIO('not a number!\n'))
assert e.value.value == 'not a number!'
assert e.value.lineno is None
def test_invalid_num_with_lineno(self):
with pytest.raises(InvalidInputError) as e:
self.interpreter.take(
'sausage', 7, StringIO('not a number!\n'))
assert e.value.value == 'not a number!'
assert e.value.lineno == 7
class TestInterpreterPut(object):
def setup_method(self, method):
global_ingredients = Ingredients([
Ingredient('bananas', IngredientProperties(180, True, False))])
first_mixing_bowl = Ingredients([
Ingredient('milk', IngredientProperties(200, False, True)),
Ingredient('baking powder', IngredientProperties(50, True, False))])
second_mixing_bowl = Ingredients([
Ingredient('orange juice', IngredientProperties(100, False, True)),
Ingredient('cinnamon', IngredientProperties(15, True, False))])
self.interpreter = Interpreter(
global_ingredients, [first_mixing_bowl, second_mixing_bowl])
def test_undefined_ingredient(self):
with pytest.raises(UndefinedIngredientError) as e:
self.interpreter.put('olive oil')
assert e.value.ingredient == 'olive oil'
assert e.value.lineno is None
def test_without_mixing_bowl(self):
self.interpreter.put('bananas')
assert self.interpreter.first_mixing_bowl.top == Ingredient(
'bananas', IngredientProperties(180, True, False))
def test_with_mixing_bowl(self):
self.interpreter.put('bananas', 2)
assert self.interpreter.mixing_bowls[1].top == Ingredient(
'bananas', IngredientProperties(180, True, False))
def test_into_nonexisting_mixing_bowl(self):
self.interpreter.put('bananas', 3)
assert self.interpreter.last_mixing_bowl == Ingredients([
Ingredient('bananas', IngredientProperties(180, True, False))])
assert self.interpreter.last_mixing_bowl.top == Ingredient(
'bananas', IngredientProperties(180, True, False))
def test_same_item_multiple_times(self):
self.interpreter.put('bananas')
assert self.interpreter.first_mixing_bowl == Ingredients([
Ingredient('milk', IngredientProperties(200, False, True)),
Ingredient('baking powder', IngredientProperties(50, True, False)),
Ingredient('bananas', IngredientProperties(180, True, False))])
self.interpreter.put('bananas')
assert self.interpreter.first_mixing_bowl == Ingredients([
Ingredient('milk', IngredientProperties(200, False, True)),
Ingredient('baking powder', IngredientProperties(50, True, False)),
Ingredient('bananas', IngredientProperties(180, True, False)),
Ingredient('bananas', IngredientProperties(180, True, False))])
def test_invalid_mixing_bowl(self):
with pytest.raises(InvalidContainerIDError) as e:
self.interpreter.put('bananas', 4)
assert e.value.id == 4
class TestInterpreterFold(object):
def test_missing_top_value(self):
interpreter = Interpreter(
Ingredients([
Ingredient('yeast', IngredientProperties(47, True, False))]),
[Ingredients()])
with pytest.raises(EmptyContainerError) as e:
interpreter.fold('yeast')
assert e.value.id == 1
def test_working(self):
interpreter = Interpreter({'yeast': 47}, [Ingredients([23, 42, 1337])])
assert interpreter.mixing_bowls == [Ingredients([23, 42, 1337])]
assert interpreter.global_ingredients == {'yeast': 47}
interpreter.fold('yeast')
assert interpreter.mixing_bowls == [Ingredients([23, 42])]
assert interpreter.global_ingredients == {'yeast': 1337}
def test_interpreter_calculate_with_empty_mixing_bowl():
interpreter = Interpreter(Ingredients([
Ingredient('pigs', IngredientProperties(2, True, False))]))
with pytest.raises(EmptyContainerError):
interpreter.calculate(add, 'pigs')
def test_interpreter_add(interpreter):
assert interpreter.first_mixing_bowl.top == Ingredient(
'cherries', IngredientProperties(300, True, False))
interpreter.add('meat')
assert interpreter.first_mixing_bowl.top == Ingredient(
'meat', IngredientProperties(350, True, False))
def test_interpreter_remove(interpreter):
assert interpreter.first_mixing_bowl.top == Ingredient(
'cherries', IngredientProperties(300, True, False))
interpreter.remove('meat')
assert interpreter.first_mixing_bowl.top == Ingredient(
'meat', IngredientProperties(250, True, False))
def test_interpreter_combine(interpreter):
assert interpreter.first_mixing_bowl.top == Ingredient(
'cherries', IngredientProperties(300, True, False))
interpreter.combine('meat')
assert interpreter.first_mixing_bowl.top == Ingredient(
'meat', IngredientProperties(15000, True, False))
def test_interpreter_divide(interpreter):
assert interpreter.first_mixing_bowl.top == Ingredient(
'cherries', IngredientProperties(300, True, False))
interpreter.divide('meat')
assert interpreter.first_mixing_bowl.top == Ingredient(
'meat', IngredientProperties(6, True, False))
def test_interpreter_liquefy_ingredient(interpreter):
assert interpreter.global_ingredients == Ingredients([
Ingredient('meat', IngredientProperties(50, True, False))])
interpreter.liquefy_ingredient('meat')
assert interpreter.global_ingredients == Ingredients([
Ingredient('meat', IngredientProperties(50, False, True))])
def test_interpreter_liquefy_contents(interpreter):
assert interpreter.first_mixing_bowl == Ingredients([
Ingredient('apples', IngredientProperties(100, True, False)),
Ingredient('ketchup', IngredientProperties(200, False, True)),
Ingredient('cherries', IngredientProperties(300, True, False))])
interpreter.liquefy_contents()
assert interpreter.first_mixing_bowl == Ingredients([
Ingredient('apples', IngredientProperties(100, False, True)),
Ingredient('ketchup', IngredientProperties(200, False, True)),
Ingredient('cherries', IngredientProperties(300, False, True))])
def test_interpreter_stir_minutes(interpreter):
interpreter.stir_minutes(1)
assert interpreter.first_mixing_bowl == Ingredients([
Ingredient('apples', IngredientProperties(100, True, False)),
Ingredient('cherries', IngredientProperties(300, True, False)),
Ingredient('ketchup', IngredientProperties(200, False, True))])
def test_interpreter_stir_ingredient():
interpreter = Interpreter(
Ingredients([
Ingredient('sticks', IngredientProperties(2, True, False))]),
[Ingredients([
Ingredient('stones', IngredientProperties(100, True, False)),
Ingredient('skin', IngredientProperties(200, True, False)),
Ingredient('bones', IngredientProperties(300, True, False))])])
interpreter.stir_ingredient('sticks')
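    # Stirring by an ingredient rolls the top item ('bones') down as many
    # places as the ingredient's value ('sticks' == 2), giving the order
    # asserted below.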
assert interpreter.first_mixing_bowl == Ingredients([
Ingredient('bones', IngredientProperties(300, True, False)),
Ingredient('stones', IngredientProperties(100, True, False)),
Ingredient('skin', IngredientProperties(200, True, False))])
def test_interpreter_clean(interpreter):
interpreter.clean()
# global ingredients must not change after having called the clean command
assert interpreter.global_ingredients == Ingredients([
Ingredient('meat', IngredientProperties(50, True, False))])
assert interpreter.mixing_bowls == [Ingredients()]
def test_interpreter_pour(interpreter):
assert interpreter.first_baking_dish == Ingredients()
interpreter.pour()
assert interpreter.first_baking_dish == Ingredients([
Ingredient('apples', IngredientProperties(100, True, False)),
Ingredient('ketchup', IngredientProperties(200, False, True)),
Ingredient('cherries', IngredientProperties(300, True, False))])
class TestInterpreterLoopStart(object):
def setup_method(self, method):
global_ingredients = Ingredients([
Ingredient('number', IngredientProperties(3, True, False))])
self.interpreter = Interpreter(global_ingredients)
def test_one_iteration(self):
interpreter = Interpreter(
Ingredients([
Ingredient('number', IngredientProperties(1, True, False))]))
following_instructions = [
{
'command': 'put',
'ingredient': 'number',
'mixing_bowl_id': None,
'lineno': 8},
{
'command': 'loop_end',
'ingredient': 'number',
'verb': 'counted',
'lineno': 9}]
interpreter.loop_start('Count', 'number', following_instructions, 7)
assert interpreter.first_mixing_bowl == Ingredients([
Ingredient('number', IngredientProperties(1, True, False))])
def test_multiple_iterations(self):
following_instructions = [
{
'command': 'put',
'ingredient': 'number',
'mixing_bowl_id': None,
'lineno': 8},
{
'command': 'loop_end',
'ingredient': 'number',
'verb': 'counted',
'lineno': 9}]
self.interpreter.loop_start(
'Count', 'number', following_instructions, 7)
assert self.interpreter.first_mixing_bowl == Ingredients([
Ingredient('number', IngredientProperties(3, True, False)),
Ingredient('number', IngredientProperties(2, True, False)),
Ingredient('number', IngredientProperties(1, True, False))])
@pytest.mark.xfail
def test_nested_loops(self):
assert False
def test_missing_loop_end(self):
following_instructions = [
{
'command': 'put',
'ingredient': 'number',
'mixing_bowl_id': None,
'lineno': 8},
{
'command': 'add',
'ingredient': 'number',
'mixing_bowl_id': None,
'lineno': 9}]
with pytest.raises(MissingLoopEndError):
self.interpreter.loop_start(
'Count', 'number', following_instructions, 7)
def test_interpreter_loop_end(interpreter):
interpreter.loop_end('meat')
assert interpreter.global_ingredients == Ingredients([
Ingredient('meat', IngredientProperties(49, True, False))])
def test_interpreter_serves():
interpreter = Interpreter()
interpreter.baking_dishes = [Ingredients([
Ingredient('water', IngredientProperties(97, False, True)),
Ingredient('salt', IngredientProperties(23, True, False)),
Ingredient('magic powder', IngredientProperties(55000, False, True))])]
stdout = StringIO()
interpreter.serves(1, stdout)
stdout.seek(0)
output = stdout.read()
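    # `serves` empties the baking dish from the top down; liquid ingredients
    # are printed as the Unicode character of their value (55000 -> '훘',
    # 97 -> 'a') and dry ingredients as plain numbers (23), hence the string.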
assert output == '훘23a'
|
# -*- coding: utf-8 -*-
# Written in Python 2.7, but try to maintain Python 3+ compatibility
from __future__ import print_function
from __future__ import division
import unittest
from os import path
from classic_heuristics.parallel_savings import parallel_savings_init
from classic_heuristics.sequential_savings import sequential_savings_init
from classic_heuristics.gaskell_savings import gaskell_savings_init
from classic_heuristics.paessens_savings import paessens_savings_init
from classic_heuristics.suppression_savings import suppression_savings_init
from local_search import LSOPT, do_local_search
from local_search.intra_route_operators import do_3opt_move
from replicationbase import ReplicationBase, REPRO_QUALITY_LEVELS
class TestSavingsGaskell1967Replications(ReplicationBase):
def setUp(self):
self.algorithms = [
("savings:multiple", lambda pts, D,d,C,L,st:\
parallel_savings_init(D,d,C,L, minimize_K=True)),
("savings:sequential", lambda pts, D,d,C,L,st:\
sequential_savings_init(D,d,C,L, minimize_K=True,
initialize_routes_with = "farthest")),
("lambda:multiple", lambda pts, D,d,C,L,st:\
gaskell_savings_init(D,d,C,L, minimize_K=True,
savings_method="lambda")),
("pi:multiple", lambda pts,D,d,C,L,st:\
gaskell_savings_init(D,d,C,L, minimize_K=True,
savings_method="pi"))]
self.problem_names = ["01_Ga67_n37_k5.vrp",
"02-CW64_n32_k8.vrp",
"03-Ga67_n33-k4.vrp",
"04-Ga67-n22-k4.vrp",
"05-Ga67-n30-k4.vrp",
"06-Ga67-n23-k5.vrp"]
self.targets = [
#savings:multiple
((5,923),(8,1427),(5,839),(4,598),(4,963),(5,955)),
#savings:sequential
((5,947),(8,1427),(5,850),(4,648),(5,1017),(5,949)),
#lambda:multiple
((5,913),(8,1434),(5,821),(4,602),(5,979),(5,988)),
#pi:multiple
((5,857),(9,1500),(5,850),(4,598),(5,943),(6,1015))]
self.problem_path = path.join("Classic","Gaskell1967")
def test_parallel_savings_with_Gaskell1967_instances(self):
avgq, sdq, minq, maxq = self.solve_problems("savings:multiple")
self.assertTrue( abs(avgq) < REPRO_QUALITY_LEVELS.A_AVG, "Average quality not replicated (%.2f)"%avgq)
self.assertTrue( abs(sdq) < REPRO_QUALITY_LEVELS.A_SD, "There is too much variation between instances")
def test_sequential_savings_with_Gaskell1967_instances(self):
avgq, sdq, minq, maxq = self.solve_problems("savings:sequential")
self.assertTrue( abs(avgq) < REPRO_QUALITY_LEVELS.B_AVG, "Average quality not replicated (%.2f)"%avgq)
self.assertTrue( abs(sdq) < REPRO_QUALITY_LEVELS.B_SD, "There is too much variation between instances")
def test_Gaskell_lambda_savings_with_Gaskell1967_instances(self):
avgq, sdq, minq, maxq = self.solve_problems("lambda:multiple")
self.assertTrue( abs(avgq) < REPRO_QUALITY_LEVELS.B_AVG, "Average quality not replicated (%.2f)"%avgq)
self.assertTrue( abs(sdq) < REPRO_QUALITY_LEVELS.B_SD, "There is too much variation between instances")
def test_Gaskell_pi_savings_with_Gaskell1967_instances(self):
avgq, sdq, minq, maxq = self.solve_problems("pi:multiple")
self.assertTrue( abs(avgq) < REPRO_QUALITY_LEVELS.A_AVG, "Average quality not replicated (%.2f)"%avgq)
self.assertTrue( abs(sdq) < REPRO_QUALITY_LEVELS.A_SD, "There is too much variation between instances")
class TestSavingsCWEilonEtAl1971Replications(ReplicationBase):
""" The results were published in Eilon et al 1971 (in the book Distribution management)
"""
def setUp(self):
self.algorithms = [
("savings:multiple", lambda pts, D,d,C,L,st:\
parallel_savings_init(D,d,C,L)),]
self.problem_names = [
"01-eil7.vrp",
"02-eil13.vrp",
"03-Gaskell1-E022-k4g_exact2d.vrp",
"04-Gaskell2-E023-k5g_exact2d.vrp",
"05-Gaskell3-E030-k4g_exact2d.vrp",
"06-CW64_n31_k8c.vrp",
"07-Gaskell4-E033-k4g_exact2d.vrp",
"08-eil51_exact2d.vrp",
"09-eil76_exact2d.vrp",
"10-eil101_exact2d.vrp"]
# CW64_n31_k8c is without Birmingham with demand of 140
# (it is always served separately with a route of length 82*2)
self.targets = [
#savings:multiple
((2,119),(4,290),(4,598),(5,955),(5,963),
(8,1427-82*2),(5,839),(6,585),(10,900),(8,887))
]
self.problem_path = path.join("Classic","Beasley1983")
def test_parallel_savings_with_Beasley1983_instances(self):
_to_int = lambda x: int(x)
avgq, sdq, minq, maxq = self.solve_problems("savings:multiple", round_f_func = _to_int)
self.assertTrue( abs(avgq) < REPRO_QUALITY_LEVELS.A_AVG, "Average quality not replicated (%.2f)"%avgq)
self.assertTrue( abs(sdq) < REPRO_QUALITY_LEVELS.A_SD, "There is too much variation between instances")
class TestSavingsPaessensReplications(ReplicationBase):
""" The results were published in Paessens1988
"""
def setUp(self):
self.algorithms = [
("clarke_wright_savings", lambda pts, D,d,C,L,st:\
parallel_savings_init(D,d,C,L)),
("paessens_savings_M1", lambda pts, D,d,C,L,st:\
paessens_savings_init(D,d,C,L,strategy="M1", do_3opt=False)),
("paessens_savings_M4", lambda pts, D,d,C,L,st:\
paessens_savings_init(D,d,C,L,strategy="M4", do_3opt=False)),
("clarke_wright_savings_3OPT", lambda pts, D,d,C,L,st:\
do_local_search([do_3opt_move], parallel_savings_init(D,d,C,L),
D, d, C, L, operator_strategy=LSOPT.BEST_ACCEPT)),
("paessens_savings_M1_3OPT", lambda pts, D,d,C,L,st:\
paessens_savings_init(D,d,C,L,strategy="M1", do_3opt=True)),
("paessens_savings_M4_3OPT", lambda pts, D,d,C,L,st:\
paessens_savings_init(D,d,C,L,strategy="M4", do_3opt=True))
]
self.problem_names = [
"G1.vrp",
"G2.vrp",
"G3.vrp",
"G4.vrp",
"C1.vrp",
"C2.vrp",
"C3.vrp",
"C4.vrp",
"C5.vrp",
"C6.vrp",
"C7.vrp",
"C8.vrp",
"C9.vrp",
"C10.vrp",
"C11.vrp",
"GJ1.vrp" ]
# in Gaskell instances service_time was not included in the table
# in Christofides et al. service_time was included
self.targets = [
#cw
(599-10*21,956-10*22,964-10*29,841-10*32,
585,907,889,834,876,1068,1593,1140,1288,1395,1539,5568),
#M1
(585-10*21,956-10*22,938-10*29,814-10*32,
564,866,866,826,870,1065,1584,1102,1222,1370,1486,5380),
#M4
(598-10*21,956-10*22,938-10*29,814-10*32,
571,889,877,826,872,1068,1591,1112,1222,1370,1515,5476),
#cw+3opt
(599-10*21,956-10*22,962-10*29,840-10*32,
579,902,880,824,869,1047,1583,1134,1285,1387,1522,5546),
#M1+3opt
(585-10*21,956-10*22,937-10*29,812-10*32,
557,861,858,822,870,1046,1568,1083,1221,1359,1476,5348),
#M4+3opt
(598-10*21,956-10*22,937-10*29,812-10*32,
570,876,869,823,871,1047,1580,1106,1221,1359,1504,5449),
]
self.problem_path = path.join("Classic","Paessens1988", "literature")
def test_CW_with_Paessens1988_instances(self):
_to_int = lambda x: int(x)
avgq, sdq, minq, maxq = self.solve_problems("clarke_wright_savings",
round_f_func = _to_int,
cost_compare = False)
# Is better, give it some slack
self.assertTrue( abs(avgq)-0.02 < REPRO_QUALITY_LEVELS.A_AVG, "Average quality not replicated (%.2f)"%avgq)
self.assertTrue( abs(sdq) < REPRO_QUALITY_LEVELS.A_SD, "There is too much variation between instances")
def test_CW_w_3OPT_with_Paessens1988_instances(self):
_to_int = lambda x: int(x)
avgq, sdq, minq, maxq = self.solve_problems("clarke_wright_savings_3OPT",
round_f_func = _to_int,
cost_compare = False)
# It is in fact, better. But still, B.
self.assertTrue( abs(avgq) < REPRO_QUALITY_LEVELS.B_AVG, "Average quality not replicated (%.2f)"%avgq)
self.assertTrue( abs(sdq) < REPRO_QUALITY_LEVELS.B_SD, "There is too much variation between instances")
def test_PaessensM1_with_Paessens1988_instances(self):
_to_int = lambda x: int(x)
avgq, sdq, minq, maxq = self.solve_problems("paessens_savings_M1",
round_f_func = _to_int,
cost_compare = False)
self.assertTrue( abs(avgq)-0.01 < REPRO_QUALITY_LEVELS.A_AVG, "Average quality not replicated (%.2f)"%avgq)
self.assertTrue( abs(sdq) < REPRO_QUALITY_LEVELS.A_SD, "There is too much variation between instances")
def test_PaessensM1_w_3OPT_with_Paessens1988_instances(self):
_to_int = lambda x: int(x)
avgq, sdq, minq, maxq = self.solve_problems("paessens_savings_M1_3OPT",
round_f_func = _to_int,
cost_compare = False)
self.assertTrue( abs(avgq) < REPRO_QUALITY_LEVELS.A_AVG, "Average quality not replicated (%.2f)"%avgq)
self.assertTrue( abs(sdq) < REPRO_QUALITY_LEVELS.A_SD, "There is too much variation between instances")
def test_PaessensM4_with_Paessens1988_instances(self):
_to_int = lambda x: int(x)
avgq, sdq, minq, maxq = self.solve_problems("paessens_savings_M4",
round_f_func = _to_int,
cost_compare = False)
self.assertTrue( abs(avgq) < REPRO_QUALITY_LEVELS.B_AVG, "Average quality not replicated (%.2f)"%avgq)
self.assertTrue( abs(sdq) < REPRO_QUALITY_LEVELS.B_SD, "There is too much variation between instances")
def test_PaessensM4_w_3OPT_with_Paessens1988_instances(self):
_to_int = lambda x: int(x)
avgq, sdq, minq, maxq = self.solve_problems("paessens_savings_M4_3OPT",
round_f_func = _to_int,
cost_compare = False)
self.assertTrue( abs(avgq) < REPRO_QUALITY_LEVELS.A_AVG, "Average quality not replicated (%.2f)"%avgq)
self.assertTrue( abs(sdq) < REPRO_QUALITY_LEVELS.A_SD, "There is too much variation between instances")
class TestSavingsSuppressionEilonEtAl1971Replications(ReplicationBase):
""" The results were published in Eilon et al 1971 (in the book Distribution management)
"""
def setUp(self):
self.algorithms = [
(r"HP76-PS$\vert$IMS", lambda pts, D,d,C,L,st:\
suppression_savings_init(D,d,C,L, minimize_K=False)),]
self.problem_names = [
"01-eil7.vrp",
# some
"08-eil51.vrp",
"09-eil76.vrp",
"10-eil101.vrp"]
self.targets = [((2, 114), (5, 573), (10, 886), (8,876))]
self.problem_path = path.join("Classic","ChristofidesEilon1969")
def test_suppression_savings_with_ChristofidesEilon1969_instances(self):
_to_int = lambda x: int(x)
avgq, sdq, minq, maxq = self.solve_problems(r"HP76-PS$\vert$IMS", round_f_func = _to_int)
self.assertTrue( abs(avgq) < REPRO_QUALITY_LEVELS.B_AVG, "Average quality not replicated (%.2f)"%avgq)
self.assertTrue( abs(sdq) < REPRO_QUALITY_LEVELS.B_SD, "There is too much variation between instances")
if __name__ == '__main__':
unittest.main(exit=False)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, get_time, flt, now_datetime
from datetime import datetime, timedelta, date, time
from frappe.model.document import Document
from frappe import _
from club_crm.club_crm.utils.sms_notification import send_sms
from club_crm.club_crm.utils.push_notification import send_push
from club_crm.club_crm.doctype.client_sessions.client_sessions import create_session
class FitnessTrainingRequest(Document):
def after_insert(self):
self.trainer_notification()
def validate(self):
for i, item in enumerate(sorted(self.table_schedule, key=lambda item: str(item.date)), start=1):
item.idx = i
self.calculate_time()
self.validate_table()
self.set_payment_details()
self.create_sessions()
self.send_notification()
def validate_table(self):
if self.request_status == "Scheduled":
if not self.table_schedule:
frappe.throw("Please fill your training schedule under the 'Trainer Schedule' section.")
else:
no_rows=0
for row in self.table_schedule:
no_rows += 1
if not int(self.number_of_sessions) == no_rows:
frappe.throw("Number of appointment schedules does not match number of requested sessions")
duration = 0.0
package = frappe.get_doc('Club Packages', self.fitness_package)
if package.package_table:
for fitness in package.package_table:
if fitness.service_type=="Fitness Services":
duration = fitness.validity
if type(self.start_date) == str:
start_datetime= datetime.strptime(self.start_date, "%Y-%m-%d")
else:
start_datetime = self.start_date
expiry = start_datetime + timedelta(seconds=duration)
for row in self.table_schedule:
if type(row.date) == str:
row_datetime= datetime.strptime(row.date, "%Y-%m-%d")
else:
row_datetime = row.date
if expiry < row_datetime:
expiry_date = datetime.strftime(expiry, "%d-%m-%Y")
msg = _('One of the scheduled dates exceeds the validity of the package from the start date. The dates should be within ')
msg += _('<b>{0}</b>.').format(expiry_date)
frappe.throw(msg, title=_('Schedule date error'))
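                # Overlap guard: find any non-cancelled appointment on the same
                # date, for either this trainer or this client, whose time window
                # collides with the proposed slot (it straddles the new start
                # time, starts inside the new slot, lies entirely within it, or
                # starts at exactly the same time).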
appointments = frappe.db.sql("""
select
name, service_staff, client_name, appointment_date, appointment_time, appointment_end_time
from
`tabFitness Training Appointment`
where
appointment_date=%s and appointment_status NOT IN ("Cancelled", "No Show")
and (service_staff=%s or client_name=%s) and
((appointment_time<%s and appointment_end_time>%s) or
(appointment_time>%s and appointment_time<%s) or
(appointment_time>%s and appointment_end_time<%s) or
(appointment_time=%s))
""", (row.date, self.trainer, self.client_name,
row.from_time, row.from_time,
row.from_time, row.to_time,
row.from_time, row.to_time,
row.from_time))
# appointments = frappe.get_all('Fitness Training Appointment', filters={'service_staff': self.trainer, 'appointment_date': row.date, 'appointment_time': row.time_from})
if appointments:
msg = _('One of the schedules on this request ')
msg += _('overlaps with your appointment for <b>{0}</b> on <b>{1}</b> at <b>{2}</b>.').format(
appointments[0][2], appointments[0][3], appointments[0][4])
frappe.throw(msg, title=_('Schedule date error'))
self.scheduled_at = getdate()
if self.request_status == "Completed":
if not self.table_schedule:
frappe.throw("Please fill your training schedule under the 'Trainer Schedule' section.")
if self.payment_status== "Not Paid":
frappe.throw("The training request has not been paid yet.")
def calculate_time(self):
package = frappe.get_doc('Club Packages', self.fitness_package)
if package.package_table:
for row in package.package_table:
fitness = frappe.get_doc('Fitness Services', row.service_name)
self.validity = fitness.duration
if self.table_schedule:
for row in self.table_schedule:
row.from_time = convert24(row.time_from)
from_time = datetime.strptime(row.from_time, "%H:%M:%S")
to_time = from_time + timedelta(seconds=self.validity)
row.to_time = datetime.strftime(to_time, "%H:%M:%S")
row.start_datetime = "%s %s" % (row.date, row.from_time or "00:00:00")
def set_payment_details(self):
self.paid_amounts = 0.0
self.total_to_be_paid = self.price
if self.payment_table:
for row in self.payment_table:
self.paid_amounts += row.paid_amount
self.balance_amount = self.total_to_be_paid - self.paid_amounts
if self.balance_amount == 0.0:
frappe.db.set_value("Cart", self.name, "payment_status", "Paid")
def create_sessions(self):
if self.sessions_created==0:
if self.request_status == "Completed" and self.payment_status == "Paid":
club_package = frappe.get_doc('Club Packages', self.fitness_package)
if club_package.package_table:
for row in club_package.package_table:
fitness = create_session(self.client_id, self.fitness_package, row.service_type, row.service_name, row.no_of_sessions, row.validity)
if self.table_schedule:
for schedule in self.table_schedule:
if type(schedule.start_datetime) == str:
start_time = datetime.strptime(schedule.start_datetime, "%Y-%m-%d %H:%M:%S")
else:
start_time = schedule.start_datetime
doc = frappe.get_doc({
"doctype": 'Fitness Training Appointment',
"session":1,
"online":1,
"client_id": self.client_id,
"session_name": fitness.name,
"service_staff": self.trainer,
"start_time" : start_time
})
doc.save()
self.sessions_created = 1
def trainer_notification(self):
settings = frappe.get_doc('Fitness Training Settings')
if settings.enabled_trainer==1:
msg = "New Fitness Training Request has been received from "+self.client_name+"."
receiver_list='"'+str(self.trainer_mobile_no)+'"'
send_sms(receiver_list,msg)
def send_notification(self):
settings = frappe.get_doc('Fitness Training Settings')
if settings.enabled_client==1:
if settings.scheduled_message:
msg = str(settings.scheduled_message)
else:
msg = "Your personalized Fitness Training schedule is ready on the app."
# Notification for scheduled appointments to Client
if self.schedule_notification==0 and self.request_status=="Scheduled":
receiver_list='"'+self.mobile_number+'"'
send_sms(receiver_list,msg)
client = frappe.get_doc('Client', self.client_id)
if client.fcm_token:
title = "Fitness Training schedule is ready"
send_push(client.name,title,msg)
self.schedule_notification=1
@frappe.whitelist()
def convert24(str1):
    # Convert a 12-hour time string such as "07:30:00 PM" to 24-hour "19:30:00".
    if str1[-3:] == " AM" and str1[:2] == "12":
        return "00" + str1[2:-3]
    elif str1[-3:] == " AM":
        return str1[:-3]
    elif str1[-3:] == " PM" and str1[:2] == "12":
        return str1[:-3]
    else:
        return str(int(str1[:2]) + 12) + str1[2:8]
|
import socket
import sys
import json
from config import Config
import time
from threading import Lock
from typing import List
import logging
import re
class LightningClient:
def __init__(self, socket_file):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
self.sock.connect(socket_file)
except Exception as e:
            logging.warning(
                "LightningClient: Failed to open socket {}".format(str(e)))
sys.exit(1)
self.f = self.sock.makefile('r')
self.id = 0
def close(self):
self.sock.close()
def call(self, method, *args):
response = self._call(method, *args)
if response.get("error"):
logging.warn("LightningClient retry due to error: {}".format(response["error"]))
time.sleep(0.5)
response = self._call(method, *args)
return response
else:
return response
def _call(self, method, *args):
params = dict(args[0]) if len(args) == 1 and type(
args[0]) == dict else list(args)
request = {'method': method, 'params': params}
request['id'] = self.id
request['jsonrpc'] = '2.0'
self.id += 1
msg = json.dumps(request) + '\n'
self.sock.sendall(msg.encode('ascii'))
response = json.loads(self.f.readline())
# Each response ends with two new lines, hence this.
# ref: https://github.com/ElementsProject/lightning/blob/v0.10.1/contrib/pyln-client/pyln/client/lightning.py#L298
_ = self.f.readline()
return response
def CreateLightningClient() -> LightningClient:
'''
Caller owns the returned LightningClient.
LightningClient must be closed when it is not needed.
'''
assert Config.EnableLightning, "Need to set EnableLightning to true in the config"
return LightningClient(Config.LightningUnixSocket)
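# Illustrative usage sketch (not part of the original module): as the docstring
# above says, the caller owns the client and must close it, e.g.
#
#     client = CreateLightningClient()
#     try:
#         info = client.call("getinfo")["result"]
#     finally:
#         client.close()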
class LightningOverview():
def __init__(self):
self.node_id = ""
self.alias = ""
self.funds_total_amount: int = None
self.total_spendable: int = None
self.total_receivable: int = None
self.num_active_channels: int = None
self.num_peers: int = None
class LightningFund():
def __init__(self):
self.txid = ""
self.output: int = None
self.value: int = None
self.address = ""
self.status = ""
class LightningChannel():
def __init__(self):
self.peer_id = ""
self.connected: int = None
self.funding_local_msat: int = None
self.funding_remote_msat: str = None
self.receivable_msat: str = None
self.spendable_msat: str = None
# Number of payment fulfilled that our Node received
self.in_payments_fulfilled: int = None
self.in_msatoshi_fulfilled: int = None
# Number of payment fulfilled that our Node sent
self.out_payments_fulfilled: int = None
self.out_msatoshi_fulfilled: int = None
self.state = ""
class LightningInvoice():
def __init__(self):
self.label: str = None
self.bolt11 = ""
self.msatoshi = 0
self.status: str = None
self.description: str = None
self.expires_at = 0
self.paid_at = 0
class LightningPay():
def __init__(self):
self.bolt11 = ""
self.destination = ""
# status of the payment(one of "pending", "failed", "complete")
self.status = ""
self.created_at = 0
# the amount we actually sent, including fees. Example "3000msat"
self.amount_sent_msat = ""
# the amount the destination received, if known. Example "1000msat"
self.amount_msat = ""
def get_lightning_overview():
client = CreateLightningClient()
try:
getinfo_response = client.call("getinfo")
assert getinfo_response.get("error") is None, "getinfo failed: {}".format(getinfo_response["error"])
info = getinfo_response["result"]
overview = LightningOverview()
overview.node_id = info["id"]
overview.alias = info["alias"]
overview.num_peers = info["num_peers"]
overview.num_active_channels = info["num_active_channels"]
listpeers_response = client.call("listpeers")
assert listpeers_response.get("error") is None, "listpeers failed: {}".format(listpeers_response["error"])
overview.total_receivable = 0
overview.total_spendable = 0
peers = listpeers_response["result"]["peers"]
for peer in peers:
for channel in peer["channels"]:
overview.total_receivable += channel["receivable_msatoshi"] // 1000
overview.total_spendable += channel["spendable_msatoshi"] // 1000
listfunds_response = client.call("listfunds")
assert listfunds_response.get(
"error") is None, "listfunds failed: {}".format(listfunds_response["error"])
funds = listfunds_response["result"]
overview.funds_total_amount = 0
for output in funds["outputs"]:
overview.funds_total_amount += output["value"]
return overview
finally:
client.close()
def listfunds() -> List[LightningFund]:
client = CreateLightningClient()
try:
listfunds_response = client.call("listfunds")
assert listfunds_response.get(
"error") is None, "listfunds failed: {}".format(listfunds_response["error"])
funds = listfunds_response["result"]["outputs"]
result = []
for fund in funds:
lightning_fund = LightningFund()
lightning_fund.txid = fund["txid"]
lightning_fund.output = fund["output"]
lightning_fund.value = fund["value"]
lightning_fund.address = fund["address"]
lightning_fund.status = fund["status"]
result.append(lightning_fund)
return result
finally:
client.close()
def get_channels() -> List[LightningChannel]:
client = CreateLightningClient()
try:
listpeers_response = client.call("listpeers")
assert listpeers_response.get(
"error") is None, "listpeers failed: {}".format(listpeers_response["error"])
peers = listpeers_response["result"]["peers"]
lightning_channels: List[LightningChannel] = []
for peer in peers:
for channel in peer["channels"]:
lightning_channel = LightningChannel()
lightning_channel.peer_id = peer["id"]
lightning_channel.connected = peer["connected"]
lightning_channel.funding_local_msat = channel["funding"]["local_msat"]
lightning_channel.funding_remote_msat = channel["funding"]["remote_msat"]
lightning_channel.receivable_msat = channel["receivable_msat"]
lightning_channel.spendable_msat = channel["spendable_msat"]
lightning_channel.in_payments_fulfilled = channel["in_payments_fulfilled"]
lightning_channel.in_msatoshi_fulfilled = channel["in_msatoshi_fulfilled"]
lightning_channel.out_payments_fulfilled = channel["out_payments_fulfilled"]
lightning_channel.out_msatoshi_fulfilled = channel["out_msatoshi_fulfilled"]
lightning_channel.state = channel["state"]
lightning_channels.append(lightning_channel)
return lightning_channels
finally:
client.close()
def invoice(msatoshi, description, expiry):
"""
    @description: human-readable text that is encoded in the invoice; no more than 100 chars.
    @expiry: without a suffix it is interpreted as seconds, otherwise the suffixes s, m, h, d, w
    indicate seconds, minutes, hours, days and weeks respectively, for example 60s = 60 seconds.
    @return: (bolt11, expires_at, invoice_label) where the bolt11 string encodes the invoice and
    expires_at is a UNIX timestamp of when the invoice expires. See here for more on bolt11:
https://github.com/lightningnetwork/lightning-rfc/blob/v1.0/11-payment-encoding.md
"""
assert len(description) < 100
client: LightningClient = CreateLightningClient()
invoice_label = _next_invoice_label()
try:
params = {
"msatoshi": msatoshi,
"label": invoice_label,
"description": description,
"expiry": expiry
}
invoice_response = client.call("invoice", params)
assert invoice_response.get(
"error") is None, invoice_response.get("error")
# Check for any warnings. Abort if there is any.
result = invoice_response["result"]
warnings = []
for key, value in result.items():
if key.startswith("warning_"):
warnings.append((key, value))
if warnings:
logging.warn("Invoice warnings: {}".format(warnings))
raise Exception("invoice has warnings")
return result["bolt11"], result["expires_at"], invoice_label
finally:
client.close()
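# Illustrative usage sketch (not part of the original module), with hypothetical
# values: create a 60-second, 1000 msat invoice and later poll it by its label.
#
#     bolt11, expires_at, label = invoice(1000, "coffee", "60s")
#     print(invoice_status(label))  # one of "unpaid", "paid", "expired"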
def invoice_status(invoice_label):
"""
@return: Whether it's paid, unpaid or unpayable (one of "unpaid", "paid", "expired").
"""
client = CreateLightningClient()
try:
listinvoices_response = client.call("listinvoices", invoice_label)
assert listinvoices_response.get("error") is None
invoices = listinvoices_response["result"]["invoices"]
assert len(invoices) == 1, "Expecting exactly 1 invoice for {}, but got {}".format(
invoice_label, len(invoices))
return invoices[0]["status"]
finally:
client.close()
def listinvoices() -> List[LightningInvoice]:
client = CreateLightningClient()
try:
listinvoices_response = client.call("listinvoices")
assert listinvoices_response.get("error") is None
invoices: List[LightningInvoice] = []
for invoice_json in listinvoices_response["result"]["invoices"]:
invoice = LightningInvoice()
invoice.label = invoice_json["label"]
invoice.bolt11 = invoice_json["bolt11"]
invoice.msatoshi = invoice_json["msatoshi"]
invoice.status: str = invoice_json["status"]
invoice.description: str = invoice_json["description"]
invoice.expires_at = invoice_json["expires_at"]
if "paid_at" in invoice_json:
invoice.paid_at = invoice_json["paid_at"]
invoices.append(invoice)
return invoices
finally:
client.close()
def listpays() -> List[LightningPay]:
client = CreateLightningClient()
try:
listpays_response = client.call("listpays")
assert listpays_response.get("error") is None, listpays_response["error"]
        pays: List[LightningPay] = []
for pay_json in listpays_response["result"]["pays"]:
pay = LightningPay()
pay.bolt11 = pay_json["bolt11"]
pay.destination = pay_json["destination"]
pay.status = pay_json["status"]
pay.created_at = pay_json["created_at"]
pay.amount_msat = pay_json["amount_msat"]
pay.amount_sent_msat = pay_json["amount_sent_msat"]
pay.fee_msatoshi: int = int(
pay.amount_sent_msat[:-4]) - int(pay.amount_msat[:-4])
pays.append(pay)
return pays
finally:
client.close()
def decode(invoice) -> LightningInvoice:
"""
@invoice: is a str, bolt11.
"""
client = CreateLightningClient()
try:
decode_response = client.call("decode", invoice)
assert decode_response.get("error") is None
result = decode_response["result"]
assert result["valid"], "decode is invalid"
invoice = LightningInvoice()
invoice.msatoshi = result["msatoshi"]
invoice.description: str = result["description"]
return invoice
finally:
client.close()
def pay(invoice) -> int:
"""
@invoice: is a str, bolt11.
@return: return msatoshi sent.
"""
client = CreateLightningClient()
try:
pay_response = client.call("pay", invoice)
assert pay_response.get("error") is None, pay_response["error"]
return pay_response["result"]["msatoshi_sent"]
finally:
client.close()
# The captured group is the ID for the invoice
_gInvoiceLabelPattern = r"^banana-lightning-([\d]+)$"
_gLightningMetadata = {
    "lastInvoiceId": None
}
_gInvoiceIdLock: Lock = Lock()
def _next_invoice_label():
    if not _gInvoiceIdLock.acquire(True, 30):
        raise Exception("Deadlock?")
    _gLightningMetadata["lastInvoiceId"] += 1
    invoice_id = _gLightningMetadata["lastInvoiceId"]
    _gInvoiceIdLock.release()
    invoice_label = "banana-lightning-{}".format(invoice_id)
    assert re.fullmatch(_gInvoiceLabelPattern, invoice_label) is not None
    return invoice_label
def _init_lightning_metadata():
    client = CreateLightningClient()
    try:
        listinvoices_response = client.call("listinvoices")
        assert listinvoices_response.get("error") is None
        invoices = listinvoices_response["result"]["invoices"]
        _gLightningMetadata["lastInvoiceId"] = 0
        for invoice in invoices:
            label = invoice["label"]
            match = re.fullmatch(_gInvoiceLabelPattern, label)
            if match:
                assert len(match.groups()) == 1
                invoice_id = int(match.groups()[0])
                if invoice_id > _gLightningMetadata["lastInvoiceId"]:
                    _gLightningMetadata["lastInvoiceId"] = invoice_id
    finally:
        client.close()
if Config.EnableLightning:
    _init_lightning_metadata()
|
"""
Given the weights and profits of ‘N’ items, we are asked to put these items
in a knapsack which has a capacity ‘C’. The goal is to get the maximum profit
from the items in the knapsack.
We are allowed to use an unlimited quantity of each item (the unbounded knapsack variant).
Let’s take the example of Merry, who wants to carry some fruits in the
knapsack to get maximum profit. Here are the weights and profits of the fruits:
Items: { Apple, Orange, Melon }
Weights: { 1, 2, 3 }
Profits: { 15, 20, 50 }
Knapsack capacity: 5
Let’s try to put different combinations of fruits in the knapsack,
such that their total weight is not more than 5.
5 Apples (total weight 5) => 75 profit
1 Apple + 2 Oranges (total weight 5) => 55 profit
2 Apples + 1 Melon (total weight 5) => 80 profit
1 Orange + 1 Melon (total weight 5) => 70 profit
This shows that 2 apples + 1 melon is the best combination,
as it gives us the maximum profit and the total weight does not exceed the
capacity.
"""
from typing import List, DefaultDict
from types import SimpleNamespace
from collections import defaultdict
import pprint
def solve_knapsack_rec(profits: list, weights: list, capacity: int):
if len(profits) != len(weights):
        raise ValueError("Profits and weights lengths should be the same")
result: List[List[int]] = []
profit = SimpleNamespace(max=0)
def helper(slate: list, idx: int, capacity: int, cur_max_profit: int, cur_weight: int):
if cur_max_profit > profit.max and cur_weight == capacity:
profit.max = cur_max_profit
result.append(list(slate))
if idx >= len(weights) or cur_weight >= capacity:
return
if weights[idx] < capacity:
slate.append((weights[idx], profits[idx]))
helper(slate, idx, capacity, cur_max_profit + profits[idx],
cur_weight + weights[idx])
slate.pop()
helper(slate, idx + 1, capacity, cur_max_profit, cur_weight)
helper([], 0, capacity, 0, 0)
print(result)
return profit.max
def solve_knapsack_memo(profits: list, weights: list, capacity: int):
if len(profits) != len(weights):
        raise ValueError("Profits and weights lengths should be the same")
result: List[List[int]] = []
profit = SimpleNamespace(max=0)
memo: DefaultDict = defaultdict(int)
def helper(slate: list, idx: int, capacity: int, cur_max_profit: int, cur_weight: int):
if cur_max_profit > profit.max and cur_weight == capacity:
profit.max = cur_max_profit
result.append(list(slate))
memo[cur_weight] = cur_max_profit
if idx >= len(weights) or cur_weight >= capacity:
return
if weights[idx] not in memo and weights[idx] < capacity:
slate.append((weights[idx], profits[idx]))
helper(slate, idx, capacity, cur_max_profit + profits[idx],
cur_weight + weights[idx])
slate.pop()
helper(slate, idx + 1, capacity, cur_max_profit, cur_weight)
helper([], 0, capacity, 0, 0)
print(result)
return profit.max
def solve_knapsack_dp(profits: list, weights: list, capacity: int) -> int:
table: List[List[int]] = [[0 for x in range(capacity + 1)] for y in range(len(weights))]
for col in range(1, len(table[0])):
if col >= weights[0] and weights[0] <= capacity:
table[0][col] = (col // weights[0]) * profits[0]
weights_used: set = set()
for row in range(1, len(table)):
for col in range(1, len(table[row])):
exclude_current_weight: int = table[row - 1][col]
include_current_weight: int = (profits[row] + table[row][col - weights[row]]
if weights[row] <= col else 0)
if include_current_weight > exclude_current_weight:
weights_used.add(weights[row])
table[row][col] = include_current_weight
else:
table[row][col] = exclude_current_weight
pprint.pprint(table)
print(weights_used)
return table[-1][-1]
print(solve_knapsack_dp([15, 50, 60, 90], [1, 3, 4, 5], 8))
print(solve_knapsack_dp([15, 50, 60, 90], [1, 3, 4, 5], 6))
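# Sanity check (added for illustration): the worked example from the module
# docstring -- weights [1, 2, 3], profits [15, 20, 50], capacity 5 -- has a
# maximum profit of 80 (2 apples + 1 melon).
assert solve_knapsack_dp([15, 20, 50], [1, 2, 3], 5) == 80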
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally import exceptions
from rally.task import context
from tests.unit import fakes
from tests.unit import test
@ddt.ddt
class BaseContextTestCase(test.TestCase):
@ddt.data({"config": {"bar": "spam"}, "expected": {"bar": "spam"}},
{"config": {"bar": "spam"}, "expected": {"bar": "spam"}},
{"config": {}, "expected": {}},
{"config": None, "expected": None},
{"config": 42, "expected": 42},
{"config": "foo str", "expected": "foo str"},
{"config": [], "expected": ()},
{"config": [11, 22, 33], "expected": (11, 22, 33)})
@ddt.unpack
def test_init(self, config, expected):
ctx = {"config": {"foo": 42, "fake": config}, "task": "foo_task"}
ins = fakes.FakeContext(ctx)
self.assertEqual(expected, ins.config)
self.assertEqual("foo_task", ins.task)
self.assertEqual(ctx, ins.context)
def test_init_with_default_config(self):
@context.configure(name="foo", order=1)
class FooContext(fakes.FakeContext):
DEFAULT_CONFIG = {"alpha": "beta", "delta": "gamma"}
self.addCleanup(FooContext.unregister)
ctx = {"config": {"foo": {"ab": "cd"}, "bar": 42}, "task": "foo_task"}
ins = FooContext(ctx)
self.assertEqual({"ab": "cd", "alpha": "beta", "delta": "gamma"},
ins.config)
def test_init_empty_context(self):
ctx0 = {
"task": mock.MagicMock(),
"config": {"fake": {"foo": 42}}
}
ctx = fakes.FakeContext(ctx0)
self.assertEqual(ctx0["config"]["fake"], ctx.config)
self.assertEqual(ctx0["task"], ctx.task)
self.assertEqual(ctx0, ctx.context)
@ddt.data(({"test": 2}, True), ({"nonexisting": 2}, False))
@ddt.unpack
def test_validate(self, config, valid):
results = context.Context.validate("fake", None, None, config)
if valid:
self.assertEqual(results, [])
else:
self.assertEqual(1, len(results))
def test_setup_is_abstract(self):
@context.configure("test_abstract_setup", 0)
class A(context.Context):
def cleanup(self):
pass
self.addCleanup(A.unregister)
self.assertRaises(TypeError, A)
def test_cleanup_is_abstract(self):
@context.configure("test_abstract_cleanup", 0)
class A(context.Context):
def setup(self):
pass
self.addCleanup(A.unregister)
self.assertRaises(TypeError, A)
def test_with_statement(self):
ctx0 = {
"task": mock.MagicMock()
}
ctx = fakes.FakeContext(ctx0)
ctx.setup = mock.MagicMock()
ctx.cleanup = mock.MagicMock()
with ctx as entered_ctx:
self.assertEqual(ctx, entered_ctx)
ctx.cleanup.assert_called_once_with()
def test_get_owner_id_from_task(self):
ctx = {"config": {"fake": {"test": 10}}, "task": {"uuid": "task_uuid"}}
ins = fakes.FakeContext(ctx)
self.assertEqual("task_uuid", ins.get_owner_id())
def test_get_owner_id(self):
ctx = {"config": {"fake": {"test": 10}}, "task": {"uuid": "task_uuid"},
"owner_id": "foo_uuid"}
ins = fakes.FakeContext(ctx)
self.assertEqual("foo_uuid", ins.get_owner_id())
def test___eq__(self):
@context.configure(name="bar", order=1)
class BarContext(fakes.FakeContext):
pass
foo_context = fakes.FakeContext()
bar_context = BarContext()
self.assertTrue(foo_context == bar_context)
def test___lt__(self):
@context.configure(name="barlt", order=2)
class BarContext(fakes.FakeContext):
pass
foo_context = fakes.FakeContext()
bar_context = BarContext()
self.assertTrue(foo_context < bar_context)
def test___gt__(self):
@context.configure(name="bargt", order=0)
class BarContext(fakes.FakeContext):
pass
foo_context = fakes.FakeContext()
bar_context = BarContext()
self.assertTrue(foo_context > bar_context)
def test___le__(self):
@context.configure(name="barle", order=1)
class BarContext(fakes.FakeContext):
pass
@context.configure(name="bazle", order=2)
class BazContext(fakes.FakeContext):
pass
foo_context = fakes.FakeContext()
bar_context = BarContext()
baz_context = BazContext()
self.assertTrue(foo_context <= bar_context)
self.assertTrue(foo_context <= baz_context)
def test___ge__(self):
@context.configure(name="barge", order=0)
class BarContext(fakes.FakeContext):
pass
@context.configure(name="bazge", order=-1)
class BazContext(fakes.FakeContext):
pass
foo_context = fakes.FakeContext()
bar_context = BarContext()
baz_context = BazContext()
self.assertTrue(foo_context >= bar_context)
self.assertTrue(foo_context >= baz_context)
class ContextManagerTestCase(test.TestCase):
@mock.patch("rally.task.context.ContextManager._get_sorted_context_lst")
def test_setup(self, mock__get_sorted_context_lst):
foo_context = mock.MagicMock()
bar_context = mock.MagicMock()
mock__get_sorted_context_lst.return_value = [foo_context, bar_context]
ctx_object = {"config": {"a": [], "b": []}, "task": {"uuid": "uuid"}}
manager = context.ContextManager(ctx_object)
result = manager.setup()
self.assertEqual(result, ctx_object)
foo_context.setup.assert_called_once_with()
bar_context.setup.assert_called_once_with()
self.assertEqual([
{
"plugin_cfg": foo_context.config,
"plugin_name": foo_context.get_fullname.return_value,
"setup": {
"atomic_actions": foo_context.atomic_actions.return_value,
"error": None,
"started_at": mock.ANY,
"finished_at": mock.ANY
},
"cleanup": {
"atomic_actions": None,
"error": None,
"started_at": None,
"finished_at": None}
},
{
"plugin_cfg": bar_context.config,
"plugin_name": bar_context.get_fullname.return_value,
"setup": {
"atomic_actions": bar_context.atomic_actions.return_value,
"error": None,
"started_at": mock.ANY,
"finished_at": mock.ANY
},
"cleanup": {
"atomic_actions": None,
"error": None,
"started_at": None,
"finished_at": None}
}], manager.contexts_results())
@mock.patch("rally.task.context.task_utils.format_exc")
@mock.patch("rally.task.context.ContextManager._get_sorted_context_lst")
def test_setup_fails(self, mock__get_sorted_context_lst, mock_format_exc):
special_exc = KeyError("Oops")
foo_context = mock.MagicMock()
foo_context.setup.side_effect = special_exc
bar_context = mock.MagicMock()
mock__get_sorted_context_lst.return_value = [foo_context, bar_context]
ctx_object = {"config": {"a": [], "b": []}, "task": {"uuid": "uuid"}}
manager = context.ContextManager(ctx_object)
e = self.assertRaises(KeyError, manager.setup)
self.assertEqual(special_exc, e)
foo_context.setup.assert_called_once_with()
self.assertFalse(bar_context.setup.called)
self.assertEqual([
{
"plugin_cfg": foo_context.config,
"plugin_name": foo_context.get_fullname.return_value,
"setup": {
"atomic_actions": foo_context.atomic_actions.return_value,
"error": mock_format_exc.return_value,
"started_at": mock.ANY,
"finished_at": mock.ANY
},
"cleanup": {
"atomic_actions": None,
"error": None,
"started_at": None,
"finished_at": None}
}], manager.contexts_results())
mock_format_exc.assert_called_once_with(special_exc)
def test_get_sorted_context_lst(self):
@context.configure("foo", order=1)
class A(context.Context):
def setup(self):
pass
def cleanup(self):
pass
@context.configure("foo", platform="foo", order=0)
class B(A):
pass
@context.configure("boo", platform="foo", order=2)
class C(A):
pass
self.addCleanup(A.unregister)
self.addCleanup(B.unregister)
self.addCleanup(C.unregister)
ctx_obj = {"config": {"foo@default": [], "boo": [], "foo@foo": []}}
ctx_insts = context.ContextManager(ctx_obj)._get_sorted_context_lst()
self.assertEqual(3, len(ctx_insts))
self.assertIsInstance(ctx_insts[0], B)
self.assertIsInstance(ctx_insts[1], A)
self.assertIsInstance(ctx_insts[2], C)
@mock.patch("rally.task.context.Context.get_all")
def test_get_sorted_context_lst_fails(self, mock_context_get_all):
ctx_object = {"config": {"foo": "bar"}}
mock_context_get_all.return_value = []
manager = context.ContextManager(ctx_object)
self.assertRaises(exceptions.PluginNotFound,
manager._get_sorted_context_lst)
mock_context_get_all.assert_called_once_with(
name="foo", platform=None, allow_hidden=True)
def test_cleanup(self):
mock_obj = mock.MagicMock()
@context.configure("a", platform="foo", order=1)
class A(context.Context):
def setup(self):
pass
def cleanup(self):
mock_obj("a@foo")
self.addCleanup(A.unregister)
@context.configure("b", platform="foo", order=2)
class B(context.Context):
def setup(self):
pass
def cleanup(self):
mock_obj("b@foo")
ctx_object = {
"config": {"a@foo": [], "b@foo": []},
"task": {"uuid": "uuid"}
}
context.ContextManager(ctx_object).cleanup()
mock_obj.assert_has_calls([mock.call("b@foo"), mock.call("a@foo")])
@mock.patch("rally.task.context.task_utils.format_exc")
@mock.patch("rally.task.context.LOG.exception")
def test_cleanup_exception(self, mock_log_exception, mock_format_exc):
mock_obj = mock.MagicMock()
exc = Exception("So Sad")
@context.configure("a", platform="foo", order=1)
class A(context.Context):
def setup(self):
pass
def cleanup(self):
mock_obj("a@foo")
raise exc
self.addCleanup(A.unregister)
ctx_object = {"config": {"a@foo": []}, "task": {"uuid": "uuid"}}
ctx_manager = context.ContextManager(ctx_object)
ctx_manager._data[A.get_fullname()] = {
"cleanup": {"atomic_actions": None,
"started_at": None,
"finished_at": None,
"error": None}}
ctx_manager.cleanup()
mock_obj.assert_called_once_with("a@foo")
mock_log_exception.assert_called_once_with(mock.ANY)
mock_format_exc.assert_called_once_with(exc)
self.assertEqual([{
"cleanup": {
"atomic_actions": [],
"error": mock_format_exc.return_value,
"started_at": mock.ANY,
"finished_at": mock.ANY}}],
ctx_manager.contexts_results())
@mock.patch("rally.task.context.ContextManager.cleanup")
@mock.patch("rally.task.context.ContextManager.setup")
def test_with_statement(
self, mock_context_manager_setup, mock_context_manager_cleanup):
with context.ContextManager(mock.MagicMock()):
mock_context_manager_setup.assert_called_once_with()
mock_context_manager_setup.reset_mock()
self.assertFalse(mock_context_manager_cleanup.called)
self.assertFalse(mock_context_manager_setup.called)
mock_context_manager_cleanup.assert_called_once_with()
@mock.patch("rally.task.context.ContextManager.cleanup")
@mock.patch("rally.task.context.ContextManager.setup")
def test_with_statement_exception_during_setup(
self, mock_context_manager_setup, mock_context_manager_cleanup):
mock_context_manager_setup.side_effect = Exception("abcdef")
try:
with context.ContextManager(mock.MagicMock()):
pass
except Exception:
pass
finally:
mock_context_manager_setup.assert_called_once_with()
mock_context_manager_cleanup.assert_called_once_with()
|
import os, re, time, shutil
import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool
# the only global variable, would be nice if i could somehow implement it in
# main() but idk how?
temp = './temp/'
# gets the url and returns beautifulsoup'd html
#
# it's used multiple times so i decided that it was maybe smarter to make it a
# separate function?
def get_site(url):
# uses requests to get the website
response = requests.get(url)
# formats the website response
html = BeautifulSoup(response.content, 'html.parser')
# closes the website request - maybe unnecessary?
response.close()
# returns the beautifulsoup'd html
return html
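# an optional variant (just a sketch, not used by the script): reusing a single
# requests.Session created by the caller keeps the connection alive between
# requests and removes the need to close each response by hand; the 30 second
# timeout is an arbitrary assumption
def get_site_with_session(url, session):
    # 'session' is a requests.Session created and owned by the caller
    response = session.get(url, timeout=30)
    return BeautifulSoup(response.content, 'html.parser')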
def scrape(data_list):
    # gets beautifulsoup'd html using the url provided by the data_list
html = get_site(data_list[1])
    # some chapters are in the chapter thing, others in the chapter-content thing
    # this tries the chapter thing first and if it raises an exception it tries
    # the chapter-content thing
#
# the definition of a quick and dirty fix, i wonder if there is a better
# way to do this?
try:
        # creates a list containing all paragraphs in the chapter thing
        # inside the html acquired above
content = html.find(id='chapter').find_all('p')
    except AttributeError:
        # creates a list containing all paragraphs in the chapter-content thing
        # inside the html acquired above
        content = html.find(id='chapter-content').find_all('p')
    # gets the text from each paragraph in the content list
content = [ x.get_text() for x in content ]
# removes all the spaces surrounding the text to fix weird site formatting
content = [ x.strip() for x in content ]
# adds a line break after each paragraph
content = [ x + '\n\n' for x in content ]
# joins the content list into a large string
content = ''.join(content)
# creates a new file using the name provided by the data_list
with open(temp + data_list[0] + '.txt', 'w', encoding='utf-8') as target:
# writes the content string inside the file
target.write(content)
        # the with statement closes the file automatically when the block ends
# prints which chapter the function acquired
print(' Acquired: ' + data_list[0], end='\r')
# displays strings depending on the number n, pretty much eye candy used to
# show that the script is running while it's gathering chapter urls
#
# smart usage of the dictionary maybe?
def search_dots(n):
dots = {
0:' ',
1:' . ',
2:' .. ',
3:' ...'
}
print(dots.get(n), end='\r')
# main function
def main():
    # gets the ToC link
url = input('input url: ')
# defines the script start time
start_time = time.time()
    # gets the website html
html = get_site(url)
# finds the novel title
novel_title = html.find(class_='title').get_text()
print('\n Novel title: {}'.format(novel_title))
# finds the novel author
novel_author = html.find(class_='info').find_all('div')[0].get_text().split(',')
novel_author = novel_author[0][7:]
print(' Novel author: {}'.format(novel_author))
# TODO: get other novel data such as the genre, the source, and
# maybe if it's completed or not...
# formats the given url to get the base url for future use
# https://novelfull.com/dimensional-sovereign.html -> https://novelfull.com
# notice it doesn't have the slash at the end...
main_url = url.split('/')[0:3]
main_url = '/'.join(main_url)
    # creates an empty list for formatted chapter links
url_list = []
# value used by the search_dots() function
n = 0
    # simple loop that a) gets the page html again (i could maybe skip this
    # part but idk how i would implement it otherwise), b) finds the chapter urls
    # on the page and collects them before finding the next page url, and
    # repeats the steps until it gets to the last page
while True:
# eye candy
search_dots(n)
        # as before, gets beautifulsoup'd html
html = get_site(url)
# finds and formats chapter links before appending them to the url_list
columns = html.find_all(class_='list-chapter')
for column in columns:
rows = column.find_all('a')
for row in rows:
url_list.append(main_url + row.get('href'))
        # tries to find the next page link
# if it succeeds it changes the url used and adds +1 to the value n
# which is used for eye candy
try:
# formats the next page url using the main_url defined earlier
url = main_url + html.find(class_='next').find('a').get('href')
# checks n value, and depending on the condition it either resets
# it to 0 or gives it +1 (used for eye candy)
if n < 3:
n += 1
else:
n = 0
        except AttributeError:
# in case there is no next page url the loop is broken
break
    # prints the number of chapter links gathered
print(' Chapters: {}'.format(len(url_list)))
# tries to make a folder to store scraped chapters
# try is used to prevent the script raising an exception if for some reason
# the folder is already present
try:
# creates a temp folder
os.mkdir(temp)
    except FileExistsError:
        # if the folder is already present, pass without raising an exception
        pass
# creates an empty list for scraped chapter names
name_list = []
# creates numbered names for all scraped chapters
for x in range(len(url_list)):
# creates the numbered name
x = 'ch' + '{}'.format(x + 1).zfill(4)
# appends the name to the name_list
name_list.append(x)
# creates a data_list combining the name_list with the url_list
    # {(name, url), (name, url), ...} <- data_list format (a set of (name, url) tuples)
data_list = set(zip(name_list, url_list))
# the easiest way that i found to implement multiprocessing for a
# simple web-scraper idk what else to say?
with Pool(10) as pool:
        # defines the "crawler" and creates separate processes for the
        # scrape function, feeding it the data_list
crawler = pool.map(scrape, data_list)
# prototype web-scraping code used before i had to make it a function
# to implement multiprocessing ("...")
#
# for i, url in enumerate(url_list):
# i = 'ch' + '{}'.format(i + 1).zfill(4)
#
# html = get_site(url)
#
# content = html.find(id = 'chapter-content').find_all('p')
# content = [ x.get_text() for x in content ]
# content = [ x + '\n\n' for x in content]
# content = ''.join(content)
#
# with open(temp + i + '.txt', 'w', encoding='utf-8') as target:
# target.write(content)
# target.close()
#
# print(' Acquired: ' + i, end='\r')
    # checks the temporary folder, gets all the file names and puts them in a
    # temp_files list
temp_files = os.listdir(temp)
    # prints the number of chapters acquired using the len() of the temp_files
    # list. it's a simple way to allow the user to see if the number of
    # chapters that were downloaded matches the number of chapters there are
print(' Acquired: ' + str(len(temp_files)) + ' files', end='\r')
# relic that is left from when i used to name the chapters "1, 2, 3...",
# and is used to fix the "weird" sorting thing. now i keep it as a precaution
#
# made by @Toothy with a comment on https://nedbatchelder.com/blog/200712/human_sorting.html post
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
temp_files.sort(key=alphanum_key)
# creates the main novel file and names it using the novel_title
with open(novel_title + '.txt', 'w', encoding='utf-8') as f1:
# inside the file it writes the novel data
f1.write('Novel title: {}\nNovel author: {}\nChapters: {}'.format(novel_title, novel_author, len(url_list)))
# clears the acquired string printed above
print(' Merged: ', end='\r')
# for each file (chapter) in the temp_files list
for chapter in temp_files:
# prints file name (chapter)
print(' Merged: ' + chapter, end='\r')
            # opens a file from the temporary folder
with open(temp + chapter, encoding='utf-8') as f2:
# reads line by line in the file from the temporary folder
for line in f2:
# writes the read line to the main novel file
f1.write(line)
# defines the script end time
stop_time = time.time()
# removes the temporary folder (including the individual chapters within)
shutil.rmtree(temp)
    # prints how long the script took to get the novel
print(' Done in {:.2f}s '.format(stop_time - start_time))
# prevents the code from running automatically if the script is imported as a module
if __name__ == '__main__':
main()
|
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import tensorflow as tf
import time
from IPython import display
(train_images,train_labels),(_,_)=tf.keras.datasets.mnist.load_data()
print(train_images.shape)
train_images=train_images.reshape(train_images.shape[0],28,28,1).astype('float32')
train_images=(train_images - 127.5)/127.5 #Normalise the images to [-1:1]
BUFFER_SIZE=60000
BATCH_SIZE=256
#Batch and shuffle the data
train_dataset=tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
def make_generator_model():
model=tf.keras.Sequential()
model.add(layers.Dense(7*7*256,use_bias=False,input_shape=(100,)))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Reshape([7,7,256]))
assert model.output_shape==(None,7,7,256)#Note: None is the batch size
model.add(layers.Conv2DTranspose(128,(5,5),strides=(1,1),padding="same",use_bias=False))
assert model.output_shape==(None,7,7,128)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(64,(5,5),strides=(2,2),padding="same",use_bias=False))
assert model.output_shape==(None,14,14,64)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(1,(5,5),strides=(2,2),padding="same",activation="tanh"))
assert model.output_shape==(None,28,28,1)
return model
generator=make_generator_model()
print(generator.summary())
noise=tf.random.normal([1,100])
generated_image=generator(noise,training=False)
plt.imshow(generated_image[0,:,:,0],cmap="gray")
#Discriminator-It is a CNN-based image classifier
def make_discriminator_model():
model=tf.keras.Sequential()
model.add(layers.Conv2D(64,(5,5),strides=(2,2),padding="same",input_shape=[28,28,1]))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(128,(5,5),strides=(2,2),padding="same"))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(1))
return model
discriminator=make_discriminator_model()
print(discriminator.summary())
decision=discriminator(generated_image)
print(decision)
#Define loss and optimizers(We only predict real or fake)
cross_entropy=tf.keras.losses.BinaryCrossentropy(from_logits=True)
#Discriminator loss
def discriminator_loss(real_output,fake_output):
real_loss=cross_entropy(tf.ones_like(real_output),real_output)
fake_loss=cross_entropy(tf.zeros_like(fake_output),fake_output)
total_loss=real_loss+fake_loss
return total_loss
#Generator loss
def generator_loss(fake_output):
return cross_entropy(tf.ones_like(fake_output),fake_output)
#optimizers
generator_optimizer=tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer=tf.keras.optimizers.Adam(1e-4)
#Define the training loop
EPOCHS=50
noise_dim=100
num_examples_to_generate=16
# Reuse the same seed over time so the generated images are easier to compare across epochs
seed=tf.random.normal([num_examples_to_generate,noise_dim])
#Training step
def train_step(images):
noise=tf.random.normal([BATCH_SIZE,noise_dim])
with tf.GradientTape() as gen_tape,tf.GradientTape() as disc_tape:
generated_images=generator(noise,training=True)
real_output=discriminator(images,training=True)
fake_output=discriminator(generated_images,training=True)
gen_loss=generator_loss(fake_output)
disc_loss=discriminator_loss(real_output,fake_output)
gradients_of_generator=gen_tape.gradient(gen_loss,generator.trainable_variables)
gradients_of_discriminator=disc_tape.gradient(disc_loss,discriminator.trainable_variables)
generator_optimizer.apply_gradients(zip(gradients_of_generator,generator.trainable_variables))
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator,discriminator.trainable_variables))
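# generate_and_save_images() is called inside train() below but is not defined
# anywhere in this file. The following is a minimal sketch of such a helper
# (an assumption modelled on the usual DCGAN tutorial pattern); it uses the
# matplotlib import above and writes one PNG per epoch so the script can run
# end to end.
def generate_and_save_images(model, epoch, test_input):
    # training=False so BatchNormalization runs in inference mode
    predictions = model(test_input, training=False)
    fig = plt.figure(figsize=(4, 4))
    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i + 1)
        # rescale from [-1, 1] back to [0, 255] for display
        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')
    plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
    plt.close(fig)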
def train(dataset,epochs):
for epoch in range(epochs):
start=time.time()
for image_batch in dataset:
train_step(image_batch)
#Produce images for the GIF as we go
display.clear_output(wait=True)
generate_and_save_images(generator,epoch+1,seed)
        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))
train(train_dataset,EPOCHS)
|
# -*- coding: utf-8 -*-
"""
Created on 2018-08-28 17:38:43
---------
@summary: generate a table from JSON
---------
@author: Boris
@email: <EMAIL>
"""
import time
import pyperclip
import feapder.setting as setting
import feapder.utils.tools as tools
from feapder.db.mysqldb import MysqlDB
from feapder.utils.tools import key2underline
class CreateTable:
def __init__(self):
self._db = MysqlDB()
def is_valid_date(self, date):
try:
if ":" in date:
time.strptime(date, "%Y-%m-%d %H:%M:%S")
else:
time.strptime(date, "%Y-%m-%d")
return True
        except ValueError:
return False
def get_key_type(self, value):
try:
value = eval(value)
        except Exception:
            pass
key_type = "varchar(255)"
if isinstance(value, int):
key_type = "int"
elif isinstance(value, float):
key_type = "double"
elif isinstance(value, str):
if self.is_valid_date(value):
if ":" in value:
key_type = "datetime"
else:
key_type = "date"
elif len(value) > 50:
key_type = "text"
else:
key_type = "varchar(255)"
elif isinstance(value, (dict, list)):
key_type = "longtext"
return key_type
def get_data(self):
"""
        @summary: read multi-line data from the console
---------
---------
@result:
"""
input("请复制json格式数据, 复制后按任意键读取剪切板内容\n")
text = pyperclip.paste()
print(text + "\n")
return tools.get_json(text)
def create(self, table_name):
        # input the table fields
data = self.get_data()
if not isinstance(data, dict):
raise Exception("表数据格式不正确")
        # assemble the table structure
sql = """
CREATE TABLE `{db}`.`{table_name}` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id主键',
{other_key}
`crawl_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT '采集时间',
{unique}
PRIMARY KEY (`id`)
) COMMENT='';
"""
# print("请设置注释 回车跳过")
other_key = ""
for key, value in data.items():
key = key2underline(key)
comment = ""
if key == "id":
key = "data_id"
comment = "原始数据id"
key_type = self.get_key_type(value)
# comment = input("%s : %s -> comment:" % (key, key_type))
other_key += (
"`{key}` {key_type} COMMENT '{comment}',\n ".format(
key=key, key_type=key_type, comment=comment
)
)
print("\n")
while True:
yes = input("是否添加批次字段 batch_date(y/n):")
if yes == "y":
other_key += (
"`{key}` {key_type} COMMENT '{comment}',\n ".format(
key="batch_date", key_type="date", comment="批次时间"
)
)
break
elif yes == "n":
break
print("\n")
while True:
yes = input("是否设置唯一索引(y/n):")
if yes == "y":
unique = input("请设置唯一索引, 多个逗号间隔\n等待输入:\n").replace(",", ",")
if unique:
unique = "UNIQUE `idx` USING BTREE (`%s`) comment ''," % "`,`".join(
unique.split(",")
)
break
elif yes == "n":
unique = ""
break
sql = sql.format(
db=setting.MYSQL_DB,
table_name=table_name,
other_key=other_key.strip(),
unique=unique,
)
print(sql)
if self._db.execute(sql):
print("\n%s 创建成功" % table_name)
print("注意手动检查下字段类型,确保无误!!!")
else:
print("\n%s 创建失败" % table_name)
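# A minimal usage sketch (an assumption, not part of the original file): the
# class is driven interactively -- it reads JSON from the clipboard and creates
# the table in the MySQL database configured in feapder's setting module. The
# table name below is hypothetical.
if __name__ == "__main__":
    CreateTable().create("spider_data")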
|
# -*- coding: utf-8 -*-
""" Sahana Eden Security Model
@copyright: 2012-14 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3SecurityModel",)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# =============================================================================
class S3SecurityModel(S3Model):
"""
"""
names = ("security_zone_type",
"security_zone",
"security_staff_type",
"security_staff",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# -----------------------------------------------------------
# Security Zone Types
#
tablename = "security_zone_type"
define_table(tablename,
Field("name",
label=T("Name")),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_ZONE_TYPE = T("Create Zone Type")
crud_strings[tablename] = Storage(
label_create = ADD_ZONE_TYPE,
title_display = T("Zone Type Details"),
title_list = T("Zone Types"),
title_update = T("Edit Zone Type"),
title_upload = T("Import Zone Types"),
label_list_button = T("List Zone Types"),
label_delete_button = T("Delete Zone Type"),
msg_record_created = T("Zone Type added"),
msg_record_modified = T("Zone Type updated"),
msg_record_deleted = T("Zone Type deleted"),
msg_list_empty = T("No Zone Types currently registered"))
zone_type_represent = S3Represent(lookup=tablename)
self.configure(tablename,
deduplicate = self.security_zone_type_duplicate,
)
# -----------------------------------------------------------
# Security Zones
#
tablename = "security_zone"
define_table(tablename,
Field("name",
label=T("Name")),
Field("zone_type_id", db.security_zone_type,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "security_zone_type.id",
zone_type_represent,
sort=True)),
represent = zone_type_represent,
comment = S3AddResourceLink(c="security",
f="zone_type",
label=ADD_ZONE_TYPE,
tooltip=T("Select a Zone Type from the list or click 'Add Zone Type'")),
label=T("Type")),
self.gis_location_id(
widget = S3LocationSelectorWidget2(
catalog_layers = True,
points = False,
polygons = True,
)
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_ZONE = T("Create Zone")
crud_strings[tablename] = Storage(
label_create = ADD_ZONE,
title_display = T("Zone Details"),
title_list = T("Zones"),
title_update = T("Edit Zone"),
title_upload = T("Import Zones"),
label_list_button = T("List Zones"),
label_delete_button = T("Delete Zone"),
msg_record_created = T("Zone added"),
msg_record_modified = T("Zone updated"),
msg_record_deleted = T("Zone deleted"),
msg_list_empty = T("No Zones currently registered"))
zone_represent = S3Represent(lookup=tablename)
# -----------------------------------------------------------
# Security Staff Types
#
tablename = "security_staff_type"
define_table(tablename,
Field("name",
label=T("Name")),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_STAFF = T("Add Staff Type")
crud_strings[tablename] = Storage(
label_create = ADD_STAFF,
title_display = T("Staff Type Details"),
title_list = T("Staff Types"),
title_update = T("Edit Staff Type"),
title_upload = T("Import Staff Types"),
label_list_button = T("List Staff Types"),
label_delete_button = T("Delete Staff Type"),
msg_record_created = T("Staff Type added"),
msg_record_modified = T("Staff Type updated"),
msg_record_deleted = T("Staff Type deleted"),
msg_list_empty = T("No Staff Types currently registered"))
staff_type_represent = S3Represent(lookup=tablename)
# -----------------------------------------------------------
# Security Staff
#
tablename = "security_staff"
define_table(tablename,
self.hrm_human_resource_id(),
Field("staff_type_id", "list:reference security_staff_type",
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "security_staff_type.id",
staff_type_represent,
sort=True,
multiple=True)),
represent = self.security_staff_type_multirepresent,
comment = S3AddResourceLink(c="security",
f="staff_type",
label=ADD_STAFF,
tooltip=T("Select a Staff Type from the list or click 'Add Staff Type'")),
label=T("Type")),
Field("zone_id", db.security_zone,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "security_zone.id",
zone_represent,
sort=True)),
represent = zone_represent,
comment = S3AddResourceLink(c="security",
f="zone",
label=ADD_ZONE,
tooltip=T("For wardens, select a Zone from the list or click 'Add Zone'")),
label=T("Zone")),
self.super_link("site_id", "org_site",
label = T("Facility"),
represent=self.org_site_represent,
readable=True,
writable=True),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_STAFF = T("Add Security-Related Staff")
crud_strings[tablename] = Storage(
label_create = ADD_STAFF,
title_display = T("Security-Related Staff Details"),
title_list = T("Security-Related Staff"),
title_update = T("Edit Security-Related Staff"),
title_upload = T("Import Security-Related Staff"),
label_list_button = T("List Security-Related Staff"),
label_delete_button = T("Delete Security-Related Staff"),
msg_record_created = T("Security-Related Staff added"),
msg_record_modified = T("Security-Related Staff updated"),
msg_record_deleted = T("Security-Related Staff deleted"),
msg_list_empty = T("No Security-Related Staff currently registered"))
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict()
# -------------------------------------------------------------------------
@staticmethod
def security_zone_type_duplicate(item):
"""
Zone Type record duplicate detection, used for the deduplicate hook
@param item: the S3ImportItem to check
"""
if item.tablename == "security_zone_type":
table = item.table
query = (table.name == item.data.name)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if row:
item.id = row.id
item.method = item.METHOD.UPDATE
# -----------------------------------------------------------------------------
@staticmethod
def security_staff_type_multirepresent(opt):
""" Represent a staff type in list views """
db = current.db
table = db.security_staff_type
set = db(table.id > 0).select(table.id,
table.name).as_dict()
if isinstance(opt, (list, tuple)):
opts = opt
vals = [str(set.get(o)["name"]) for o in opts]
multiple = True
elif isinstance(opt, int):
opts = [opt]
vals = str(set.get(opt)["name"])
multiple = False
else:
try:
opt = int(opt)
except:
return current.messages["NONE"]
else:
opts = [opt]
vals = str(set.get(opt)["name"])
multiple = False
if multiple:
if len(opts) > 1:
vals = ", ".join(vals)
else:
vals = len(vals) and vals[0] or ""
return vals
# END =========================================================================
|
from ncc.models.ncc_model import NccEncoderDecoderModel
from ncc.modules.embedding import Embedding
from ncc.modules.code2vec.lstm_encoder import LSTMEncoder
from ncc.modules.seq2seq.lstm_decoder import LSTMDecoder
from ncc.models import register_model
from ncc.utils import utils
DEFAULT_MAX_SOURCE_POSITIONS = 1e5
DEFAULT_MAX_TARGET_POSITIONS = 1e5
@register_model('seq2seq')
class Seq2SeqModel(NccEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, args, config, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted (in case there are any new ones)
# base_architecture(args)
if args['model']['encoder_layers'] != args['model']['decoder_layers']:
raise ValueError('--encoder-layers must match --decoder-layers')
max_source_positions = args['model']['max_source_positions'] if args['model']['max_source_positions'] \
else DEFAULT_MAX_SOURCE_POSITIONS
max_target_positions = args['model']['max_target_positions'] if args['model']['max_target_positions'] \
else DEFAULT_MAX_TARGET_POSITIONS
def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
embed_dict = utils.parse_embedding(embed_path)
utils.print_embed_overlap(embed_dict, dictionary)
return utils.load_embedding(embed_dict, dictionary, embed_tokens)
if args['model']['encoder_embed']:
pretrained_encoder_embed = load_pretrained_embedding_from_file(
args['model']['encoder_embed_path'], task.source_dictionary, args['model']['encoder_embed_dim'])
else:
num_embeddings = len(task.source_dictionary)
pretrained_encoder_embed = Embedding(
num_embeddings, args['model']['encoder_embed_dim'], task.source_dictionary.pad()
)
if args['model']['share_all_embeddings']:
# double check all parameters combinations are valid
if task.source_dictionary != task.target_dictionary:
raise ValueError('--share-all-embeddings requires a joint dictionary')
if args['model']['decoder_embed_path'] and (
args['model']['decoder_embed_path'] != args['model']['encoder_embed_path']):
raise ValueError(
'--share-all-embed not compatible with --decoder-embed-path'
)
if args['model']['encoder_embed_dim'] != args['model']['decoder_embed_dim']:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to '
'match --decoder-embed-dim'
)
pretrained_decoder_embed = pretrained_encoder_embed
args['model']['share_decoder_input_output_embed'] = True
else:
# separate decoder input embeddings
pretrained_decoder_embed = None
if args['model']['decoder_embed']:
pretrained_decoder_embed = load_pretrained_embedding_from_file(
args['model']['decoder_embed'],
task.target_dictionary,
args['model']['decoder_embed_dim']
)
# one last double check of parameter combinations
if args['model']['share_decoder_input_output_embed'] and (
args['model']['decoder_embed_dim'] != args['model']['decoder_out_embed_dim']):
raise ValueError(
'--share-decoder-input-output-embeddings requires '
'--decoder-embed-dim to match --decoder-out-embed-dim'
)
if args['model']['encoder_freeze_embed']:
pretrained_encoder_embed.weight.requires_grad = False
if args['model']['decoder_freeze_embed']:
pretrained_decoder_embed.weight.requires_grad = False
encoder = LSTMEncoder(
dictionary=task.source_dictionary,
embed_dim=args['model']['encoder_embed_dim'],
hidden_size=args['model']['encoder_hidden_size'],
num_layers=args['model']['encoder_layers'],
dropout_in=args['model']['encoder_dropout_in'],
dropout_out=args['model']['encoder_dropout_out'],
bidirectional=bool(args['model']['encoder_bidirectional']),
left_pad=args['task']['left_pad_source'],
pretrained_embed=pretrained_encoder_embed,
max_source_positions=max_source_positions
)
decoder = LSTMDecoder(
dictionary=task.target_dictionary,
embed_dim=args['model']['decoder_embed_dim'],
hidden_size=args['model']['decoder_hidden_size'],
out_embed_dim=args['model']['decoder_out_embed_dim'],
num_layers=args['model']['decoder_layers'],
dropout_in=args['model']['decoder_dropout_in'],
dropout_out=args['model']['decoder_dropout_out'],
attention=args['model']['decoder_attention'],
encoder_output_units=encoder.output_units,
pretrained_embed=pretrained_decoder_embed,
share_input_output_embed=args['model']['share_decoder_input_output_embed'],
adaptive_softmax_cutoff=(
args['model']['adaptive_softmax_cutoff']
if args['criterion'] == 'adaptive_loss' else None
),
max_target_positions=max_target_positions
)
return cls(encoder, decoder)
|
'''
Gastrodon module header
'''
import re
from abc import ABCMeta, abstractmethod
from collections import OrderedDict, Counter
from collections import deque
from functools import lru_cache
from sys import stdout,_getframe
from types import FunctionType,LambdaType,GeneratorType,CoroutineType,FrameType,CodeType,MethodType
from types import BuiltinFunctionType,BuiltinMethodType,DynamicClassAttribute,ModuleType,AsyncGeneratorType
try:
from typing import GenericMeta # python 3.6
except ImportError:
# in 3.7, genericmeta doesn't exist but we don't need it
class GenericMeta(type): pass
from typing import Dict,Match
from urllib.error import HTTPError
from urllib.parse import urlparse
import pandas as pd
from IPython.display import display_png
from SPARQLWrapper import SPARQLWrapper, JSON
from pyparsing import ParseResults, ParseException
from rdflib import Graph, URIRef, Literal, BNode, RDF
from rdflib.namespace import NamespaceManager
from rdflib.plugins.serializers.turtle import TurtleSerializer
from rdflib.plugins.sparql.processor import SPARQLResult
from rdflib.plugins.sparql.parser import parseQuery,parseUpdate
from rdflib.store import Store
from rdflib.term import Identifier, _castPythonToLiteral, Variable
#
# types that could not reasonably be expected to be serialized automatically to RDF terms in a SPARQL query.
#
_cannot_substitute={
FunctionType,LambdaType,GeneratorType,CoroutineType,FrameType,CodeType,MethodType,
BuiltinFunctionType,BuiltinMethodType,DynamicClassAttribute,ModuleType,AsyncGeneratorType,
ABCMeta,GenericMeta,type
}
_pncb_regex='_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\U00010000-\U000EFFFF'
_pncu_regex='_'+_pncb_regex
_pnc_regex='\\-0-9\u00B7\u0300-\u036F\u203F-\u2040'+_pncb_regex
_var_regex=re.compile('[?$]([%s0-9][%s0-9\u00B7\u0300-\u036F\u203F-\u2040]*)' % ((_pncu_regex,)*2))
# a modified version of the PN_LOCAL regex from the SPARQL 1.1 specification with the percentage and colon
# characters removed; this is used to tell whether a full URI can be safely converted to a QName or not
_valid_tail_regex=re.compile("[%s0-9]([%s.]*[%s])?" % (_pncu_regex,_pnc_regex,_pnc_regex))
# % (
# PN_CHARS_U_re)
class GastrodonURI(str):
"""
This class is used to wrap a URI that is passed from Gastrodon to Pandas and back again.
    `GastrodonURI` subclasses `str`.
It keeps track of
both a shortened URI (if a namespace is given) and the full URI, so we can roundtrip this object out of the
table and back into a SPARQL query without a chance of a short name being mistaken for an ordinary string.
"""
def __new__(cls,short,uri_ref):
return super().__new__(cls,short)
def __init__(self,short,uri_ref):
self.uri_ref=uri_ref
def to_uri_ref(self) -> URIRef:
"""
:return: an RDFLib :class:`rdflib.URIRef`
"""
return self.uri_ref
class QName:
"""
This class represents a qualified name.
This class makes it easy to write qualified names, without ambiguity, in Python variables that
later get substituted into SPARQL variables. If
``@prefix bibo: <http://purl.org/ontology/bibo/>``
is declared for an Endpoint, and one writes in Python
``objectType=QName("bibo:AcademicPaper")``
    then the SPARQL variable ``?_objectType`` will be replaced with a URI Reference
``<http://purl.org/ontology/bibo/AcademicPaper>``
for queries inside the scope in which `_objectType` is local. Note that if you simply wrote
``objectType="bibo:AcademicPaper"``
the value substituted in SPARQL would be just the string
``"bibo:AcademicPaper"``
If one wants to write a URI Reference as a full URI, simply use the `URIRef` class from rdflib, ex.
``objectType=URIRef("http://purl.org/ontology/bibo/AcademicPaper")``
:param name: qualified name of the form 'prefix:localname', such as 'rdf:type'
"""
def __init__(self,name:str):
self.name=name
def toURIRef(self,manager:NamespaceManager) -> URIRef:
"""
Convert to URI Reference
:param manager: :class:`rdflib.namespace.NamespaceManager` used to resolve namespace
:return: A :class:`rdflib.URIRef`
"""
if ":" not in self.name:
return None
head,tail=self.name.split(':',1)
for prefix,ns in manager.namespaces():
if prefix==head:
return ns+tail
return URIRef(self.name)
_last_exception=[]
class GastrodonException(Exception):
"""
    Gastrodon-specific exception. The primary feature of this is that it defines the method
`_render_traceback_` which controls the way the exception is drawn in IPython.
:param args: positional arguments for `Exception`
:param kwargs: keyword arguments for `Exception`
"""
kwargs:Dict={}
def __init__(self,*args,**kwargs):
super().__init__(*args)
if "lines" not in kwargs:
kwargs["lines"]=args[0].split('\n')
self.kwargs=kwargs
def _render_traceback_(self):
return self.kwargs["lines"]
@staticmethod
def throw(*args,**kwargs):
"""
Throws a new :class:`GastrodonException` with the following positional and keyword arguments while
suppressing the context of the :class:`Exception` to enable short error messages in Jupyter
:param args: positional arguments for Exception
:param kwargs: keyword arguments for Exception
:return: does not return
"""
raise GastrodonException(*args,**kwargs) from None
class Endpoint(metaclass=ABCMeta):
"""
An Endpoint is something which can answer SPARQL queries. `Endpoint`
is an abstract base class and cannot be instantiated on its own.
Current implementations include
a :class:`RemoteEndpoint` via the SPARQL protocol or a :class:`LocalEndpoint` provided by rdflib.
:param prefixes: Graph object with attached namespace mappings to be applied to the new :class:`Endpoint`
:param base_uri: base URI to control the base namespace of the :class:`Endpoint` as we see it.
"""
qname_regex=re.compile("(?<![A-Za-z<])([A-Za-z_][A-Za-z_0-9.-]*):")
def __init__(self,prefixes:Graph=None,base_uri=None):
self.prefixes=prefixes
self.base_uri=base_uri
if prefixes!=None:
self._namespaces=set(map(lambda y: y if y[-1] in {"#", "/"} else y + "/", [str(x[1]) for x in prefixes.namespaces()]))
def namespaces(self):
"""
Display prefix to namespace mapping.
Produces a Pandas `DataFrame` that looks something like this
====== ==============================
prefix namespace
====== ==============================
bibo ``http://purl.org/ontology/bibo/``
cc ``http://creativecommons.org/ns#``
dbo ``http://dbpedia.org/ontology/``
... ...
====== ==============================
where `prefix` is the index of the dataframe so you can look up a namespace like
``endpoint.namespaces().at['bibo','namespace']``
:return: :class:`pandas.DataFrame` describing the prefix to namespace mapping used for this endpoint
"""
prefix = [x[0] for x in self.prefixes.namespaces()]
namespace = [x[1] for x in self.prefixes.namespaces()]
frame = pd.DataFrame(namespace, columns=["namespace"], index=prefix)
frame.columns.name = "prefix"
return frame.sort_index()
def is_ok_qname(self, url):
"""
Many :class:`URIRef`\s can be resolved to a namespace and written as a short name (QName), except when special
        characters such as parentheses and colons are in the local part of the URI. In that case, the :class:`URIRef`
should be rendered in RDF as an absolute URI (ex. ``<http://example.com/>``).
:param url: a URIRef or a str for a URL
:return: true if the URIRef can be safely resolved to a namespace in short form.
"""
x = str(url)
pos = max(x.rfind('#'), x.rfind('/')) + 1
prefix = x[:pos]
suffix = x[pos:]
if not _valid_tail_regex.fullmatch(suffix):
return None
return prefix in self._namespaces
def ns_part(self, url):
"""
Given a URI like
``http://purl.org/ontology/bibo/AcademicArticle``
return the namespace part of the URI, which would be
``http://purl.org/ontology/bibo/``
This is based on the syntax of the URI, not the declared prefixes associated
with this URI.
:param url: URIRef or string URL
:return: namespace part of URL as string
"""
x = str(url)
return x[:max(x.rfind('#'), x.rfind('/')) + 1]
def local_part(self, url):
"""
Given a URI like
``http://purl.org/ontology/bibo/AcademicArticle``
return the localname part of the URI, which would be
``AcademicArticle``
This is based on the syntax of the URI, not the declared prefixes associated
with this URI.
:param url: URIRef or string URL
:return: localname part of URL as string
"""
x = str(url)
return x[max(x.rfind('#'), x.rfind('/')) + 1:]
def to_python(self, term):
"""
Convert a simple rdflib `term` into a idiomatic Python object.
A simple rdflib term is entirely self-contained; either a literal value or a signifier (a Resource) floating
in space, without consideration of other facts in the graph.
For RDF literals and blank nodes, behavior is exactly the same as the `toPython` method, which this encloses.
URIReferences are wrapped in `GastrodonURI` objects which look like short names (QNames) inside Jupyter and Pandas,
but remember the full URI if they are later used with Gastrodon.
This is a method of an `Endpoint` as opposed to a static method or method of a `term` wrapper because the
exact resolution to a QName is relative to the namespaces defined for that `Endpoint`.
:param term: an RDFLib Node object
:return: a Plain Ordinary Python Object except for URI References which are returned as GastrodonURI
"""
if term==None:
return None
if isinstance(term, URIRef):
if self.prefixes !=None and ("/" in term.toPython() or str(term).startswith('urn:')):
if self.base_uri and str(term).startswith(self.base_uri):
return GastrodonURI("<" + term[len(self.base_uri):] + ">", term)
if self.is_ok_qname(term):
try:
return GastrodonURI(self.short_name(term), term)
except Exception:
pass
return term
return term.toPython()
def short_name(self,term):
"""
Assuming we've made the following namespace declaration on this endpoint,
``@prefix bibo: <http://purl.org/ontology/bibo/>``
and given a URI like
``http://purl.org/ontology/bibo/AcademicArticle``
this returns
``bibo:AcademicArticle``
which can be used as a QName relative to the Endpoint.
:param term: URIRef which can be expressed with a QName
:return: the QName, as a string
"""
prefix, namespace, name = self.prefixes.compute_qname(term)
return ":".join((prefix, name))
def _process_namespaces(self, sparql, parseFn):
if self.prefixes != None:
sparql = self._prepend_namespaces(sparql, parseFn)
return sparql
def _candidate_prefixes(self, sparql:str):
return {x.group(1) for x in self.qname_regex.finditer(sparql)}
def _prepend_namespaces(self, sparql:str, parseFn):
# extract prefixes and base uri from the query so that we won't try to
# overwrite them
parsed = parseFn(sparql)
(query_base,query_ns)=_extract_decl(parsed,parseFn)
candidates=self._candidate_prefixes(sparql)
for (q_prefix,q_uri) in query_ns.namespaces():
if q_prefix in candidates:
candidates.remove(q_prefix)
ns_section = ""
if self.base_uri and not query_base:
ns_section += "base <%s>\n" % (self.base_uri)
for name,value in self.prefixes.namespaces():
if name in candidates:
ns_section += "prefix %s: %s\n" % (name,value.n3())
return ns_section+sparql
def _substitute_arguments(self, sparql:str, args:Dict, prefixes:NamespaceManager) -> str:
def substitute_one(m:Match):
name=m.group(1)
if name not in args:
return m.group()
return self._to_rdf(args[name], prefixes).n3()
sparql = _var_regex.sub(substitute_one,sparql)
return sparql
def _to_rdf(self, value, prefixes):
if not isinstance(value, Identifier):
if isinstance(value, QName):
value = value.toURIRef(prefixes)
elif isinstance(value, GastrodonURI):
value = value.to_uri_ref()
else:
value = _toRDF(value)
# virtuoso-specific hack for bnodes
if isinstance(value, BNode):
value = self._bnode_to_sparql(value)
return value
def _bnode_to_sparql(self, bnode):
return bnode
def _normalize_column_type(self,column):
if not all(filter(lambda x:x==None or type(x)==str,column)):
return column
try:
return [None if x==None else int(x) for x in column]
except ValueError:
pass
try:
return [None if x==None else float(x) for x in column]
except ValueError:
pass
return column
def _dataframe(self, result:SPARQLResult)->pd.DataFrame:
columnNames = [str(x) for x in result.vars]
column = OrderedDict()
for name in columnNames:
column[name] = []
for bindings in result.bindings:
for variable in result.vars:
column[str(variable)].append(self.to_python(bindings.get(variable)))
for key in column:
column[key] = self._normalize_column_type(column[key])
return pd.DataFrame(column)
def decollect(self,node):
'''
:param node: a URIRef pointing to an rdf:Seq, rdf:Bag, or rdf:Alt
:return: a Python List, Counter, or Set of POPOs
'''
survey=self.select_raw("""
SELECT ?type {
VALUES (?type) { (rdf:Seq) (rdf:Bag) (rdf:Alt)}
?s a ?type
}
""",bindings=dict(s=node))
types=self._set(survey)
if RDF.Bag in types:
return self._decollect_Bag(node)
return self._decollect_Seq(node)
def _decollect_Seq(self, node):
items=self.select_raw("""
SELECT ?index ?item {
?s ?predicate ?item
FILTER(STRSTARTS(STR(?predicate),"http://www.w3.org/1999/02/22-rdf-syntax-ns#_"))
BIND(xsd:integer(SUBSTR(STR(?predicate),45)) AS ?index)
} ORDER BY ?index
""",bindings=dict(s=node))
output=[]
for x in items:
output.append(self.to_python(x["item"]))
return output
def _decollect_Bag(self, node):
items=self.select_raw("""
SELECT ?item (COUNT(*) AS ?count) {
?s ?predicate ?item
FILTER(STRSTARTS(STR(?predicate),"http://www.w3.org/1999/02/22-rdf-syntax-ns#_"))
BIND(xsd:integer(SUBSTR(STR(?predicate),45)) AS ?index)
} GROUP BY ?item
""",bindings=dict(s=node))
output=Counter()
for x in items:
output[self.to_python(x["item"])]=self.to_python(x["count"])
return output
def _set(self,result):
columnNames=result.vars
if len(columnNames)>1:
raise ValueError("Currently can only create a set from a single column result")
that=columnNames[0]
output=set()
for bindings in result.bindings:
output.add(bindings[that])
return output
@abstractmethod
def _select(self, sparql,**kwargs) -> SPARQLResult:
pass
@abstractmethod
def _construct(self, sparql,**kwargs) -> Graph:
pass
@abstractmethod
def _update(self, sparql,**kwargs) -> None:
pass
def select(self,sparql:str,**kwargs) -> pd.DataFrame:
"""
Perform a SPARQL SELECT query against the endpoint. To make interactive
queries easy in the Jupyter environment, any variable with a name beginning with
        an underscore (eg. ?_var or $_var)
in the SPARQL query will be replaced with an RDF serialized version of the
variable (eg. var) in the python stack frame of the caller.
If you call this in a Jupyter notebook, it sees all variables that are accessible
from the cells in the notebook. If you call it inside a function definition, it
sees variables local to that definition.
:param sparql: SPARQL SELECT query
:param kwargs: any keyword arguments are implementation-dependent
:return: SELECT result as a Pandas DataFrame
"""
result = self.select_raw(sparql,_user_frame=3,**kwargs)
frame=self._dataframe(result)
columnNames = {str(x) for x in result.vars}
parsed=_parseQuery(sparql)
group_variables=_extract_group_by(parsed)
if group_variables and all([x in columnNames for x in group_variables]):
frame.set_index(group_variables,inplace=True)
return frame
def select_raw(self,sparql:str,_user_frame=2,**kwargs) -> SPARQLResult:
"""
Perform a SPARQL SELECT query as would the select method, but do not
convert result to a DataFrame, instead return the original
SPARQLResult
:param sparql: SPARQL SELECT query
:param kwargs: any keyword arguments are implementation-dependent
:return: result as a SPARQLResult
"""
return self._exec_raw(sparql,self._select,_user_frame,**kwargs)
def construct(self,sparql:str,_user_frame=2,**kwargs):
"""
Perform a SPARQL CONSTRUCT query, making the same substitutions as
the select method. Returns a Graph
:param sparql: SPARQL SELECT query
:param kwargs: any keyword arguments are implementation-dependent
:return: result as a Graph
"""
return self._exec_raw(sparql,self._construct,_user_frame,**kwargs)
def _exec_raw(self,sparql:str,operation,_user_frame=1,**kwargs):
try:
sparql = self._process_namespaces(sparql, _parseQuery)
except ParseException as x:
lines= self._error_header()
lines += [
"Failure parsing SPARQL query supplied by caller; this is either a user error",
"or an error in a function that generated this query. Query text follows:",
""
]
error_lines = self._mark_query(sparql, x)
lines += error_lines
GastrodonException.throw("Error parsing SPARQL query",lines=lines,inner_exception=x)
if "bindings" in kwargs:
bindings = kwargs["bindings"]
else:
bindings = self._filter_frame(_getframe(_user_frame))
sparql = self._substitute_arguments(sparql, bindings, self.prefixes)
try:
if "_inject_post_substitute_fault" in kwargs:
sparql=kwargs["_inject_post_substitute_fault"]
result = operation(sparql, **kwargs)
except ParseException as x:
lines= self._error_header()
lines += [
"Failure parsing SPARQL query after argument substitution. This is almost certainly an error inside",
"Gastrodon. Substituted query text follows:",
""
]
error_lines = self._mark_query(sparql, x)
lines += error_lines
GastrodonException.throw("Error parsing substituted SPARQL query",lines=lines,inner_exception=x)
except HTTPError as x:
lines= self._error_header()
url_parts = urlparse(x.geturl())
lines += [
"HTTP Error doing Remote SPARQL query to endpoint at",
self.url,
""
]
lines.append(str(x))
GastrodonException.throw("HTTP Error doing Remote SPARQL query",lines=lines,inner_exception=x)
pass
return result
def _mark_query(self, sparql, x):
error_lines = sparql.split("\n")
error_lines.insert(x.lineno, " " * (x.col - 1) + "^")
error_lines.append("Error at line %d and column %d" % (x.lineno, x.col))
return error_lines
def _error_header(self):
return [
"*** ERROR ***",
"",
]
def update(self,sparql:str,_user_frame=1,**kwargs) -> None:
"""
Performs a SPARQL update statement with the same substitutions as the select method
:param sparql: SPARQL update query, as a str
:param _user_frame: Number of stack frames to look back to find variables; defaults to 1, which gets variables from the caller, 2 gets them from the caller of the caller and so forth
:param kwargs: dependent on implementation
:return: nothing
"""
try:
sparql = self._process_namespaces(sparql, _parseUpdate)
except ParseException as x:
lines = self._error_header()
lines += [
"Failure parsing SPARQL update statement supplied by caller; this is either a user error or ",
"an error in a function that generated this query. Query text follows:",
""
]
error_lines = self._mark_query(sparql, x)
lines += error_lines
GastrodonException.throw("Error parsing SPARQL query", lines=lines, inner_exception=x)
if "bindings" in kwargs:
bindings = kwargs["bindings"]
else:
bindings=self._filter_frame(_getframe(_user_frame))
sparql = self._substitute_arguments(sparql, bindings, self.prefixes)
return self._update(sparql,**kwargs)
def _filter_frame(self,that:FrameType):
return {
"_"+k:v for (k,v)
in that.f_locals.items()
if type(v) not in _cannot_substitute
and not k.startswith("_")
}
class RemoteEndpoint(Endpoint):
"""
Represents a SPARQL endpoint available under the SPARQL Protocol.
:param url: String URL for the SPARQL endpoint
:param prefixes: Graph containing prefix declarations for this endpoint
:param http_auth: http authentication method (eg. "BASIC", "DIGEST")
:param default_graph: str URL for default graph
:param base_uri: str for base URI for purposes of name resolution
"""
def __init__(self,url:str,prefixes:Graph=None,user=None,passwd=<PASSWORD>,http_auth=None,default_graph=None,base_uri=None):
super().__init__(prefixes,base_uri)
self.url=url
self.user=user
self.passwd=<PASSWORD>
self.http_auth=http_auth
self.default_graph=default_graph
def _jsonToNode(self, jsdata):
type = jsdata["type"]
value = jsdata["value"]
if type == "uri":
return URIRef(value)
if type == "typed-literal":
return Literal(value, datatype=jsdata["datatype"])
if type == "literal":
return Literal(value)
if type == "bnode":
return BNode(value)
return None
def _jsonToPython(self, jsdata):
return self.to_python(self._jsonToNode(jsdata))
def _bnode_to_sparql(self, bnode):
return URIRef(bnode.to_python())
def _update(self, sparql,**kwargs):
that = self._wrapper()
that.setQuery(sparql)
that.setReturnFormat(JSON)
that.setMethod("POST")
result = that.queryAndConvert()
return
def _wrapper(self):
sparql_wrapper = SPARQLWrapper(self.url)
sparql_wrapper.user=self.user
sparql_wrapper.passwd=<PASSWORD>
if self.default_graph:
sparql_wrapper.addDefaultGraph(self.default_graph)
if self.http_auth:
sparql_wrapper.setHTTPAuth(self.http_auth)
return sparql_wrapper
def peel(self,node):
"""
Copies part of a graph starting at node, copying all facts linked at that
        node and continuing this traversal for each blank node that we find.
:param node: URIRef starting point
:return: Graph object containing copied graph
"""
output = self._peel(node)
nodes=all_uri(output)
used_ns = {URIRef(self.ns_part(x)) for x in nodes if x.startswith('http')}
ns_decl = [ns for ns in self.prefixes.namespaces() if ns[1] in used_ns]
for x in ns_decl:
output.namespace_manager.bind(*x)
return output
def _peel(self, node):
output = Graph()
query = """
SELECT (?that as ?s) ?p ?o {
?that ?p ?o .
}
"""
items = self._select(query, bindings={"that": node})
bnodes = set()
q = deque()
urins = set()
while True:
for x in items["results"]["bindings"]:
s = self._jsonToNode(x["s"])
p = self._jsonToNode(x["p"])
o = self._jsonToNode(x["o"])
if isinstance(s, URIRef):
urins.add(self.ns_part(s))
if isinstance(p, URIRef):
urins.add(self.ns_part(p))
if isinstance(p, URIRef):
urins.add(self.ns_part(o))
output.add((s, p, o))
if isinstance(o, BNode) and o not in bnodes:
bnodes.add(o)
q.append(o)
if not q:
return output
# note that the detailed behavior of blank nodes tends to be different in different triple stores,
# in particular, although almost all triple stores have some way to refer to a blank node inside the
# triple store, there is no standard way to do this.
#
# This works with Virtuoso but I tried a number of things that don't work (such as putting a list of
# nodeId's in the form <nodeID://b506362> in an IN clause in a FILTER statement) or things that work but
# are too slow (filtering on STR(?s))
items = self._select(query,bindings={"that":q.popleft()})
def _select(self, sparql:str,**kwargs) -> SPARQLResult:
that = self._wrapper()
that.setQuery(sparql)
that.setReturnFormat(JSON)
json_result=that.queryAndConvert()
res={}
res["type_"] = "SELECT"
res["vars_"] = [Variable(v) for v in json_result["head"]["vars"]]
column = OrderedDict()
bindings=[]
for json_row in json_result["results"]["bindings"]:
rdf_row={}
for variable in res["vars_"]:
if str(variable) in json_row:
rdf_row[variable]=self._jsonToNode(json_row[str(variable)])
else:
rdf_row[variable]=None
bindings.append(rdf_row)
res["bindings"]=bindings
return SPARQLResult(res)
def _construct(self, sparql:str,**kwargs) -> Graph:
result=self._select(sparql,**kwargs)
S=Variable("s")
P=Variable("p")
O=Variable("o")
neo=Graph()
for fact in result.bindings:
neo.add((fact[S],fact[P],fact[O]))
return neo
class LocalEndpoint(Endpoint):
'''
LocalEndpoint for doing queries against a local RDFLib graph.
:param graph: Graph object that will be encapsulated
:param prefixes: Graph defining prefixes for this Endpoint, will be the same as the input graph by default
:param base_uri: base_uri for resolving URLs
'''
def __init__(self,graph:Graph,prefixes:Graph=None):
"""
"""
if not prefixes:
prefixes=graph
super().__init__(prefixes)
self.graph=graph
def _select(self, sparql:str,**kwargs) -> SPARQLResult:
return self.graph.query(sparql)
def _construct(self, sparql:str,**kwargs) -> Graph:
return self.graph.query(sparql)
def _update(self, sparql:str,**kwargs) ->None :
self.graph.update(sparql)
return
def _toRDF(x):
lex,datatype=_castPythonToLiteral(x)
return Literal(lex,datatype=datatype)
def ttl(g:Store):
'''
Write out Graph (or other Store) in Turtle format to stdout.
:param g: input Graph
:return: nothing
'''
s = TurtleSerializer(g)
s.serialize(stdout,spacious=True)
def all_uri(g:Graph):
'''
Returns the set of all URIRef objects inside Graph g appearing in the
    subject, predicate, or object position in any triple.
:param g: input Graph
:return: a set of URIRef objects for all URIRefs that appear in this graph
'''
uris = set()
for fact in g.triples((None, None, None)):
for node in fact:
if isinstance(node, URIRef):
uris.add(node)
return uris
def show_image(filename):
'''
Embed a PNG image with a filename relative to the current working directory into an
IPython/Jupyter notebook.
:param filename:
:return: nothing
'''
with open(filename, "rb") as f:
image = f.read()
display_png(image, raw=True)
def inline(turtle):
'''
    Convert turtle-format RDF into a LocalEndpoint wrapping the parsed Graph
    :param turtle: str in Turtle Format
    :return: LocalEndpoint containing the corresponding triples
'''
g=Graph()
g.parse(data=turtle,format="ttl")
return LocalEndpoint(g)
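# Illustrative usage (assumed, doctest-style; the ex: prefix and triples are made up):
#
#   >>> ep = inline("""
#   ...     @prefix ex: <http://example.org/> .
#   ...     ex:alice ex:knows ex:bob .
#   ... """)
#   >>> len(ep.graph)       # number of triples parsed from the literal Turtle
#   1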
def one(items):
'''
In cases where a composite object has only a single element, returns the element.
Element types currently supported are:
DataFrame
returns the single element of a one-row, one-column DataFrame
List (or object castable to list)
returns the only element of the list
:param items: a composite object
:return: only member of composite object, otherwise throw exception
'''
if isinstance(items,pd.DataFrame):
if items.shape!=(1,1):
raise ValueError("one(x) requires that DataFrame x have exactly one row and one column")
return items.iloc[0,0]
l=list(items)
if len(l)>1:
raise ValueError("Result has more than one member")
if len(l)==0:
raise IndexError("Cannot get first member from empty container")
return l[0]
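# Illustrative behaviour of one() (hypothetical values, doctest-style):
#
#   >>> one([42])
#   42
#   >>> one(pd.DataFrame({"x": [7]}))
#   7
#   >>> one([1, 2])   # raises ValueError: more than one member
#   >>> one([])       # raises IndexError: empty container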
def member(index):
'''
Return URIRef that (as ?predicate) states that ?object is the index(th) member of
?subject.
:param index: non-negative integer index
:return: URIRef rdf:_N for N=index+1
'''
return URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#_{:d}".format(index+1))
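# Illustrative sketch: member(i) is the rdf:_N container-membership predicate, so
# member(0) is rdf:_1 and member(2) is rdf:_3. The subject and literal below are
# hypothetical:
#
#   g = Graph()
#   g.add((URIRef("http://example.org/list"), member(0), Literal("first item")))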
def _extract_decl(parsed: ParseResults,parseFn):
ns=Graph()
base_iri=None
for decl in parsed[0] if parseFn==_parseQuery else parsed["prologue"][0]:
if 'prefix' in decl:
ns.bind(decl["prefix"],decl["iri"],override=True)
elif 'iri' in decl:
base_iri=decl["iri"]
return (base_iri,ns)
@lru_cache()
def _parseUpdate(sparql):
return parseUpdate(sparql)
@lru_cache()
def _parseQuery(sparql):
return parseQuery(sparql)
def _extract_group_by(parsed):
main_part=parsed[1]
if 'groupby' not in main_part:
return []
if not all([type(x)==Variable for x in main_part['groupby']['condition']]):
return []
return [str(x) for x in main_part['groupby']['condition']]
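# Minimal end-to-end sketch of the helpers above, guarded so it only runs when this
# module is executed directly. The prefix and triples are invented for demonstration;
# real use would query an actual endpoint.
if __name__ == "__main__":
    demo = inline("""
        @prefix ex: <http://example.org/> .
        ex:alice ex:knows ex:bob .
        ex:bob   ex:knows ex:carol .
    """)
    print(len(demo.graph))              # 2 triples parsed from the literal Turtle
    print(sorted(all_uri(demo.graph)))  # every URIRef appearing in the demo graph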
import time
from typing import AsyncGenerator, Dict
import aioredis # type: ignore
import pytest
from fast_tools.base.redis_helper import Lock, LockError, RedisHelper, errors
pytestmark = pytest.mark.asyncio
@pytest.fixture()
async def redis_helper() -> AsyncGenerator[RedisHelper, None]:
redis_helper: RedisHelper = RedisHelper()
try:
redis_helper.init(
await aioredis.create_pool("redis://localhost", minsize=1, maxsize=10, encoding="utf-8"),
)
yield redis_helper
finally:
await redis_helper.close()
class TestRedisHelper:
def test_not_call_init(self) -> None:
with pytest.raises(ConnectionError) as e:
RedisHelper().client
assert e.value.args[0] == "Not init RedisHelper, please run RedisHelper.init"
async def test_init(self) -> None:
redis_helper: RedisHelper = RedisHelper()
# init error pool
with pytest.raises(ConnectionError) as e:
redis_helper.init(None) # type: ignore
assert e.value.args[0] == "conn_pool is none"
# init success
redis_helper.init(
await aioredis.create_pool("redis://localhost", minsize=1, maxsize=10, encoding="utf-8"), namespace="test"
)
assert redis_helper.namespace == "test"
# repeat call
with pytest.raises(ConnectionError) as e:
redis_helper.init(
await aioredis.create_pool("redis://localhost", minsize=1, maxsize=10, encoding="utf-8"),
)
assert e.value.args[0].startswith("Init error, RedisHelper already init")
# close redis pool
await redis_helper.close()
assert redis_helper.closed()
# reinitialize
redis_helper.init(
await aioredis.create_pool("redis://localhost", minsize=1, maxsize=10, encoding="utf-8"),
)
assert not redis_helper.closed()
async def test_redis_dict(self, redis_helper: RedisHelper) -> None:
test_dict: Dict[str, int] = {"a": 1, "b": 2, "c": 3}
await redis_helper.set_dict("test", test_dict, 10)
assert await redis_helper.get_dict("test") == test_dict
assert await redis_helper.execute("ttl", "test") <= 10
assert await redis_helper.del_key("test")
async def test_del_key(self, redis_helper: RedisHelper) -> None:
await redis_helper.execute("set", "test_key", "value")
await redis_helper.del_key("test_key")
assert await redis_helper.execute("get", "test_key") is None
await redis_helper.execute("set", "test_key", "value")
await redis_helper.del_key("test_key", 10)
assert await redis_helper.execute("ttl", "test_key") <= 10
assert await redis_helper.del_key("test_key")
async def test_pipeline(self, redis_helper: RedisHelper) -> None:
await redis_helper.pipeline([("set", "test_key", "value"), ("expire", "test_key", 10)])
assert await redis_helper.execute("ttl", "test_key") <= 10
await redis_helper.pipeline([("set", "test_key", "value"), ("del", "test_key")])
assert not await redis_helper.execute("get", "test_key")
with pytest.raises(errors.PipelineError) as e:
await redis_helper.pipeline([("set", "test_key")])
assert e.value.args[0].startswith("PipelineError errors:")
async def test_hash(self, redis_helper: RedisHelper) -> None:
assert not await redis_helper.hget_dict("test", "key1")
test_dict: Dict[str, int] = {str(i): i for i in range(1000)}
await redis_helper.hmset_dict("test", test_dict)
assert await redis_helper.hget_dict("test", "1") == 1
assert await redis_helper.hmget_dict("test") == test_dict
assert await redis_helper.execute("del", "test")
assert not await redis_helper.hmget_dict("test")
async def test_execute(self) -> None:
redis_helper: RedisHelper = RedisHelper()
with pytest.raises(ConnectionError) as e:
await redis_helper.execute("set", "test", "value")
assert e.value.args[0] == "Not init RedisHelper, please run RedisHelper.init"
redis_helper.init(
await aioredis.create_pool("redis://localhost", minsize=1, maxsize=10, encoding="utf-8"),
)
await redis_helper.execute("set", "test", "value")
with pytest.raises(errors.RedisError):
await redis_helper.hmget_dict("test")
await redis_helper.execute("del", "test")
await redis_helper.close()
async def test_lock(self, redis_helper: RedisHelper) -> None:
lock_1: Lock = redis_helper.lock("test_key")
lock_2: Lock = redis_helper.lock("test_key")
await lock_1.acquire()
assert not await lock_2.do_acquire(str(int(time.time()) + 10))
assert await lock_1.locked()
assert await lock_2.locked()
with pytest.raises(LockError) as e:
await lock_2.release()
assert e.value.args[0] == "Cannot release an unlocked lock"
with pytest.raises(LockError) as e:
await lock_2.do_release(int(time.time()))
assert e.value.args[0] == "Cannot release a lock that's no longer owned"
assert not await lock_2.acquire(1)
await lock_1.release()
assert not await lock_1.locked()
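# Illustrative (non-test) sketch of the lock API exercised above; the key name is
# made up and error handling is application-specific:
#
#   lock = redis_helper.lock("my_resource")
#   if await lock.acquire():
#       try:
#           ...  # critical section
#       finally:
#           await lock.release()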
import angr
import claripy
import logging
import simuvex
import random
import capstone
import signal
import os
from random import shuffle
from utils import *
angr.loggers.disable_root_logger()
log = logging.getLogger("CoreTaint")
log.setLevel("DEBUG")
GLOB_TAINT_DEP_KEY = 'taint_deps'
UNTAINT_DATA = 'untainted_data'
UNTAINTED_VARS = 'untainted_vars'
SEEN_MASTERS = 'seen_masters'
class MyFileHandler(object):
def __init__(self, filename, handlerFactory, **kw):
kw['filename'] = filename
self._handler = handlerFactory(**kw)
def __getattr__(self, n):
if hasattr(self._handler, n):
return getattr(self._handler, n)
raise AttributeError(n)
class TimeOutException(Exception):
def __init__(self, message):
super(TimeOutException, self).__init__(message)
class CoreTaint:
"""
Perform a symbolic-execution-based taint analysis on a given binary to find whether
a tainted path exists between a source and a sink.
"""
def __init__(self, p, interfunction_level=0, log_path='/tmp/coretaint.out',
smart_call=True, follow_unsat=False, try_thumb=False, white_calls=[], black_calls=[], not_follow_any_calls=False,
default_log=True, exit_on_decode_error=True, concretization_strategy=None, force_paths=False,
taint_returns_unfollowed_calls=False, taint_arguments_unfollowed_calls=False, allow_untaint=True,
only_follow_near_calls=False, logger_obj=None):
"""
Initialization function
:param p: angr project
:param interfunction_level: interfunction level
:param log_path: path where the analysis' log is created
:param smart_call: if True a call is followed only if at least one of its parameters is tainted
:param follow_unsat: if true unsat successors are also considered during path exploration. In this case
the collected constraints up to that point will be dropped.
:param try_thumb: try to force thumb mode if some decoding error occurred
:param white_calls: calls to follow in any case
:param default_log: log info by default
:param exit_on_decode_error: terminate the analysis in case of error
:param concretization_strategy: concretization strategy callback
:param force_paths: force a path to be followed even when some decode errors were found
:param allow_untaint: allow variables to be untainted.
"""
global log
self._count_var = 0
self._back_jumps = {}
self._N = 1
self._keep_run = True
self._timeout_triggered = False
self._timer = 0
self._force_exit_after = -1
self._p = p
self._taint_buf = "taint_buf"
self._taint_applied = False
self._taint_buf_size = 4096 # 1 page
self._bogus_return = 0x41414141
self._fully_taint_guard = []
self._white_calls = white_calls
self._black_calls = black_calls
self._taint_returns_unfollowed_calls = taint_returns_unfollowed_calls
self._taint_arguments_unfollowed_calls = taint_arguments_unfollowed_calls
self._allow_untaint = allow_untaint
self._not_follow_any_calls = not_follow_any_calls
self._only_follow_near_calls = only_follow_near_calls
self._deref_taint_address = False
self._deref_instruction = None
self._deref_addr_expr = None
self._deref = (None, None)
self._old_deref = self._deref
self._old_deref_taint_address = self._deref_taint_address
self._old_deref_addr_expr = self._deref_addr_expr
self._interfunction_level = interfunction_level
self._smart_call = smart_call
self._follow_unsat = follow_unsat
self._concretizations = {}
self._summarized_f = {}
self._fp = open(log_path, 'w')
self._interesing_path = {'sink': [], 'deref': [], 'loop': []}
self._try_thumb = try_thumb
self._force_paths = force_paths
self._default_log = default_log
self._exit_on_decode_error = exit_on_decode_error
self._concretization_strategy = self._default_concretization_strategy if concretization_strategy is None else \
concretization_strategy
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fileh = MyFileHandler(log_path + '._log', logging.FileHandler)
fileh.setFormatter(formatter)
log.addHandler(fileh)
def handler(self, signum, frame):
"""
Timeout handler
:param signum: signal number
:param frame: frame
:return:
"""
log.info("Timeout triggered, %s left...." % str(self._force_exit_after))
self._keep_run = False
self._timeout_triggered = True
# time to stop this non-sense!
self._force_exit_after -= 1
self.set_alarm(self._timer, self._force_exit_after)
if self._force_exit_after <= 0:
raise TimeOutException("Hard timeout triggered")
def _get_bb(self, addr):
try:
bl = self._p.factory.block(addr)
except:
bl = None
if bl is None or bl.vex.jumpkind == 'Ijk_NoDecode':
try:
bl = self._p.factory.block(addr, thumb=True)
except:
bl = None
return bl
def _save_taint_flag(self):
"""
Save the tainting related flags
:return:
"""
self._old_deref = self._deref
self._old_deref_taint_address = self._deref_taint_address
self._old_deref_addr_expr = self._deref_addr_expr
def _restore_taint_flags(self):
"""
Restore the tainting-related flags
:return:
"""
self._deref = self._old_deref
self._deref_taint_address = self._old_deref_taint_address
self._deref_addr_expr = self._old_deref_addr_expr
@property
def bogus_return(self):
return self._bogus_return
@property
def taint_buf(self):
return self._taint_buf
@property
def taint_buf_size(self):
return self._taint_buf_size
@property
def taint_applied(self):
return self._taint_applied
@property
def p(self):
return self._p
def safe_load(self, path, addr, size=None, unconstrained=False):
"""
Loads bytes from memory, saving and restoring taint info
:param path: path
:param addr: address
:return: the content in memory at address addr
"""
self._save_taint_flag()
if not size:
size = self._p.arch.bits / 8
state = path.active[0] if not unconstrained else path.unconstrained[0]
mem_cnt = state.memory.load(addr, size)
self._restore_taint_flags()
return mem_cnt
def safe_store(self, path, addr, thing):
"""
Stores bytes in memory, saving and restoring taint info
:param path: path
:param addr: address
:param thing: thing to store
:return:
"""
self._save_taint_flag()
path.active[0].memory.store(addr, thing)
self._restore_taint_flags()
def get_sym_val(self, **args):
return self._get_sym_val(**args)
def _set_deref_bounds(self, ast_node):
"""
Check an ast node and, if it contains a dereferenced address, set
its bounds
:param ast_node: ast node
:return: None
"""
lb = self._deref[0]
ub = self._deref[1]
if hasattr(ast_node, 'op') and ast_node.op == 'Extract' \
and self.is_tainted(ast_node.args[2]):
m = min(ast_node.args[0], ast_node.args[1])
lb = m if lb is None or m < lb else lb
m = max(ast_node.args[0], ast_node.args[1])
ub = m if ub is None or m > ub else ub
self._deref = (lb, ub)
elif hasattr(ast_node, 'args'):
for a in ast_node.args:
self._set_deref_bounds(a)
elif self.is_tainted(ast_node):
self._deref = (0, 0)
def addr_concrete_after(self, state):
"""
Hook for address concretization
:param state: Program state
"""
addr_expr = state.inspect.address_concretization_expr
state.inspect.address_concretization_result = [self._get_target_concretization(addr_expr, state)]
# a tainted buffer's location is used as address
if self.is_tainted(addr_expr, state=state):
self._set_deref_bounds(addr_expr)
self._deref_taint_address = True
self._deref_addr_expr = addr_expr
self._deref_instruction = state.ip.args[0]
if state.inspect.address_concretization_action == 'load':
# new fresh var
name = "cnt_pt_by(" + self._taint_buf + '[' + str(self._deref[0]) + ', ' + str(self._deref[1]) + ']' + ")"
bits = state.inspect.mem_read_length
if type(bits) not in (long, int) and hasattr(bits, 'symbolic'):
bits = state.se.max_int(bits)
var = self._get_sym_val(name=name, bits=bits)
state.memory.store(state.inspect.address_concretization_result[0], var)
def _check_taint(self, state, reg, history):
"""
Check whether a path is completely tainted
:param state: current state
:param reg: Register used to pass the argument to the sink call
:return: True if reg is still tainted before the sink's call, False otherwise
"""
self._bounds = [None, None]
def _find_extract_bounds(ast_node):
if ast_node.op == 'Extract':
a, b = ast_node.args[0], ast_node.args[1]
if a < b:
return a, b
return b, a
for a in ast_node.args:
if hasattr(a, 'args'):
a, b = _find_extract_bounds(a)
if self._bounds[0] is None or (a is not None and a <= self._bounds[0]):
self._bounds[0] = a
if self._bounds[1] is None or (b is not None and b >= self._bounds[1]):
self._bounds[1] = b
return self._bounds[0], self._bounds[1]
def _find_name(ast_node):
if type(ast_node) == claripy.ast.BV and \
ast_node.op == 'BVS':
return ast_node.args[0]
elif hasattr(ast_node, 'args'):
for a in ast_node.args:
name = _find_name(a)
if name:
return name
return None
def _check_guards(tainted_var, history):
self._bounds = [None, None]
lb, ub = _find_extract_bounds(tainted_var)
tainted_buff_name = _find_name(tainted_var)
for a, g in history:
if self.is_tainted(g):
# scan the path's guards and collect those relative to
# the tainted portion of memory
t_op = g.args[0] if self.is_tainted(g.args[0]) else g.args[1]
sec_op = g.args[1] if self.is_tainted(g.args[0]) else g.args[0]
if not self.is_tainted(sec_op):
name_op = _find_name(t_op)
if name_op != tainted_buff_name:
# we consider only the conditions relative
# to the tainted variable which reached the sink
continue
# the condition untaints part of the tainted buffer
# get the portion of untainted buffer
self._bounds = [None, None]
lb_op, ub_op = _find_extract_bounds(t_op)
if lb_op is None:
log.error("The whole buffer seems to be untainted, check me!")
return False
if lb >= lb_op:
lb = lb_op
if ub <= ub_op:
ub = ub_op
if lb >= ub:
return False
else:
# both operands involved in the guard are tainted
self._fully_taint_guard.append((a, g))
return True
self._fully_taint_guard = []
if hasattr(state.regs, reg):
ast = getattr(state.regs, reg)
if self.is_tainted(ast):
# TODO: check also below part?
if _check_guards(ast, history):
return True
# save taint flags, the following load may change them
self._save_taint_flag()
# the function will dereference the argument
# resulting in a read from our tainting location
tmp_s = state.copy()
try:
cnt = tmp_s.memory.load(ast, 1)
except TimeOutException as t:
raise t
except Exception:
log.warning("Unable to concretize %s" % hex(ast))
return False
# the load might have set some flags, let's restore them
self._restore_taint_flags()
if self.is_tainted(cnt):
# the variable reaching the sink is tainted
return _check_guards(cnt, history)
return False
raise Exception("Architecture %s has no register %s" % (self._p.arch.name, reg))
def _save_sink_info(self, path, reg, sink_address):
"""
Dump the info about a tainted sink into the log file
:param path: path found to be tainted
:param reg: register pointing to the tainted buffer
:param sink_address: sink address
:return:
"""
if not self._default_log:
return
f = self._fp
reg_cnt = getattr(self.get_state(path).regs, str(reg))
mem_cnt = None
is_addr = False
tmp_s = self.get_state(path).copy()
if not self.is_tainted(reg_cnt, path=path):
is_addr = True
self._save_taint_flag()
mem_cnt = tmp_s.memory.load(reg_cnt)
self._restore_taint_flags()
key_path = (str(mem_cnt), str(reg_cnt), str(reg))
if key_path in self._interesing_path['sink']:
return
self._interesing_path['sink'].append(key_path)
f.write("===================== Start Info path =====================\n")
f.write("Sink address: %s\n" % hex(sink_address))
if is_addr:
f.write("\nReason: sink accepts %s which points to the memory location %s.\n" % (str(reg), reg_cnt))
f.write("\nContent of %s: %s\n" % (str(reg_cnt), str(mem_cnt)))
else:
f.write("\nReason: sink accepts parameter %s which is tainted.\n" % (str(reg)))
f.write("\nContent of %s: %s\n" % (str(reg), str(reg_cnt)))
f.write("\n\nPath \n----------------\n")
path = ' -> '.join([hex(a) for a in self.get_state(path).history.bbl_addrs])
f.write(path + '\n\n')
f.write("Fully tainted conditions \n----------------\n")
if not self._fully_taint_guard:
f.write('None\n')
else:
for fc in self._fully_taint_guard:
f.write(fc[0] + ': ')
f.write(str(fc[1]) + '\n\n')
f.write("===================== End Info path =====================\n\n\n")
def _save_deref_info(self, path, addr_expr):
"""
Dump information about the dereference of a tainted address into the log file
:param path: path found to be tainted
:return:
"""
if not self._default_log:
return
f = self._fp
code_addr = self.get_addr(path)
key_path = (str(code_addr), str(addr_expr))
if key_path in self._interesing_path['deref']:
return
self._interesing_path['deref'].append(key_path)
f.write("===================== Start Info path =====================\n")
f.write("Dereference address at: %s\n" % hex(code_addr))
f.write("\nReason: at location %s a tainted variable is dereferenced and used as address.\n" % hex(code_addr))
f.write("\nContent of the tainted variable: %s\n" % str(addr_expr))
f.write("\n\nTainted Path \n----------------\n")
path = ' -> '.join([hex(a) for a in self.get_state(path).history.bbl_addrs])
f.write(path + '\n\n')
f.write("===================== End Info path =====================\n\n\n")
def _save_loop_info(self, path, addr, cond):
"""
Dump the info about a tainted variable guarding a loop
:param path: path found to be tainted
:return:
"""
if not self._default_log:
return
f = self._fp
key_path = (str(addr), str(cond))
if key_path in self._interesing_path['loop']:
return
self._interesing_path['loop'].append(key_path)
f.write("===================== Start Info path =====================\n")
f.write("Dangerous loop condition at address %s\n" % hex(addr))
f.write("\nReason: a tainted variable is used in the guard of a loop condition\n")
f.write("\nCondition: %s\n" % cond)
f.write("\n\nTainted Path \n----------------\n")
path = ' -> '.join([hex(a) for a in self.get_state(path).history.bbl_addrs])
f.write(path + '\n\n')
f.write("===================== End Info path =====================\n\n\n")
def _default_concretization_strategy(self, state, cnt):#, extra_constraints=()):
extra_constraints = state.inspect.address_concretization_extra_constraints
if not extra_constraints:
extra_constraints = tuple()
concs = state.se.any_n_int(cnt, 50, extra_constraints=extra_constraints)
return random.choice(concs)
def _get_target_concretization(self, var, state):
"""
Concretization must be done carefully in order to perform
a precise taint analysis. We concretize according to the following
strategy:
* every symbolic leaf of an ast node is concretized to a unique value, according to its name.
In this way we obtain the following advantages:
a = get_pts();
b = a
c = a + 2
d = b + 1 + 1
d = get_pts()
conc(a) = conc(b)
conc(c) = conc(d)
conc(d) != any other concretizations
:param var: ast node
:param state: current state
:return: concretization value
"""
def get_key_cnt(x):
# angr by default creates a unique id for every new symbolic variable.
# as in karonte we often have to copy the state, step and check some
# quantities before step() with the current state, two identical variables might assume
# two different names. Therefore, we should not consider the unique _id_ added to symbolic variables
# created by angr
ret = str(x)
if '_' in str(x) and not self.is_tainted(x):
splits = str(x).split('_')
idx = splits[-2]
if not idx.isdigit():
log.error("get_key_cnt: Symbolic ID parsing failed, using the whole id: %s" % ret)
return ret
ret = '_'.join(splits[:-2]) + '_'
ret += '_'.join(splits[-1:])
return ret
# check if unconstrained
state_cp = state.copy()
se = state_cp.se
leafs = [l for l in var.recursive_leaf_asts]
if not leafs:
conc = self._concretization_strategy(state_cp, var)
if not se.solution(var, conc):
conc = se.any_int(var)
key_cnt = get_key_cnt(var)
self._concretizations[key_cnt] = conc
return conc
for cnt in leafs:
key_cnt = get_key_cnt(cnt)
# concretize all unconstrained children
if cnt.symbolic:
# first check whether the value is already constrained
if key_cnt in self._concretizations.keys():
conc = self._concretizations[key_cnt]
if state_cp.se.solution(cnt, conc):
state_cp.add_constraints(cnt == conc)
continue
conc = self._concretization_strategy(state_cp, cnt)
self._concretizations[key_cnt] = conc
state_cp.add_constraints(cnt == conc)
val = state_cp.se.any_int(var)
return val
def is_tainted(self, var, path=None, state=None, unconstrained=False):
def is_untaint_constraint_present(v, untaint_var_strs):
for u in untaint_var_strs:
# get argument name
if v.args[0] in u:
# variable is untainted
return True
# no untaint found, var is tainted!
return False
# Nothing is tainted
if self._taint_buf not in str(var):
return False
#
# something is tainted
#
if not self._allow_untaint or (not path and not state):
return True
# get contraints
if path:
state = path.active[0] if not unconstrained else path.unconstrained[0]
untaint_var_strs = state.globals[UNTAINT_DATA][UNTAINTED_VARS]
if not untaint_var_strs:
return True
taint_leafs = list(set([l for l in var.recursive_leaf_asts if self._taint_buf in str(l)]))
taints = set()
for l in taint_leafs:
if l in taints:
continue
# search an untaint constraint for this taint variable
if not is_untaint_constraint_present(l, untaint_var_strs):
return True
taints.add(l)
return False
def add_taint_glob_dep(self, master, slave, path):
"""
Add a taint dependency: if master gets untainted, slave should be untainted
:param master: master expression
:param slave: slave expression
:param path: path
:return:
"""
if not self.is_tainted(master):
return
leafs = list(set([l for l in master.recursive_leaf_asts if self.is_tainted(l)]))
key = tuple(map(str, leafs))
if key not in self.get_state(path).globals[GLOB_TAINT_DEP_KEY]:
self.get_state(path).globals[GLOB_TAINT_DEP_KEY][key] = []
self.get_state(path).globals[GLOB_TAINT_DEP_KEY][key].append(slave)
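    # Illustrative use of the dependency map above (hypothetical expressions): if
    # `derived` was computed from the tainted `master_expr`, register the pair so
    # that untainting the master later also untaints the derived value:
    #
    #   self.add_taint_glob_dep(master_expr, derived, current_path)
    #   ...
    #   self.do_recursive_untaint(master_expr, current_path)   # clears both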
def _do_recursive_untaint_core(self, dst, path):
# given an expression to untaint, we untaint every single tainted variable in it.
# E.g., given (taint_x + taint_y) to untaint, both variables get untainted as
# they can no longer assume arbitrary values down this path.
if not self._allow_untaint:
return
state = self.get_state(path)
leafs = list(set([l for l in dst.recursive_leaf_asts if self.is_tainted(l)]))
# then we use the collected untainted variables
# and check whether we should untaint some other variables
state.globals[UNTAINT_DATA][UNTAINTED_VARS] += map(str, leafs)
deps = dict(state.globals[GLOB_TAINT_DEP_KEY])
i = 0
while i < len(deps.keys()):
master, slave = deps.items()[i]
# if not already untainted, let's consider it
if master not in state.globals[UNTAINT_DATA][SEEN_MASTERS]:
untainted_vars = set(state.globals[UNTAINT_DATA][UNTAINTED_VARS])
set_master = set(master)
# if every variable of the master is already untainted, we can untaint its slaves
if set_master.intersection(untainted_vars) == set_master:
state.globals[UNTAINT_DATA][SEEN_MASTERS].append(master)
for entry in deps[master]:
self._do_recursive_untaint_core(entry, path)
# restart!
i = 0
continue
i += 1
def do_recursive_untaint(self, dst, path):
return self._do_recursive_untaint_core(dst, path)
def apply_taint(self, current_path, addr, taint_id, bit_size=None):
self._save_taint_flag()
bit_size = bit_size if bit_size else self._taint_buf_size
t = self._get_sym_val(name=self._taint_buf + '_' + taint_id + '_', bits=bit_size).reversed
self.get_state(current_path).memory.store(addr, t)
self._restore_taint_flags()
self._taint_applied = True
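    # Illustrative call site (hypothetical path and address): taint the buffer at
    # `buf_addr` before exploration so that is_tainted() recognises anything
    # derived from it:
    #
    #   ct.apply_taint(current_path, buf_addr, taint_id='recv_buf')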
def _check_if_sink_or_source(self, current_path, guards_info, _, sinks_info=(), sources_info=()):
"""
Check if a tainted sink is present in the current block of a path
:param current_path: current path
:param guards_info: info about the guards in the current path
:param sinks_info: sinks' information: ((sinks), reg)
:param sources_info: sources' information ((source), reg)
:return: True if the sink is tainted, false otherwise
"""
current_cp = current_path.copy(copy_states=True)
succ = current_cp.step()
# get the successor state
if not succ.active:
# check if it was an unconstrained call.
# sometimes angr fucks it up
bl = self._get_bb(self.get_addr(current_path))
if bl.vex.jumpkind != 'Ijk_Call':
# Heuristic: if not a function call, we do not consider dereference
# of tainted locations, since it is unlikely to be used as address
return False
suc_state = current_cp.unconstrained[0]
succ.active = [suc_state]
suc_state = self.get_state(succ)
# SOURCES:
# look for sources:
for source, reg_source in sources_info:
bb = self._get_bb(self.get_addr(current_path))
# the bb contains the call to the source
if any([x for x in bb.vex.statements if x.tag == 'Ist_IMark' and x.addr == source]):
# time to taint
if reg_source == 'RETURN':
addr_to_taint = self._get_sym_val(name='reg_ret_', inc=False)
else:
addr_to_taint = getattr(suc_state.regs, reg_source)
# check whether it is tainted first! A tainted address passed to a source
# might overwrite sensitive data.
if self.is_tainted(addr_to_taint):
self._save_deref_info(current_path, addr_to_taint)
t = self._get_sym_val(name=self._taint_buf, bits=self._taint_buf_size).reversed
self._save_taint_flag()
self.get_state(current_path).memory.store(addr_to_taint, t)
self._restore_taint_flags()
self._taint_applied = True
# SINKS:
# look for sinks (only if we have successors. A sink is a function!):
succ_addr = self.get_addr(succ)
found = False
for sink, reg_sink in sinks_info:
if succ_addr == sink:
if self._check_taint(suc_state, reg_sink, guards_info):
log.info("HOOOORAY: Detected a possibly tainted path")
self._save_sink_info(succ, reg_sink, sink)
found = True
if found:
return True
# or if a tainted address is dereferenced
if self._deref_taint_address:
self._deref_taint_address = False
bl = self._get_bb(self._deref_instruction)
if bl.vex.jumpkind == 'Ijk_Call':
log.info("Dereferenced tainted address")
self._save_deref_info(current_path, self._deref_addr_expr)
# self._keep_run = False
# eventually if we are in a loop guarded by a tainted variable
if len(succ.active) > 1 and any([a for a in succ.active if a.addr in [t for t in self.get_state(current_path).history.bbl_addrs]]):
cond_guard = [g for g in self.get_state(succ).guards][-1]
for node in cond_guard.recursive_leaf_asts:
if self.is_tainted(node):
self._save_loop_info(current_path, self.get_addr(current_path), cond_guard)
return True
return False
def _get_sym_val(self, name='x_', bits=None, inc=True, explicit=False):
if bits is None:
bits = self._p.arch.bits
if explicit:
var = claripy.BVS(name=name, size=bits, explicit_name=True)
else:
var = claripy.BVS(name=(name + '_' + str(self._count_var) + '_' + str(self._p.arch.bits)), size=bits,
explicit_name=True)
if inc:
self._count_var += 1
return var
def get_addr(self, run):
return run.active[0].ip.args[0]
def get_state(self, run):
return run.active[0]
def is_or_points_to_tainted_data(self, x, path, unconstrained=False):
return self.is_tainted(x, path=path, unconstrained=unconstrained) or \
self.is_tainted(self.safe_load(path, x, unconstrained=unconstrained), path=path, unconstrained=unconstrained)
def _set_fake_ret_succ(self, path, state, addr, ret):
"""
Create a fake ret successor of a given path.
:param path: current path
:param: state: state to set in the new succ
:param addr: address where the fake ret block will return
:param ret: return of the current function
:return: angr path
"""
new_s = state.copy()
new_s.history.jumpkind = "Ijk_FakeRet"
# check whether any of the function parameters are tainted
nargs = get_arity(self._p, self.get_addr(path))
next_cp = path.copy(copy_states=True).step()
to_taint = False
ord_arg_tainted = -1
for i in xrange(nargs):
name = self._p.arch.register_names[ordered_agument_regs[self._p.arch.name][i]]
try:
val_arg = getattr(self.get_state(next_cp).regs, name)
except:
break
if self.is_or_points_to_tainted_data(val_arg, next_cp):
to_taint = True
ord_arg_tainted = i
break
# return value
name = 'reg_ret_'
if self._taint_returns_unfollowed_calls and to_taint:
name = self._taint_buf + '_' + name
ret_reg = return_regs[self._p.arch.name]
link_reg = link_regs[self._p.arch.name]
new_s.regs.pc = addr
setattr(new_s.regs, self._p.arch.register_names[link_reg], ret)
setattr(new_s.regs, self._p.arch.register_names[ret_reg], self._get_sym_val(name=name))
# function arguments
if to_taint and self._taint_arguments_unfollowed_calls:
for i in xrange(nargs):
if i == ord_arg_tainted:
continue
name_reg = self._p.arch.register_names[ordered_agument_regs[self._p.arch.name][i]]
addr = getattr(new_s.regs, name_reg)
if addr.concrete and addr.args[0] < self.p.loader.main_object.min_addr:
continue
taint_name = self._taint_buf + '_' + name_reg
new_s.memory.store(addr, self._get_sym_val(name=taint_name, bits=self._taint_buf_size))
return path.copy(
stashes={'active': [new_s], 'unsat': [], 'pruned': [], 'unconstrained': [], 'deadended': []})
def _is_summarized(self, prev_path, suc_path, current_depth):
# first check if function is summarized
addr = self.get_addr(suc_path)
if self._summarized_f:
for s_addr in self._summarized_f.keys():
if addr == s_addr:
self._summarized_f[s_addr](self, prev_path, suc_path)
return True
return False
def _follow_call(self, prev_path, suc_path, current_depth):
"""
Checks if a call should be followed or not: if any of its parameters is tainted
and the current depth of the transitive closure allows it, yes; otherwise no.
:param prev_path: previous path
:param suc_path: successive path
:param current_depth: current depth of transitive closure
:return: True if call should be followed, false otherwise
"""
if self._not_follow_any_calls:
return False
# first check if function is summarized
prev_addr = self.get_addr(prev_path)
addr = self.get_addr(suc_path)
if self._only_follow_near_calls:
try:
plt = self.p.loader.main_object.reverse_plt
if addr in plt:
return False
except:
pass
if addr in self._black_calls or prev_addr in self._black_calls:
return False
# check if call falls within bound binary
if addr > self._p.loader.max_addr or addr < self._p.loader.min_addr:
return False
# if the function is summarized by angr, we follow it
if self._p._should_use_sim_procedures:
# consider also the next addr in case the current one is a trampoline (e.g., plt)
trp = suc_path.copy(copy_states=True)
trp.step()
trp_addr = self.get_addr(trp)
if self._p.is_hooked(addr) or self._p.is_hooked(trp_addr):
return True
if addr in self._white_calls:
return True
if current_depth <= 0:
return False
if not self._smart_call:
return True
if not self._taint_applied:
return False
bl = self._get_bb(self.get_addr(prev_path))
puts = [s for s in bl.vex.statements if s.tag == 'Ist_Put']
expected = 0
index = 0
set_regs = []
# type of regs we are looking for
reg_ty = 'r' if self._p.arch.bits == 32 else 'x'
while True:
if index >= len(puts):
break
p = puts[index]
if self._p.arch.register_names[p.offset] == reg_ty + str(expected):
set_regs.append(reg_ty + str(expected))
expected += 1
index = 0
continue
index += 1
self._save_taint_flag()
for r in set_regs:
reg_cnt = getattr(self.get_state(suc_path).regs, r)
# check if it is pointing to a tainted location
tmp_s = self.get_state(suc_path).copy()
try:
mem_cnt = tmp_s.memory.load(reg_cnt, 50) # FIXME set this N to a meaningful value
except TimeOutException as t:
raise t
except Exception as e:
# state is unconstrained
log.warning("Tried to dereference a non-pointer!")
continue
# we might have dereferenced wrongly a tainted variable during the tests before
if (self.is_tainted(reg_cnt) or self.is_tainted(mem_cnt)) and current_depth > 0:
self._restore_taint_flags()
return True
self._restore_taint_flags()
return False
def _follow_back_jump(self, current_path, next_path, guards_info):
"""
Check if a back jump (probably a loop) should be followed.
:param current_path: current path
:param next_path: next path
:param guards_info: guards information
:return: true if should back jump, false otherwise
"""
key = hash(''.join(sorted(list(set([x[0] for x in guards_info])))))
bj = (key, self.get_addr(next_path), self.get_addr(current_path))
if bj not in self._back_jumps.keys():
self._back_jumps[bj] = 1
elif self._back_jumps[bj] > self._N:
# we do not want to follow the same back jump infinite times
return False
else:
self._back_jumps[bj] += 1
return True
def _check_sat_state(self, current_path, current_guards):
# just try to concretize any variable
cp_state = current_path.active[0].copy()
try:
reg_name = self._p.arch.register_names[return_regs[self._p.arch.name]]
reg = getattr(cp_state.regs, reg_name)
cp_state.se.any_int(reg)
self.last_sat = (current_path.copy(copy_states=True), current_guards)
except TimeOutException as t:
raise t
except Exception as e:
print str(e)
return False
return True
def _vex_fucked_up(self, current_path, next_path):
current_path_addr = current_path.active[0].ip.args[0]
next_path_addr = next_path.active[0].ip.args[0]
bl = self._get_bb(current_path_addr)
puts = [p for p in bl.vex.statements if p.tag == 'Ist_Put']
lr = self._p.arch.register_names[link_regs[self._p.arch.name]]
for p in puts:
if self._p.arch.register_names[p.offset] == lr:
break
else:
return False
if next_path_addr == self._next_inst(bl):
log.warning(" VEX fucked up big time!")
return True
return False
def _drop_constraints(self, path):
self.get_state(path).release_plugin('solver_engine')
self.get_state(path).downsize()
#FIXME: change offset according arch.
def _next_inst(self, bl):
return bl.instruction_addrs[-1] + 4
def _flat_explore(self, current_path, check_path_fun, guards_info, current_depth, **kwargs):
"""
Find a tainted path between a source and a sink
:param current_path: current path
:param check_path_fun: function to call for every block in the path
:param guards_info: current info about the guards in the current path
:param kwargs: additional arguments to pass to check_path_fun
:return: the tainted path between the source and the sink, if any
"""
if not self._keep_run:
log.debug("Backtracking due to stop")
return
current_path_addr = self.get_addr(current_path)
log.debug("%s: Analyzing block %s", self._p.filename.split('/')[-1], hex(current_path_addr))
if not self._check_sat_state(current_path, guards_info) and not self._timeout_triggered:
log.error("State got messed up!")
raise Exception("State became UNSAT")
# check whether we reached a sink
try:
check_path_fun(current_path, guards_info, current_depth, **kwargs)
except Exception as e:
if not self._keep_run:
return
log.error("Function check_path errored out: %s" % str(e))
try:
succ_path = current_path.copy(copy_states=True).step()
except Exception as e:
print("ERROR: %s" % str(e))
return
# try thumb
if succ_path and succ_path.errored and self._try_thumb and not self._force_paths:
succ_path = current_path.copy(copy_states=True).step(thumb=True)
if succ_path and succ_path.errored and self._try_thumb and not self._force_paths:
if self._exit_on_decode_error:
self._keep_run = False
return
succ_states_unsat = succ_path.unsat if self._follow_unsat else []
succ_states_sat = succ_path.active
if succ_path.deadended and not succ_states_sat and not succ_states_unsat:
log.debug("Backtracking from dead path")
return
if not succ_states_sat:
# check if it was an unconstrained call.
# sometimes angr fucks it up
bl = self._get_bb(current_path_addr)
if not bl:
return
if bl.vex.jumpkind == 'Ijk_Call':
# create a fake successor
# which should have been created
# before.
# FIXME: I should use get_below_block
# but as of now I don't want to use the CFG
unc_state = succ_path.unconstrained[0]
ret_addr = self._next_inst(bl)
link_reg = self._p.arch.register_names[link_regs[self._p.arch.name]]
ret_func = getattr(self.get_state(current_path).regs, link_reg)
tmp_path = self._set_fake_ret_succ(current_path, unc_state, ret_addr, ret_func)
succ_states_sat = [self.get_state(tmp_path)]
# register sat and unsat information so that later we can drop the constraints
for s in succ_states_sat:
s.sat = True
for s in succ_states_unsat:
s.sat = False
# collect and prepare the successors to be analyzed
#shuffle(succ_states_sat)
succ_states = succ_states_sat + succ_states_unsat
for next_state in succ_states:
if hasattr(next_state.ip, 'symbolic') and next_state.ip.symbolic:
if next_state.sat:
continue
log.warning("Got a symbolic IP, perhaps a non-handled switch statement? FIX ME... ")
continue
next_path = succ_path.copy(stashes={'active': [next_state.copy()], 'unsat': [], 'pruned': [], 'unconstrained': [], 'deadended': []})
if not next_state.sat:
# unsat successors, drop the constraints
self._drop_constraints(next_path)
next_depth = current_depth
# First, let's see if we can follow the calls
try:
if self.get_state(next_path).history.jumpkind == 'Ijk_Call' and not self._vex_fucked_up(current_path, next_path):
if not self._is_summarized(current_path, next_path, current_depth):
if not self._follow_call(current_path, next_path, current_depth):
# if there is not fake ret we create one
if not any(s.history.jumpkind == "Ijk_FakeRet" for s in succ_states):
state = self.get_state(next_path)
link_reg = self._p.arch.register_names[link_regs[self._p.arch.name]]
ret_addr = getattr(state.regs, link_reg)
ret_func = getattr(self.get_state(current_path).regs, link_reg)
next_path = self._set_fake_ret_succ(current_path, state, ret_addr, ret_func)
else:
# the fake ret is already present, therefore we just skip
# the call
continue
else:
log.debug("Following function call to %s" % hex(self.get_addr(next_path)))
next_depth = current_depth - 1
except Exception as e:
log.error("ERROR: %s" % str(e))
return
try:
if self.get_state(next_path).history.jumpkind == 'Ijk_Ret':
next_depth = current_depth + 1
except:
continue
# we have a back jump
if self.get_state(next_path).history.jumpkind == 'Ijk_Boring' and \
self.get_addr(next_path) <= self.get_addr(current_path) and \
not self._follow_back_jump(current_path, next_path, guards_info):
log.debug("breaking loop")
continue
# the successor leads out of the function, we do not want to follow it
if self.get_addr(next_path) == self._bogus_return:
log.debug("hit a return")
continue
# save the info about the guards of this path
new_guards_info = list(guards_info)
current_guards = [g for g in self.get_state(next_path).guards]
if current_guards and len(new_guards_info) < len(current_guards):
new_guards_info.append([hex(self.get_addr(current_path)), current_guards[-1]])
# next step!
self._flat_explore(next_path, check_path_fun, new_guards_info, next_depth, **kwargs)
log.debug("Back to block %s", hex(self.get_addr(current_path)))
log.debug("Backtracking")
def set_project(self, p):
"""
Set the project
:param p: angr project
:return:
"""
self._p = p
def stop_run(self):
"""
Stop the taint analysis
:return:
"""
self._keep_run = False
def flat_explore(self, state, check_path_fun, guards_info, force_thumb=False, **kwargs):
self._keep_run = True
initial_path = self._p.factory.path(state)
initial_path = self._p.factory.simgr(initial_path, save_unconstrained=True, save_unsat=True)
current_depth = self._interfunction_level
if force_thumb:
# set thumb mode
initial_path = initial_path.step(thumb=True)[0]
self._flat_explore(initial_path, check_path_fun, guards_info, current_depth, **kwargs)
def start_logging(self):
if not self._default_log:
return
self._fp.write("Log Start \n"
"Binary: " +
self._p.filename + '\n'
"=================================\n\n")
def log(self, msg):
self._fp.write(msg)
def stop_logging(self):
if self._default_log:
log.info("Done.")
log.info("Results in " + self._fp.name)
self._fp.close()
def _init_bss(self, state):
bss = [s for s in self._p.loader.main_bin.sections if s.name == '.bss']
if not bss:
return
bss = bss[0]
min_addr = bss.min_addr
max_addr = bss.max_addr
for a in range(min_addr, max_addr + 1):
var = self._get_sym_val(name="bss_", bits=8)
state.memory.store(a, var)
def set_alarm(self, timer, n_tries=0):
# register the SIGALRM handler and start the timeout countdown
signal.signal(signal.SIGALRM, self.handler)
signal.alarm(timer)
self._force_exit_after = n_tries
self._timer = timer
def unset_alarm(self):
signal.alarm(0)
def run(self, state, sinks_info, sources_info, summarized_f={}, init_bss=True,
check_func=None, force_thumb=False, use_smart_concretization=True):
if use_smart_concretization:
state.inspect.b(
'address_concretization',
simuvex.BP_AFTER,
action=self.addr_concrete_after
)
state.globals[GLOB_TAINT_DEP_KEY] = {}
state.globals[UNTAINT_DATA] = {UNTAINTED_VARS:[], SEEN_MASTERS: []}
self._count_var = 0
self._back_jumps = {}
self._keep_run = True
self._taint_applied = False
self._fully_taint_guard = []
self._deref_taint_address = False
self._deref_addr_expr = None
self._deref = (None, None)
self._old_deref = self._deref
self._old_deref_taint_address = self._deref_taint_address
self._old_deref_addr_expr = self._deref_addr_expr
self._concretizations = {}
self._summarized_f = summarized_f
self._timeout_triggered = False
check_func = self._check_if_sink_or_source if check_func is None else check_func
if init_bss:
log.info("init .bss")
self._init_bss(state)
try:
self.flat_explore(state, check_func, [], force_thumb=force_thumb, sinks_info=sinks_info,
sources_info=sources_info)
except TimeOutException:
log.warning("Hard timeout triggered")
if self._timeout_triggered:
self.log("\nTimed out...\n")
log.debug("Timeout triggered")
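# Illustrative driver sketch (commented out): how a client such as karonte might wire
# CoreTaint up. The binary path, addresses and register names are invented for
# demonstration, and the legacy angr/simuvex APIs used by this module are assumed.
#
#   p = angr.Project('/path/to/firmware_bin', auto_load_libs=False)
#   ct = CoreTaint(p, interfunction_level=1, smart_call=True,
#                  log_path='/tmp/coretaint.out')
#   state = p.factory.blank_state(addr=0x10000)
#   ct.set_alarm(600)                       # hard timeout, in seconds
#   ct.start_logging()
#   ct.run(state,
#          sinks_info=[(0x10400, 'r0')],    # (sink address, argument register)
#          sources_info=[(0x10200, 'r0')])  # (source address, register or 'RETURN')
#   ct.stop_logging()
#   ct.unset_alarm()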
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_raises, assert_allclose,
assert_array_max_ulp, assert_raises_regex, suppress_warnings,
)
import pytest
class TestHistogram(object):
def setup(self):
pass
def teardown(self):
pass
def test_simple(self):
n = 100
v = np.random.rand(n)
(a, b) = histogram(v)
# check if the sum of the bins equals the number of samples
assert_equal(np.sum(a, axis=0), n)
# check that the bin counts are evenly spaced when the data is from
# a linear function
(a, b) = histogram(np.linspace(0, 10, 100))
assert_array_equal(a, 10)
def test_one_bin(self):
# Ticket 632
hist, edges = histogram([1, 2, 3, 4], [1, 2])
assert_array_equal(hist, [2, ])
assert_array_equal(edges, [1, 2])
assert_raises(ValueError, histogram, [1, 2], bins=0)
h, e = histogram([1, 2], bins=1)
assert_equal(h, np.array([2]))
assert_allclose(e, np.array([1., 2.]))
def test_normed(self):
sup = suppress_warnings()
with sup:
rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*')
# Check that the integral of the density equals 1.
n = 100
v = np.random.rand(n)
a, b = histogram(v, normed=True)
area = np.sum(a * np.diff(b))
assert_almost_equal(area, 1)
assert_equal(len(rec), 1)
sup = suppress_warnings()
with sup:
rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*')
# Check with non-constant bin widths (buggy but backwards
# compatible)
v = np.arange(10)
bins = [0, 1, 5, 9, 10]
a, b = histogram(v, bins, normed=True)
area = np.sum(a * np.diff(b))
assert_almost_equal(area, 1)
assert_equal(len(rec), 1)
def test_density(self):
# Check that the integral of the density equals 1.
n = 100
v = np.random.rand(n)
a, b = histogram(v, density=True)
area = np.sum(a * np.diff(b))
assert_almost_equal(area, 1)
# Check with non-constant bin widths
v = np.arange(10)
bins = [0, 1, 3, 6, 10]
a, b = histogram(v, bins, density=True)
assert_array_equal(a, .1)
assert_equal(np.sum(a * np.diff(b)), 1)
# Test that passing False works too
a, b = histogram(v, bins, density=False)
assert_array_equal(a, [1, 2, 3, 4])
# Variable bin widths are especially useful to deal with
# infinities.
v = np.arange(10)
bins = [0, 1, 3, 6, np.inf]
a, b = histogram(v, bins, density=True)
assert_array_equal(a, [.1, .1, .1, 0.])
# Taken from a bug report from <NAME> on the numpy-discussion
# mailing list Aug. 6, 2010.
counts, dmy = np.histogram(
[1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
assert_equal(counts, [.25, 0])
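    # Density sketch for reference (easy to verify by hand): with density=True each
    # count is divided by (total samples * bin width), so the area under the
    # histogram sums to 1:
    #   hist, edges = np.histogram([1, 2, 2, 3], bins=[1, 2, 3, 4], density=True)
    #   # hist == [0.25, 0.5, 0.25] and (hist * np.diff(edges)).sum() == 1.0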
def test_outliers(self):
# Check that outliers are not tallied
a = np.arange(10) + .5
# Lower outliers
h, b = histogram(a, range=[0, 9])
assert_equal(h.sum(), 9)
# Upper outliers
h, b = histogram(a, range=[1, 10])
assert_equal(h.sum(), 9)
# Normalization
h, b = histogram(a, range=[1, 9], density=True)
assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)
# Weights
w = np.arange(10) + .5
h, b = histogram(a, range=[1, 9], weights=w, density=True)
assert_equal((h * np.diff(b)).sum(), 1)
h, b = histogram(a, bins=8, range=[1, 9], weights=w)
assert_equal(h, w[1:-1])
def test_arr_weights_mismatch(self):
a = np.arange(10) + .5
w = np.arange(11) + .5
with assert_raises_regex(ValueError, "same shape as"):
h, b = histogram(a, range=[1, 9], weights=w, density=True)
def test_type(self):
# Check the type of the returned histogram
a = np.arange(10) + .5
h, b = histogram(a)
assert_(np.issubdtype(h.dtype, np.integer))
h, b = histogram(a, density=True)
assert_(np.issubdtype(h.dtype, np.floating))
h, b = histogram(a, weights=np.ones(10, int))
assert_(np.issubdtype(h.dtype, np.integer))
h, b = histogram(a, weights=np.ones(10, float))
assert_(np.issubdtype(h.dtype, np.floating))
def test_f32_rounding(self):
# gh-4799, check that the rounding of the edges works with float32
x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
assert_equal(counts_hist.sum(), 3.)
def test_bool_conversion(self):
# gh-12107
# Reference integer histogram
a = np.array([1, 1, 0], dtype=np.uint8)
int_hist, int_edges = np.histogram(a)
# Should raise a warning on booleans
# Ensure that the histograms are equivalent, need to suppress
# the warnings to get the actual outputs
with suppress_warnings() as sup:
rec = sup.record(RuntimeWarning, 'Converting input from .*')
hist, edges = np.histogram([True, True, False])
# A warning should be issued
assert_equal(len(rec), 1)
assert_array_equal(hist, int_hist)
assert_array_equal(edges, int_edges)
def test_weights(self):
v = np.random.rand(100)
w = np.ones(100) * 5
a, b = histogram(v)
na, nb = histogram(v, density=True)
wa, wb = histogram(v, weights=w)
nwa, nwb = histogram(v, weights=w, density=True)
assert_array_almost_equal(a * 5, wa)
assert_array_almost_equal(na, nwa)
# Check weights are properly applied.
v = np.linspace(0, 10, 10)
w = np.concatenate((np.zeros(5), np.ones(5)))
wa, wb = histogram(v, bins=np.arange(11), weights=w)
assert_array_almost_equal(wa, w)
# Check with integer weights
wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
assert_array_equal(wa, [4, 5, 0, 1])
wa, wb = histogram(
[1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)
assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
# Check weights with non-uniform bin widths
a, b = histogram(
np.arange(9), [0, 1, 3, 6, 10],
weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
assert_almost_equal(a, [.2, .1, .1, .075])
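    # Weights sketch for reference: each sample contributes its weight instead of 1,
    # so np.histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1]) yields the counts
    # [4, 5, 0, 1] asserted above (the range [1, 4] is split into four equal bins).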
def test_exotic_weights(self):
# Test the use of weights that are not integer or floats, but e.g.
# complex numbers or object types.
# Complex weights
values = np.array([1.3, 2.5, 2.3])
weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])
# Check with custom bins
wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
# Check with even bins
wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
# Decimal weights
from decimal import Decimal
values = np.array([1.3, 2.5, 2.3])
weights = np.array([Decimal(1), Decimal(2), Decimal(3)])
# Check with custom bins
wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
# Check with even bins
wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
def test_no_side_effects(self):
# This is a regression test that ensures that values passed to
# ``histogram`` are unchanged.
values = np.array([1.3, 2.5, 2.3])
np.histogram(values, range=[-10, 10], bins=100)
assert_array_almost_equal(values, [1.3, 2.5, 2.3])
def test_empty(self):
a, b = histogram([], bins=([0, 1]))
assert_array_equal(a, np.array([0]))
assert_array_equal(b, np.array([0, 1]))
def test_error_binnum_type(self):
# Tests if right Error is raised if bins argument is float
vals = np.linspace(0.0, 1.0, num=100)
histogram(vals, 5)
assert_raises(TypeError, histogram, vals, 2.4)
def test_finite_range(self):
# Normal ranges should be fine
vals = np.linspace(0.0, 1.0, num=100)
histogram(vals, range=[0.25,0.75])
assert_raises(ValueError, histogram, vals, range=[np.nan,0.75])
assert_raises(ValueError, histogram, vals, range=[0.25,np.inf])
def test_invalid_range(self):
# start of range must be < end of range
vals = np.linspace(0.0, 1.0, num=100)
with assert_raises_regex(ValueError, "max must be larger than"):
np.histogram(vals, range=[0.1, 0.01])
def test_bin_edge_cases(self):
# Ensure that floating-point computations correctly place edge cases.
arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))
mask = hist > 0
left_edges = edges[:-1][mask]
right_edges = edges[1:][mask]
for x, left, right in zip(arr, left_edges, right_edges):
assert_(x >= left)
assert_(x < right)
def test_last_bin_inclusive_range(self):
arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
assert_equal(hist[-1], 1)
def test_bin_array_dims(self):
# gracefully handle bins object > 1 dimension
vals = np.linspace(0.0, 1.0, num=100)
bins = np.array([[0, 0.5], [0.6, 1.0]])
with assert_raises_regex(ValueError, "must be 1d"):
np.histogram(vals, bins=bins)
def test_unsigned_monotonicity_check(self):
# Ensures ValueError is raised if bins not increasing monotonically
# when bins contain unsigned values (see #9222)
arr = np.array([2])
bins = np.array([1, 3, 1], dtype='uint64')
with assert_raises(ValueError):
hist, edges = np.histogram(arr, bins=bins)
def test_object_array_of_0d(self):
# gh-7864
assert_raises(ValueError,
histogram, [np.array(0.4) for i in range(10)] + [-np.inf])
assert_raises(ValueError,
histogram, [np.array(0.4) for i in range(10)] + [np.inf])
# these should not crash
np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001])
np.histogram([np.array(0.5) for i in range(10)] + [.5])
def test_some_nan_values(self):
# gh-7503
one_nan = np.array([0, 1, np.nan])
all_nan = np.array([np.nan, np.nan])
# the internal comparisons with NaN give warnings
sup = suppress_warnings()
sup.filter(RuntimeWarning)
with sup:
# can't infer range with nan
assert_raises(ValueError, histogram, one_nan, bins='auto')
assert_raises(ValueError, histogram, all_nan, bins='auto')
# explicit range solves the problem
h, b = histogram(one_nan, bins='auto', range=(0, 1))
assert_equal(h.sum(), 2) # nan is not counted
h, b = histogram(all_nan, bins='auto', range=(0, 1))
assert_equal(h.sum(), 0) # nan is not counted
# as does an explicit set of bins
h, b = histogram(one_nan, bins=[0, 1])
assert_equal(h.sum(), 2) # nan is not counted
h, b = histogram(all_nan, bins=[0, 1])
assert_equal(h.sum(), 0) # nan is not counted
def test_datetime(self):
begin = np.datetime64('2000-01-01', 'D')
offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20])
bins = np.array([0, 2, 7, 20])
dates = begin + offsets
date_bins = begin + bins
td = np.dtype('timedelta64[D]')
# Results should be the same for integer offsets or datetime values.
# For now, only explicit bins are supported, since linspace does not
# work on datetimes or timedeltas
d_count, d_edge = histogram(dates, bins=date_bins)
t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td))
i_count, i_edge = histogram(offsets, bins=bins)
assert_equal(d_count, i_count)
assert_equal(t_count, i_count)
assert_equal((d_edge - begin).astype(int), i_edge)
assert_equal(t_edge.astype(int), i_edge)
assert_equal(d_edge.dtype, dates.dtype)
assert_equal(t_edge.dtype, td)
def do_signed_overflow_bounds(self, dtype):
exponent = 8 * np.dtype(dtype).itemsize - 1
arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype)
hist, e = histogram(arr, bins=2)
assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4])
assert_equal(hist, [1, 1])
def test_signed_overflow_bounds(self):
self.do_signed_overflow_bounds(np.byte)
self.do_signed_overflow_bounds(np.short)
self.do_signed_overflow_bounds(np.intc)
self.do_signed_overflow_bounds(np.int_)
self.do_signed_overflow_bounds(np.longlong)
def do_precision_lower_bound(self, float_small, float_large):
eps = np.finfo(float_large).eps
arr = np.array([1.0], float_small)
range = np.array([1.0 + eps, 2.0], float_large)
# test is looking for behavior when the bounds change between dtypes
if range.astype(float_small)[0] != 1:
return
# previously crashed
count, x_loc = np.histogram(arr, bins=1, range=range)
assert_equal(count, [1])
# gh-10322 means that the type comes from arr - this may change
assert_equal(x_loc.dtype, float_small)
def do_precision_upper_bound(self, float_small, float_large):
eps = np.finfo(float_large).eps
arr = np.array([1.0], float_small)
range = np.array([0.0, 1.0 - eps], float_large)
# test is looking for behavior when the bounds change between dtypes
if range.astype(float_small)[-1] != 1:
return
# previously crashed
count, x_loc = np.histogram(arr, bins=1, range=range)
assert_equal(count, [1])
# gh-10322 means that the type comes from arr - this may change
assert_equal(x_loc.dtype, float_small)
def do_precision(self, float_small, float_large):
self.do_precision_lower_bound(float_small, float_large)
self.do_precision_upper_bound(float_small, float_large)
def test_precision(self):
# not looping results in a useful stack trace upon failure
self.do_precision(np.half, np.single)
self.do_precision(np.half, np.double)
self.do_precision(np.half, np.longdouble)
self.do_precision(np.single, np.double)
self.do_precision(np.single, np.longdouble)
self.do_precision(np.double, np.longdouble)
def test_histogram_bin_edges(self):
hist, e = histogram([1, 2, 3, 4], [1, 2])
edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
assert_array_equal(edges, e)
arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
hist, e = histogram(arr, bins=30, range=(-0.5, 5))
edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5))
assert_array_equal(edges, e)
hist, e = histogram(arr, bins='auto', range=(0, 1))
edges = histogram_bin_edges(arr, bins='auto', range=(0, 1))
assert_array_equal(edges, e)
class TestHistogramOptimBinNums(object):
"""
Provide test coverage when using provided estimators for optimal number of
bins
"""
def test_empty(self):
estimator_list = ['fd', 'scott', 'rice', 'sturges',
'doane', 'sqrt', 'auto', 'stone']
# check it can deal with empty data
for estimator in estimator_list:
a, b = histogram([], bins=estimator)
assert_array_equal(a, np.array([0]))
assert_array_equal(b, np.array([0, 1]))
def test_simple(self):
"""
Straightforward testing with a mixture of linspace data (for
consistency). All test values have been precomputed and the values
shouldn't change
"""
# Some basic sanity checking, with some fixed data.
# Checking for the correct number of bins
basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},
500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},
5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}
for testlen, expectedResults in basic_test.items():
# Create some sort of non uniform data to test with
# (2 peak uniform mixture)
x1 = np.linspace(-10, -1, testlen // 5 * 2)
x2 = np.linspace(1, 10, testlen // 5 * 3)
x = np.concatenate((x1, x2))
for estimator, numbins in expectedResults.items():
a, b = np.histogram(x, estimator)
assert_equal(len(a), numbins, err_msg="For the {0} estimator "
"with datasize of {1}".format(estimator, testlen))
def test_small(self):
"""
Smaller datasets have the potential to cause issues with the data
adaptive methods, especially the FD method. All bin numbers have been
precalculated.
"""
small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
'doane': 1, 'sqrt': 1, 'stone': 1},
2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
'doane': 1, 'sqrt': 2, 'stone': 1},
3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
'doane': 3, 'sqrt': 2, 'stone': 1}}
for testlen, expectedResults in small_dat.items():
testdat = np.arange(testlen)
for estimator, expbins in expectedResults.items():
a, b = np.histogram(testdat, estimator)
assert_equal(len(a), expbins, err_msg="For the {0} estimator "
"with datasize of {1}".format(estimator, testlen))
def test_incorrect_methods(self):
"""
        Check a ValueError is raised when an unknown string is passed in
"""
check_list = ['mad', 'freeman', 'histograms', 'IQR']
for estimator in check_list:
assert_raises(ValueError, histogram, [1, 2, 3], estimator)
def test_novariance(self):
"""
Check that methods handle no variance in data
Primarily for Scott and FD as the SD and IQR are both 0 in this case
"""
novar_dataset = np.ones(100)
novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}
for estimator, numbins in novar_resultdict.items():
a, b = np.histogram(novar_dataset, estimator)
assert_equal(len(a), numbins, err_msg="{0} estimator, "
"No Variance test".format(estimator))
def test_limited_variance(self):
"""
Check when IQR is 0, but variance exists, we return the sturges value
and not the fd value.
"""
lim_var_data = np.ones(1000)
lim_var_data[:3] = 0
lim_var_data[-4:] = 100
edges_auto = histogram_bin_edges(lim_var_data, 'auto')
assert_equal(edges_auto, np.linspace(0, 100, 12))
edges_fd = histogram_bin_edges(lim_var_data, 'fd')
assert_equal(edges_fd, np.array([0, 100]))
edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')
assert_equal(edges_sturges, np.linspace(0, 100, 12))
def test_outlier(self):
"""
Check the FD, Scott and Doane with outliers.
The FD estimates a smaller binwidth since it's less affected by
outliers. Since the range is so (artificially) large, this means more
bins, most of which will be empty, but the data of interest usually is
unaffected. The Scott estimator is more affected and returns fewer bins,
despite most of the variance being in one area of the data. The Doane
estimator lies somewhere between the other two.
"""
xcenter = np.linspace(-10, 10, 50)
outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))
outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6}
for estimator, numbins in outlier_resultdict.items():
a, b = np.histogram(outlier_dataset, estimator)
assert_equal(len(a), numbins)
def test_scott_vs_stone(self):
"""Verify that Scott's rule and Stone's rule converges for normally distributed data"""
def nbins_ratio(seed, size):
rng = np.random.RandomState(seed)
x = rng.normal(loc=0, scale=2, size=size)
a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0])
return a / (a + b)
ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
for seed in range(10)]
# the average difference between the two methods decreases as the dataset size increases.
avg = abs(np.mean(ll, axis=0) - 0.5)
assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)
def test_simple_range(self):
"""
Straightforward testing with a mixture of linspace data (for
consistency). Adding in a 3rd mixture that will then be
completely ignored. All test values have been precomputed and
        they shouldn't change.
"""
# some basic sanity checking, with some fixed data.
# Checking for the correct number of bins
basic_test = {
50: {'fd': 8, 'scott': 8, 'rice': 15,
'sturges': 14, 'auto': 14, 'stone': 8},
500: {'fd': 15, 'scott': 16, 'rice': 32,
'sturges': 20, 'auto': 20, 'stone': 80},
5000: {'fd': 33, 'scott': 33, 'rice': 69,
'sturges': 27, 'auto': 33, 'stone': 80}
}
for testlen, expectedResults in basic_test.items():
# create some sort of non uniform data to test with
# (3 peak uniform mixture)
x1 = np.linspace(-10, -1, testlen // 5 * 2)
x2 = np.linspace(1, 10, testlen // 5 * 3)
x3 = np.linspace(-100, -50, testlen)
x = np.hstack((x1, x2, x3))
for estimator, numbins in expectedResults.items():
                a, b = np.histogram(x, estimator, range=(-20, 20))
msg = "For the {0} estimator".format(estimator)
msg += " with datasize of {0}".format(testlen)
assert_equal(len(a), numbins, err_msg=msg)
@pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
'stone', 'rice', 'sturges'])
def test_signed_integer_data(self, bins):
# Regression test for gh-14379.
a = np.array([-2, 0, 127], dtype=np.int8)
hist, edges = np.histogram(a, bins=bins)
hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins)
assert_array_equal(hist, hist32)
assert_array_equal(edges, edges32)
def test_simple_weighted(self):
"""
Check that weighted data raises a TypeError
"""
estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
for estimator in estimator_list:
assert_raises(TypeError, histogram, [1, 2, 3],
estimator, weights=[1, 2, 3])
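# Illustrative sketch (not part of the original test suite): shows how the
# estimator strings exercised above can be used directly through the public
# np.histogram_bin_edges API. The data and the set of estimators are
# arbitrary examples, not values asserted by any test.
def _demo_bin_estimator_strings():
    import numpy as np
    rng = np.random.RandomState(0)
    data = np.concatenate([rng.normal(-5, 1, 500), rng.normal(5, 1, 500)])
    # Each estimator derives its own bin count from the data.
    return {est: len(np.histogram_bin_edges(data, bins=est)) - 1
            for est in ('fd', 'scott', 'sturges', 'auto')}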
class TestHistogramdd(object):
def test_simple(self):
x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
[.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
H, edges = histogramdd(x, (2, 3, 3),
range=[[-1, 1], [0, 3], [0, 3]])
answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
[[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
assert_array_equal(H, answer)
# Check normalization
ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
H, edges = histogramdd(x, bins=ed, density=True)
assert_(np.all(H == answer / 12.))
# Check that H has the correct shape.
H, edges = histogramdd(x, (2, 3, 4),
range=[[-1, 1], [0, 3], [0, 4]],
density=True)
answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
[[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
assert_array_almost_equal(H, answer / 6., 4)
# Check that a sequence of arrays is accepted and H has the correct
# shape.
z = [np.squeeze(y) for y in np.split(x, 3, axis=1)]
H, edges = histogramdd(
z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
answer = np.array([[[0, 0], [0, 0], [0, 0]],
[[0, 1], [0, 0], [1, 0]],
[[0, 1], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0]]])
assert_array_equal(H, answer)
Z = np.zeros((5, 5, 5))
Z[list(range(5)), list(range(5)), list(range(5))] = 1.
H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
assert_array_equal(H, Z)
def test_shape_3d(self):
# All possible permutations for bins of different lengths in 3D.
bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
(4, 5, 6))
r = np.random.rand(10, 3)
for b in bins:
H, edges = histogramdd(r, b)
assert_(H.shape == b)
def test_shape_4d(self):
# All possible permutations for bins of different lengths in 4D.
bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
(5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
(7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
(4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
(6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
(5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
r = np.random.rand(10, 4)
for b in bins:
H, edges = histogramdd(r, b)
assert_(H.shape == b)
def test_weights(self):
v = np.random.rand(100, 2)
hist, edges = histogramdd(v)
n_hist, edges = histogramdd(v, density=True)
w_hist, edges = histogramdd(v, weights=np.ones(100))
assert_array_equal(w_hist, hist)
w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True)
assert_array_equal(w_hist, n_hist)
w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
assert_array_equal(w_hist, 2 * hist)
def test_identical_samples(self):
x = np.zeros((10, 2), int)
hist, edges = histogramdd(x, bins=2)
assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))
def test_empty(self):
a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
assert_array_max_ulp(a, np.array([[0.]]))
a, b = np.histogramdd([[], [], []], bins=2)
assert_array_max_ulp(a, np.zeros((2, 2, 2)))
def test_bins_errors(self):
# There are two ways to specify bins. Check for the right errors
# when mixing those.
x = np.arange(8).reshape(2, 4)
assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
assert_raises(
ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
def test_inf_edges(self):
# Test using +/-inf bin edges works. See #1788.
with np.errstate(invalid='ignore'):
x = np.arange(6).reshape(3, 2)
expected = np.array([[1, 0], [0, 1], [0, 1]])
h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
assert_allclose(h, expected)
h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
assert_allclose(h, expected)
h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
assert_allclose(h, expected)
def test_rightmost_binedge(self):
# Test event very close to rightmost binedge. See Github issue #4266
x = [0.9999999995]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
assert_(hist[1] == 1.)
x = [1.0]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
assert_(hist[1] == 1.)
x = [1.0000000001]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
assert_(hist[1] == 0.0)
x = [1.0001]
bins = [[0., 0.5, 1.0]]
hist, _ = histogramdd(x, bins=bins)
assert_(hist[0] == 0.0)
assert_(hist[1] == 0.0)
def test_finite_range(self):
vals = np.random.random((100, 3))
histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
assert_raises(ValueError, histogramdd, vals,
range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
assert_raises(ValueError, histogramdd, vals,
range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
def test_equal_edges(self):
""" Test that adjacent entries in an edge array can be equal """
x = np.array([0, 1, 2])
y = np.array([0, 1, 2])
x_edges = np.array([0, 2, 2])
y_edges = 1
hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
hist_expected = np.array([
[2.],
[1.], # x == 2 falls in the final bin
])
assert_equal(hist, hist_expected)
def test_edge_dtype(self):
""" Test that if an edge array is input, its type is preserved """
x = np.array([0, 10, 20])
y = x / 10
x_edges = np.array([0, 5, 15, 20])
y_edges = x_edges / 10
hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
assert_equal(edges[0].dtype, x_edges.dtype)
assert_equal(edges[1].dtype, y_edges.dtype)
def test_large_integers(self):
big = 2**60 # Too large to represent with a full precision float
x = np.array([0], np.int64)
x_edges = np.array([-1, +1], np.int64)
y = big + x
y_edges = big + x_edges
hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
assert_equal(hist[0, 0], 1)
def test_density_non_uniform_2d(self):
        # Defines the following grid:
        #
        #    0 2     8
        #   0+-+-----+
        #    + |     +
        #    + |     +
        #   6+-+-----+
        #   8+-+-----+
x_edges = np.array([0, 2, 8])
y_edges = np.array([0, 6, 8])
relative_areas = np.array([
[3, 9],
[1, 3]])
# ensure the number of points in each region is proportional to its area
x = np.array([1] + [1]*3 + [7]*3 + [7]*9)
y = np.array([7] + [1]*3 + [7]*3 + [1]*9)
# sanity check that the above worked as intended
hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
assert_equal(hist, relative_areas)
# resulting histogram should be uniform, since counts and areas are proportional
hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)
assert_equal(hist, 1 / (8*8))
def test_density_non_uniform_1d(self):
# compare to histogram to show the results are the same
v = np.arange(10)
bins = np.array([0, 1, 3, 6, 10])
hist, edges = histogram(v, bins, density=True)
hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
assert_equal(hist, hist_dd)
assert_equal(edges, edges_dd[0])
def test_density_via_normed(self):
# normed should simply alias to density argument
v = np.arange(10)
bins = np.array([0, 1, 3, 6, 10])
hist, edges = histogram(v, bins, density=True)
hist_dd, edges_dd = histogramdd((v,), (bins,), normed=True)
assert_equal(hist, hist_dd)
assert_equal(edges, edges_dd[0])
def test_density_normed_redundancy(self):
v = np.arange(10)
bins = np.array([0, 1, 3, 6, 10])
with assert_raises_regex(TypeError, "Cannot specify both"):
hist_dd, edges_dd = histogramdd((v,), (bins,),
density=True,
normed=True)
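# Illustrative sketch (not part of the original test suite): mirrors the
# density checks above by showing that, with density=True, the histogramdd
# output integrates to one over non-uniform bins. The data and bin edges
# below are arbitrary.
def _demo_histogramdd_density():
    import numpy as np
    v = np.arange(10)
    bins = np.array([0, 1, 3, 6, 10])
    hist, (edges,) = np.histogramdd((v,), (bins,), density=True)
    widths = np.diff(edges)
    return float(np.sum(hist * widths))  # 1.0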
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simplest implementation with a MWh target
"""
from __future__ import division
from __future__ import print_function
from builtins import next
import csv
import os.path
import pandas as pd
from pyomo.environ import Var, Constraint, NonNegativeReals, Expression, value
from db.common_functions import spin_on_database_lock
from gridpath.auxiliary.db_interface import setup_results_import
def add_model_components(m, d, scenario_directory, subproblem, stage):
"""
:param m:
:param d:
:return:
"""
m.RPS_Shortage_MWh = Var(
m.RPS_ZONE_PERIODS_WITH_RPS, within=NonNegativeReals
)
def violation_expression_rule(mod, z, p):
return mod.RPS_Shortage_MWh[z, p] * mod.rps_allow_violation[z]
m.RPS_Shortage_MWh_Expression = Expression(
m.RPS_ZONE_PERIODS_WITH_RPS, rule=violation_expression_rule
)
def rps_target_rule(mod, z, p):
"""
        Total delivered RPS-eligible energy plus any allowed shortage must be at least the RPS target
:param mod:
:param z:
:param p:
:return:
"""
return mod.Total_Delivered_RPS_Energy_MWh[z, p] \
+ mod.RPS_Shortage_MWh_Expression[z, p] \
>= mod.RPS_Target[z, p]
m.RPS_Target_Constraint = Constraint(m.RPS_ZONE_PERIODS_WITH_RPS,
rule=rps_target_rule)
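# Hedged sketch (not part of the GridPath module API): a toy concrete model
# showing the same shortage-variable pattern used in add_model_components
# above, i.e. delivered energy plus an allowed shortage must cover the
# target. All names and numbers below are made up for illustration.
def _toy_rps_constraint_sketch():
    from pyomo.environ import (
        ConcreteModel, Param, Var, Expression, Constraint, NonNegativeReals
    )
    m = ConcreteModel()
    m.delivered_mwh = Param(initialize=80.0)
    m.target_mwh = Param(initialize=100.0)
    m.allow_violation = Param(initialize=1)  # 1 allows shortage, 0 forbids it
    m.shortage_mwh = Var(within=NonNegativeReals)
    m.shortage_expression = Expression(
        rule=lambda mod: mod.shortage_mwh * mod.allow_violation
    )
    m.target_constraint = Constraint(
        rule=lambda mod: mod.delivered_mwh + mod.shortage_expression
        >= mod.target_mwh
    )
    return m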
def export_results(scenario_directory, subproblem, stage, m, d):
"""
:param scenario_directory:
:param subproblem:
:param stage:
:param m:
:param d:
:return:
"""
with open(os.path.join(scenario_directory, str(subproblem), str(stage), "results",
"rps.csv"), "w", newline="") as rps_results_file:
writer = csv.writer(rps_results_file)
writer.writerow(["rps_zone", "period",
"discount_factor", "number_years_represented",
"rps_target_mwh",
"delivered_rps_energy_mwh",
"curtailed_rps_energy_mwh",
"total_rps_energy_mwh",
"fraction_of_rps_target_met",
"fraction_of_rps_energy_curtailed",
"rps_shortage_mwh"])
for (z, p) in m.RPS_ZONE_PERIODS_WITH_RPS:
writer.writerow([
z,
p,
m.discount_factor[p],
m.number_years_represented[p],
value(m.RPS_Target[z, p]),
value(m.Total_Delivered_RPS_Energy_MWh[z, p]),
value(m.Total_Curtailed_RPS_Energy_MWh[z, p]),
value(m.Total_Delivered_RPS_Energy_MWh[z, p]) +
value(m.Total_Curtailed_RPS_Energy_MWh[z, p]),
1 if float(m.rps_target_mwh[z, p]) == 0
else value(
m.Total_Delivered_RPS_Energy_MWh[z, p]) /
float(m.rps_target_mwh[z, p]),
0 if (value(m.Total_Delivered_RPS_Energy_MWh[z, p])
+ value(m.Total_Curtailed_RPS_Energy_MWh[z, p])) == 0
else value(m.Total_Curtailed_RPS_Energy_MWh[z, p]) /
(value(m.Total_Delivered_RPS_Energy_MWh[z, p])
+ value(m.Total_Curtailed_RPS_Energy_MWh[z, p])),
value(m.RPS_Shortage_MWh_Expression[z, p])
])
def save_duals(m):
m.constraint_indices["RPS_Target_Constraint"] = \
["rps_zone", "period", "dual"]
def summarize_results(scenario_directory, subproblem, stage):
"""
:param scenario_directory:
:param subproblem:
:param stage:
:return:
Summarize RPS policy results
"""
summary_results_file = os.path.join(
scenario_directory, subproblem, stage, "results", "summary_results.txt"
)
# Open in 'append' mode, so that results already written by other
    # modules are not overwritten
with open(summary_results_file, "a") as outfile:
outfile.write(
"\n### RPS RESULTS ###\n"
)
# All these files are small, so won't be setting indices
# Get the main RPS results file
rps_df = \
pd.read_csv(os.path.join(scenario_directory, str(subproblem), str(stage), "results",
"rps.csv")
)
# Get the RPS dual results
rps_duals_df = \
pd.read_csv(os.path.join(scenario_directory, str(subproblem), str(stage), "results",
"RPS_Target_Constraint.csv")
)
# # Get the input metadata for periods
# periods_df = \
# pd.read_csv(os.path.join(scenario_directory, "inputs", "periods.tab"),
# sep="\t")
# Join the above
results_df = pd.DataFrame(
pd.merge(
left=rps_df,
right=rps_duals_df,
how="left",
left_on=["rps_zone", "period"],
right_on=["rps_zone", "period"]
)
)
results_df.set_index(["rps_zone", "period"], inplace=True,
verify_integrity=True)
# Calculate:
# 1) the percent of RPS energy that was curtailed
# 2) the marginal RPS cost per MWh based on the RPS constraint duals --
# to convert back to 'real' dollars, we need to divide by the discount
# factor and the number of years a period represents
results_df["percent_curtailed"] = pd.Series(
index=results_df.index, dtype="float64"
)
results_df["rps_marginal_cost_per_mwh"] = pd.Series(
index=results_df.index, dtype="float64"
)
pd.options.mode.chained_assignment = None # default='warn'
for indx, row in results_df.iterrows():
if (results_df.delivered_rps_energy_mwh[indx] +
results_df.curtailed_rps_energy_mwh[indx]) == 0:
pct = 0
else:
pct = results_df.curtailed_rps_energy_mwh[indx] \
/ (results_df.delivered_rps_energy_mwh[indx] +
results_df.curtailed_rps_energy_mwh[indx]) * 100
results_df.percent_curtailed[indx] = pct
results_df.rps_marginal_cost_per_mwh[indx] = \
results_df.dual[indx] \
/ (results_df.discount_factor[indx] *
results_df.number_years_represented[indx])
# Drop unnecessary columns before exporting
results_df.drop("discount_factor", axis=1, inplace=True)
results_df.drop("number_years_represented", axis=1, inplace=True)
results_df.drop("total_rps_energy_mwh", axis=1, inplace=True)
results_df.drop("fraction_of_rps_target_met", axis=1, inplace=True)
results_df.drop("fraction_of_rps_energy_curtailed", axis=1, inplace=True)
results_df.drop("rps_shortage_mwh", axis=1, inplace=True)
# Rearrange the columns
cols = results_df.columns.tolist()
cols = cols[0:3] + [cols[4]] + [cols[3]] + [cols[5]]
results_df = results_df[cols]
results_df.sort_index(inplace=True)
with open(summary_results_file, "a") as outfile:
results_df.to_string(outfile, float_format="{:,.2f}".format)
outfile.write("\n")
def import_results_into_database(
scenario_id, subproblem, stage, c, db, results_directory, quiet
):
"""
:param scenario_id:
:param c:
:param db:
:param results_directory:
:param quiet:
:return:
"""
# Delete prior results and create temporary import table for ordering
setup_results_import(
conn=db, cursor=c,
table="results_system_rps",
scenario_id=scenario_id, subproblem=subproblem, stage=stage
)
# Load results into the temporary table
results = []
with open(os.path.join(results_directory,
"rps.csv"), "r") as \
rps_file:
reader = csv.reader(rps_file)
next(reader) # skip header
for row in reader:
rps_zone = row[0]
period = row[1]
discount_factor = row[2]
number_years = row[3]
rps_target = row[4]
rps_provision = row[5]
curtailment = row[6]
total = row[7]
fraction_met = row[8]
fraction_curtailed = row[9]
shortage = row[10]
results.append(
(scenario_id, rps_zone, period, subproblem, stage,
discount_factor, number_years, rps_target,
rps_provision, curtailment, total,
fraction_met, fraction_curtailed, shortage)
)
insert_temp_sql = """
INSERT INTO temp_results_system_rps{}
(scenario_id, rps_zone, period, subproblem_id, stage_id,
discount_factor, number_years_represented, rps_target_mwh,
delivered_rps_energy_mwh, curtailed_rps_energy_mwh,
total_rps_energy_mwh,
fraction_of_rps_target_met, fraction_of_rps_energy_curtailed,
rps_shortage_mwh)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
""".format(scenario_id)
spin_on_database_lock(conn=db, cursor=c, sql=insert_temp_sql, data=results)
# Insert sorted results into permanent results table
insert_sql = """
INSERT INTO results_system_rps
(scenario_id, rps_zone, period, subproblem_id, stage_id,
discount_factor, number_years_represented, rps_target_mwh,
delivered_rps_energy_mwh, curtailed_rps_energy_mwh,
total_rps_energy_mwh,
fraction_of_rps_target_met, fraction_of_rps_energy_curtailed,
rps_shortage_mwh)
SELECT scenario_id, rps_zone, period, subproblem_id, stage_id,
discount_factor, number_years_represented, rps_target_mwh,
delivered_rps_energy_mwh, curtailed_rps_energy_mwh,
total_rps_energy_mwh,
fraction_of_rps_target_met, fraction_of_rps_energy_curtailed,
rps_shortage_mwh
FROM temp_results_system_rps{}
ORDER BY scenario_id, rps_zone, period, subproblem_id, stage_id;
""".format(scenario_id)
spin_on_database_lock(conn=db, cursor=c, sql=insert_sql, data=(),
many=False)
# Update duals
duals_results = []
with open(os.path.join(results_directory, "RPS_Target_Constraint.csv"),
"r") as rps_duals_file:
reader = csv.reader(rps_duals_file)
next(reader) # skip header
for row in reader:
duals_results.append(
(row[2], row[0], row[1], scenario_id, subproblem, stage)
)
duals_sql = """
UPDATE results_system_rps
SET dual = ?
WHERE rps_zone = ?
AND period = ?
AND scenario_id = ?
AND subproblem_id = ?
AND stage_id = ?;
"""
spin_on_database_lock(conn=db, cursor=c, sql=duals_sql, data=duals_results)
# Calculate marginal RPS cost per MWh
mc_sql = """
UPDATE results_system_rps
SET rps_marginal_cost_per_mwh =
dual / (discount_factor * number_years_represented)
WHERE scenario_id = ?
AND subproblem_id = ?
and stage_id = ?;
"""
spin_on_database_lock(conn=db, cursor=c, sql=mc_sql,
data=(scenario_id, subproblem, stage),
many=False)
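# Hedged sketch (not part of the GridPath module): the temp-table pattern
# used by import_results_into_database above, reduced to plain sqlite3.
# Rows are bulk-inserted into a temporary table first and then copied into
# the permanent table with a deterministic ORDER BY. The table and column
# names here are invented for illustration.
def _sqlite_temp_table_ordering_sketch(rows):
    import sqlite3
    conn = sqlite3.connect(":memory:")
    c = conn.cursor()
    c.execute("CREATE TABLE results (zone TEXT, period INTEGER, value REAL)")
    c.execute("CREATE TEMPORARY TABLE temp_results "
              "(zone TEXT, period INTEGER, value REAL)")
    c.executemany("INSERT INTO temp_results VALUES (?, ?, ?)", rows)
    c.execute("INSERT INTO results "
              "SELECT zone, period, value FROM temp_results "
              "ORDER BY zone, period")
    conn.commit()
    return c.execute("SELECT * FROM results").fetchall()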
# root_gnn/src/models/decay_simulator.py
import tensorflow as tf
import sonnet as snt
from graph_nets import utils_tf
from graph_nets import modules
from graph_nets import blocks
from root_gnn.src.models.base import InteractionNetwork
from root_gnn.src.models.base import make_mlp_model
from root_gnn.src.models.base import MLPGraphNetwork
LATENT_SIZE = 128  # do not change; must match the value used in base.py
class DecaySimulator(snt.Module):
def __init__(self, name="DecaySimulator"):
super(DecaySimulator, self).__init__(name=name)
self._node_linear = make_mlp_model()
self._node_rnn = snt.GRU(hidden_size=LATENT_SIZE, name='node_rnn')
self._node_proper = snt.nets.MLP([4], activate_final=False)
self._edge_block = blocks.EdgeBlock(
edge_model_fn=make_mlp_model,
use_edges=False,
use_receiver_nodes=True,
use_sender_nodes=True,
use_globals=False,
name='edge_encoder_block'
)
self._node_encoder_block = blocks.NodeBlock(
node_model_fn=make_mlp_model,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=False,
name='node_encoder_block'
)
self._global_encoder_block = blocks.GlobalBlock(
global_model_fn=make_mlp_model,
use_edges=True,
use_nodes=True,
use_globals=False,
nodes_reducer=tf.math.unsorted_segment_sum,
edges_reducer=tf.math.unsorted_segment_sum,
name='global_encoder_block'
)
self._core = MLPGraphNetwork()
# self._core = InteractionNetwork(
# edge_model_fn=make_mlp_model,
# node_model_fn=make_mlp_model,
# reducer=tf.math.unsorted_segment_sum
# )
# # Transforms the outputs into appropriate shapes.
node_output_size = 64
        node_fn = lambda: snt.Sequential([
snt.nets.MLP([node_output_size],
activation=tf.nn.relu, # default is relu
name='node_output')])
global_output_size = 1
global_fn = lambda: snt.Sequential([
snt.nets.MLP([global_output_size],
activation=tf.nn.relu, # default is relu
name='global_output'),
tf.sigmoid])
self._output_transform = modules.GraphIndependent(
edge_model_fn=None,
node_model_fn=node_fn,
global_model_fn=global_fn)
def __call__(self, input_op, max_nodes):
node_pred = []
global_pred = []
node_hidden_state = tf.zeros([1, LATENT_SIZE], dtype=tf.float32, name='initial_node_hidden')
latent = input_op
for inode in range(max_nodes):
# print("----loopping----", inode)
# print(latent.nodes.numpy())
# print(latent.n_node.numpy())
# print(latent.edges.numpy())
# print(latent.senders.numpy())
# print(latent.receivers.numpy())
nodes = latent.nodes
# encode nodes, edges and globals
# write separately for easily debugging
latent = self._node_encoder_block(latent)
latent = self._edge_block(latent)
latent = self._global_encoder_block(latent)
# message passing and output predictions
latent = self._core(latent)
latent = self._output_transform(latent)
node_embedding = self._node_linear(latent.nodes)
node_embedding = tf.math.reduce_sum(node_embedding, axis=0, keepdims=True, name='reduce_node_embedding')
# print("node embedding:", node_embedding.shape)
# print("node hiddent state:", node_hidden_state.shape)
node_embedding, node_hidden_state = self._node_rnn(node_embedding, node_hidden_state)
node_output = self._node_proper(node_embedding)
# save output for loss calculations
global_pred.append(latent.globals)
node_pred.append(tf.squeeze(node_output))
# update the graph by adding a new node with features as predicted
# construct a fully-connected graph
# n_node_tobe = [tf.add(latent.n_node[0], 1)]
n_nodes = tf.math.reduce_sum(latent.n_node)
n_nodes = tf.add(n_nodes, 1)
nodes_tobe = tf.concat([nodes, node_output], axis=0, name='add_new_node')
rng = tf.range(n_nodes)
receivers, senders = tf.meshgrid(rng, rng)
n_edge = n_nodes * n_nodes
ind = tf.cast(1 - tf.eye(n_nodes), bool)
receivers = tf.boolean_mask(receivers, ind)
senders = tf.boolean_mask(senders, ind)
n_edge -= n_nodes
receivers = tf.reshape(tf.cast(receivers, tf.int32), [n_edge])
senders = tf.reshape(tf.cast(senders, tf.int32), [n_edge])
edges = tf.ones([1, 1], dtype=tf.float32)
n_edge = tf.reshape(n_edge, [1])
n_node = tf.reshape(n_nodes, [1])
latent = latent.replace(nodes=nodes_tobe, n_node=n_node,\
n_edge=n_edge, edges=edges, senders=senders, receivers=receivers)
        return node_pred, global_pred
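# Hedged sketch (not part of the original model): the fully-connected graph
# wiring used at the end of DecaySimulator.__call__ above, isolated so the
# meshgrid/boolean_mask trick is easier to follow. Self-loops are removed by
# masking out the diagonal of an identity matrix.
def _fully_connected_edges_sketch(n_nodes):
    rng = tf.range(n_nodes)
    receivers, senders = tf.meshgrid(rng, rng)
    keep = tf.cast(1 - tf.eye(n_nodes), bool)  # False on the diagonal
    receivers = tf.boolean_mask(receivers, keep)
    senders = tf.boolean_mask(senders, keep)
    n_edge = n_nodes * (n_nodes - 1)
    return (tf.reshape(tf.cast(senders, tf.int32), [n_edge]),
            tf.reshape(tf.cast(receivers, tf.int32), [n_edge]))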
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper around NVIDIA Tools Extension for profiling MONAI ignite workflow
"""
from typing import TYPE_CHECKING, Optional, Tuple, Union
from monai.config import IgniteInfo
from monai.utils import ensure_tuple, min_version, optional_import
_nvtx, _ = optional_import("torch._C._nvtx", descriptor="NVTX is not installed. Are you sure you have a CUDA build?")
if TYPE_CHECKING:
from ignite.engine import Engine, Events
else:
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
Events, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Events")
__all__ = ["RangeHandler", "RangePushHandler", "RangePopHandler", "MarkHandler"]
class RangeHandler:
"""
Attach a NVTX range to a pair of Ignite events.
It pushes an NVTX range at the first event and pops it at the second event.
Stores zero-based depth of the range that is started.
Args:
events: a string, pair of Ignite events, pair of Ignite event literals, or pair of Ignite events and literals.
If a single string is provided, it should describe the base name of a pair of default Ignite events
with _STARTED and _COMPLETED postfix (like "EPOCH" for Events.EPOCH_STARTED and Events.EPOCH_COMPLETED).
The accepted events are: BATCH, ITERATION, EPOCH, and ENGINE.
            If pair of literals, each should be the literal equivalent of an Ignite event, for instance:
("EPOCH_STARTED" and "EPOCH_COMPLETED").
One can combine events and literals, like (Events.EPOCH_STARTED and "EPOCH_COMPLETED").
For the complete list of Events,
check https://pytorch.org/ignite/generated/ignite.engine.events.Events.html.
msg: ASCII message to associate with range.
If not provided, the name of first event will be assigned to the NVTX range.
"""
def __init__(
self, events: Union[str, Tuple[Union[str, Events], Union[str, Events]]], msg: Optional[str] = None
) -> None:
self.events = self.resolve_events(events)
if msg is None:
if isinstance(events, str):
# assign the prefix of the events
msg = events
else:
# combine events' names
msg = "/".join([e.name for e in self.events])
self.msg = msg
self.depth = None
def resolve_events(self, events: Union[str, Tuple]) -> Tuple[Events, Events]:
"""
Resolve the input events to create a pair of Ignite events
"""
events = ensure_tuple(events)
if len(events) == 1:
return self.create_paired_events(events[0])
if len(events) == 2:
return (self.get_event(events[0]), self.get_event(events[1]))
raise ValueError(f"Exactly two Ignite events should be provided [received {len(events)}].")
def create_paired_events(self, event: str) -> Tuple[Events, Events]:
"""
        Create pair of Ignite events from an event prefix name
"""
event = event.upper()
event_prefix = {"": "", "ENGINE": "", "EPOCH": "EPOCH_", "ITERATION": "ITERATION_", "BATCH": "GET_BATCH_"}
return (self.get_event(event_prefix[event] + "STARTED"), self.get_event(event_prefix[event] + "COMPLETED"))
def get_event(self, event: Union[str, Events]) -> Events:
return Events[event.upper()] if isinstance(event, str) else event
def attach(self, engine: Engine) -> None:
"""
Attach an NVTX Range to specific Ignite events
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
engine.add_event_handler(self.events[0], self.range_push)
engine.add_event_handler(self.events[1], self.range_pop)
def range_push(self):
self.depth = _nvtx.rangePushA(self.msg)
def range_pop(self):
_nvtx.rangePop()
class RangePushHandler:
"""
    At a specific event, pushes a range onto a stack of nested range spans.
    Stores zero-based depth of the range that is started.
    Args:
        event: Ignite event at which to push the NVTX range.
        msg: ASCII message to associate with range.
            If not provided, the name of the event will be assigned to the range.
"""
def __init__(self, event: Union[str, Events], msg: Optional[str] = None) -> None:
self.event = Events[event.upper()] if isinstance(event, str) else event
if msg is None:
msg = self.event.name
self.msg = msg
self.depth = None
def attach(self, engine: Engine) -> None:
"""
Push an NVTX range at a specific Ignite event
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
engine.add_event_handler(self.event, self.range_push)
def range_push(self):
self.depth = _nvtx.rangePushA(self.msg)
class RangePopHandler:
"""
At a specific event, pop a previously pushed range.
    Args:
        event: Ignite event at which to pop the NVTX range.
"""
def __init__(self, event: Union[str, Events]) -> None:
self.event = Events[event.upper()] if isinstance(event, str) else event
def attach(self, engine: Engine) -> None:
"""
Pop an NVTX range at a specific Ignite event
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
engine.add_event_handler(self.event, self.range_pop)
def range_pop(self):
_nvtx.rangePop()
class MarkHandler:
"""
Mark an instantaneous event that occurred at some point.
Args:
        event: Ignite event at which to add the NVTX mark.
        msg: ASCII message to associate with the mark.
            If not provided, the name of the event will be assigned to the mark.
"""
def __init__(self, event: Union[str, Events], msg: Optional[str] = None) -> None:
self.event = Events[event.upper()] if isinstance(event, str) else event
if msg is None:
msg = self.event.name
self.msg = msg
def attach(self, engine: Engine) -> None:
"""
Add an NVTX mark to a specific Ignite event
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
engine.add_event_handler(self.event, self.mark)
def mark(self):
_nvtx.markA(self.msg)
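# Hedged usage sketch (not part of this module): how the handlers defined
# above might be attached to an existing Ignite engine. `trainer` is assumed
# to be an ignite.engine.Engine built elsewhere; the messages are arbitrary.
def _attach_nvtx_handlers_sketch(trainer: Engine) -> None:
    # Push/pop an NVTX range around every epoch.
    RangeHandler("EPOCH").attach(trainer)
    # Independently push at iteration start and pop at iteration end.
    RangePushHandler("ITERATION_STARTED", msg="iteration").attach(trainer)
    RangePopHandler("ITERATION_COMPLETED").attach(trainer)
    # Drop a single marker when the run completes.
    MarkHandler("COMPLETED", msg="run finished").attach(trainer)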
#!/usr/bin/env python3
import os
from os import listdir
from os.path import isfile, join, isdir, basename, split
from azure.storage.blob import ContentSettings, BlobServiceClient, BlobClient, ContainerClient, __version__
from typing import List, Set, Dict, Tuple, Optional
import mimetypes
def help():
print("""
    Upload the contents of a local directory to an Azure Blob Storage container.
    The following settings are required:
    * Connection string: AZURE_STORAGE_CONNECTION_STRING
    * Container name: AZURE_STORAGE_CONTAINER
    * Source directory: SOURCE_DIRECTORY
    * Destination directory: DESTINATION_DIRECTORY - default /
    Configuration is read from environment variables only, because the script
    is meant to be executed from an AKS or Kubernetes cluster.
    Only the last component of the source directory is kept in the destination.
    E.g. if the source directory is:
/home/testuser/source
And destination directory is:
/
Then the destination directory structure will be:
/source
""")
def check_environment_variables():
mandatory = [
'AZURE_STORAGE_CONNECTION_STRING',
'SOURCE_DIRECTORY',
'AZURE_STORAGE_CONTAINER'
]
for variable in mandatory:
if variable not in os.environ:
print(f'The environment variable {variable} is missing.')
exit(1)
Files = List[str]
def get_files(path: str) -> Files:
    """Recursively collect all files below the given directory."""
    files = []
    directories = [path]
    # Breadth-first walk: `directories` grows as new subdirectories are found.
    for path in directories:
        for item in listdir(path):
            fullpath = join(path, item)
            if isfile(fullpath):
                files.append(fullpath)
            elif isdir(fullpath):
                directories.append(fullpath)
    return files
def get_prefix(path: str) -> str:
    """Return the path with its last non-empty component removed (trailing slashes ignored)."""
    (path, endpart) = split(path)
    while not endpart and path:
        (path, endpart) = split(path)
    return path
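# Hedged sketch (not part of the original script): illustrates how
# get_prefix() is used in upload_files() below to keep only the last
# component of the source directory in the blob name. The paths and the
# "public" destination are made-up examples.
def _demo_target_name():
    source_dir = "/home/testuser/source"
    local_file = "/home/testuser/source/css/site.css"
    path_end_position = len(get_prefix('.' + source_dir))  # len('./home/testuser')
    # Everything before the last component is stripped from the blob name.
    return join("public", local_file[path_end_position:])  # 'public/source/css/site.css'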
def upload_files(source_dir : str,
files : Files,
connection_string : str,
container : str,
destination : str,
cache_setting : str ):
path_end_position = len(get_prefix('.'+source_dir))
container_client = BlobServiceClient.from_connection_string(connection_string).get_container_client(container)
for file in files:
target_name=join(destination, file[path_end_position:])
mimetype = mimetypes.guess_type(file)[0]
print(f"Copying file {file} to {target_name} of type {mimetype}")
with open(file, "rb") as data:
blob_client = container_client.upload_blob(name=target_name,
data=data,
overwrite=True,
content_settings=ContentSettings(
content_type=mimetype,
cache_control = cache_setting)
)
def sanitize_destination(path : str) -> str:
""" Function removes the leading / characters. They're
messing up the directory structure.
"""
if not path:
path = ""
while len(path) > 0 and path[0] == '/':
path = path[1:]
return path
def main():
    check_environment_variables()
    connection_string = os.getenv('AZURE_STORAGE_CONNECTION_STRING')
    source_dir = os.getenv("SOURCE_DIRECTORY")
    container_name = os.getenv("AZURE_STORAGE_CONTAINER")
    destination_directory = sanitize_destination(os.getenv("DESTINATION_DIRECTORY"))
    cache_setting = os.getenv("CACHE_CONTROL")
    files = get_files(source_dir)
    upload_files(source_dir, files, connection_string, container_name,
                 destination_directory, cache_setting)
if __name__ == "__main__":
    main()
# alperkamil/csrl
"""
Omega-Automata
"""
from subprocess import check_output
import random
import numpy as np
import os
import re
import importlib
from itertools import chain, combinations
if importlib.util.find_spec('spot'):
import spot
else:
spot=None
class OmegaAutomaton:
"""Transforms the LTL formula to an omega-automaton (OA) and stores the specifications.
Attributes
----------
q0 : int
The initial state of the OA.
delta : list of dicts
The transition function of the OA. delta[q][label_set] is the number of the state that the OA makes a transition to when it consumes the label_set in the state q.
eps : list of lists
        The epsilon-moves of the OA. eps[q] is the set of states the OA can nondeterministically transition to from state q.
acc : array, shape (n_qs,n_pairs)
The n_qs x n_pairs matrix that represents the accepting condition. If acc[q][i] is false then it means that q belongs to the first set of ith Rabin pair,
        if it is true, then q belongs to the second set, and if it is None, q doesn't belong to either of them. The Buchi condition is represented by a single Rabin pair.
shape : tuple
The pair of the number of the Rabin pairs and the number of states in the OA, i.e. : (n_pairs,n_qs)
spot_oa : spot.twa_graph
The spot twa_graph object of the OA for visualization.
Parameters
----------
ltl : str
        The linear temporal logic (LTL) formula to be transformed to an OA.
oa_type : str
The type of the OA to be constructed. The default value is 'ldba'
"""
def __init__(self,ltl,oa_type='ldba'):
self.oa_type = oa_type
q0, delta, acc, eps, shape, spot_oa = self.ltl2oa(ltl)
self.q0 = q0
self.delta = delta
self.acc = acc
self.shape = shape
self.spot_oa = spot_oa
self.eps = eps
def ltl2oa(self,ltl):
"""Constructs and returns dictionaries and lists containing the specifications of an OA obtained by translation from the ltl property.
It parses the output of ltl2ldba or ltl2dra for the ltl formula and creates a objects that store the specification of the OA.
Parameters
----------
ltl : str
            The linear temporal logic (LTL) formula to be transformed to an OA.
Returns
-------
out : (q0, delta, acc, eps, shape, spot_oa)
The tuple of the initial state q0, the list of dictionaries of transitions delta,
the list of dictionaries of the accepting transitions, the list of lists of epsilon-moves,
the pair of the number of the Rabin pairs and the number of states and the spot object of the OA.
"""
# Translate the LTL formula to an OA using Rabinizer 4.
out=check_output(['ltl2ldba', '-d', '-e', ltl] if self.oa_type == 'ldba' else ['ltl2dra', '-c', ltl])
# Split the output into two parts: the header and the body
header, body = out.decode('utf-8').split('--BODY--\n')
# Parse the initial state, the atomic propositions and the number of Rabin pairs
for line in header.splitlines():
if line.startswith('Start'):
q0 = int(line[7:]) # The initial state
elif line.startswith('AP'):
char_map = {i:c for i,c in enumerate(re.sub("[^\w]", " ", line[4:]).split()[1:])} # Maps ids to atomic propositions
ap_list = [tuple(ap) for ap in self.powerset(sorted(char_map.values()))] # The list of all subsets of AP.
elif line.startswith('Acceptance'):
n_pairs = int(line.split()[1])//2 # Zero for the Buchi condition
body_lines = body.splitlines()[:-1] # Ignore the last line
# Get the number of states
n_qs = 0 # The number of states
for line in reversed(body_lines): # Loop over all states because the states might not be ordered.
if line.startswith('State'):
n_qs = max(int(line[7:]),n_qs) # Get the maximum of them
n_qs += 2 # +1 because the index origin is 0 and +1 for the trap state
n_i = max(1,n_pairs) # Because n_pairs is zero for the Buchi condition
shape = n_i, n_qs
        # The transition function delta[q][label] stores the next state that the OA transitions to when it consumes 'label' in state 'q'.
delta = [{ap:n_qs-1 for ap in ap_list} for i in range(n_qs)] # The default target of a transition is the trap state whose index is n_qs-1
acc = [{ap:[None]*n_i for ap in ap_list} for i in range(n_qs)] # The default acceptance value is None, meaning the transition does not belong to any acceptance set.
eps = [[] for i in range(n_qs)] # The epsilon moves in the OA. eps[q] is the list of states can be reached from `q` by making an epsilon-transition.
# Parse the transitions, acceptance values
q=-1 # The state to be parsed
for line in body_lines:
if line.startswith('State'):
q = int(line[7:]) # Update the state to be parsed
else:
# Parse the transition into three parts
_, _label, _dst, _, _acc_set = re.findall('(\[(.*)\])? ?(\d+) ?(\{(.*)\})?',line)[0]
dst = int(_dst) # Get the destination
if not _label: # If there is no label then the transition is an epsilon-move
eps[q].append(dst)
else:
# Get the acceptance status of the transition
acc_set = set([int(a) for a in _acc_set.split()]) # The set of acceptance states that the transition belongs to
if not n_pairs: # acc_name == 'Buchi':
                        t_acc = [True if 0 in acc_set else None] # If it is a Buchi set, then it is True and None otherwise
else:
t_acc = [None]*n_pairs
for i in range(n_pairs): # For each Rabin pairs
if 2*i+1 in acc_set:
t_acc[i] = True # True if it belongs to the second set of the Rabin pair
if 2*i in acc_set:
t_acc[i] = False # False if it belongs to the first set of the Rabin pair
labels = ['']
_labels = re.compile('[()]').split(_label) # The transitions might have subformulas
for _l in _labels:
labels = [l+_ll for l in labels for _ll in _l.split('|')] # Add all the combinations
for label in labels:
if label == 't': # Means all the transitions
label_acc, label_rej = set(()), set(())
else:
ls = list(filter(None,re.compile('[\s&]').split(label))) # Get the atoms
label_acc = set([char_map[int(l)] for l in ls if not l.startswith('!')]) # Transitions having these atoms
label_rej = set([char_map[int(l[1:])] for l in ls if l.startswith('!')]) # Transitions that doesn't have these
for ap in delta[q]: # Find all the matching transitions
# If matches, update the transition properties
if not(label_acc-set(ap)) and (label_rej-set(ap))==label_rej:
delta[q][ap] = dst
acc[q][ap] = t_acc
# Construct a spot object for visualization
if spot:
filename = self.random_hoa_filename()
with open(filename,'wb') as f:
f.write(check_output(['ltl2ldba', '-d', ltl] if self.oa_type == 'ldba' else ['ltl2dra', '-c', ltl]))
spot.setup()
spot_oa = spot.automaton(filename)
spot_oa.merge_edges() # For better visualization
os.remove(filename)
else:
spot_oa=None
return q0, delta, acc, eps, shape, spot_oa
def powerset(self,a):
"""Returns the power set of the given list.
Parameters
----------
a : list
The input list.
Returns
-------
        out: itertools.chain
            An iterator over all subsets of the list.
"""
return chain.from_iterable(combinations(a, k) for k in range(len(a)+1))
def _repr_html_(self,show=None):
"""Returns the string of svg representation of the OA within div tags to plot in a Jupyter notebook.
Returns
-------
out: str
The string of svg representation of the OA within div tags.
"""
if spot:
return '<div>%s</div>' % self.spot_oa.show(show)._repr_svg_()
def random_hoa_filename(self):
"""Returns a random file name.
Returns
-------
filename: str
A random file name.
"""
filename = 'temp_%032x.hoa' % random.getrandbits(128)
while os.path.isfile(filename):
filename = 'temp_%032x.hoa' % random.getrandbits(128)
        return filename
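# Hedged usage sketch (not part of the original module): assumes the
# Rabinizer 4 tools ('ltl2ldba'/'ltl2dra') are on PATH, as required by
# ltl2oa(). The LTL formula and label below are arbitrary examples.
def _omega_automaton_usage_sketch():
    oa = OmegaAutomaton('F G a')             # build an LDBA for "eventually always a"
    label = ('a',)                           # the label set where only 'a' holds
    q_next = oa.delta[oa.q0][label]          # successor of the initial state
    acc_flags = oa.acc[oa.q0][label]         # acceptance flags of that transition
    return q_next, acc_flags, oa.eps[oa.q0]  # plus any epsilon-moves from q0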
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for inferring and representing seasonality."""
import collections
import enum
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import prefer_static as ps
class SeasonTypes(enum.Enum):
SECOND_OF_MINUTE = 0,
MINUTE_OF_HOUR = 1,
HOUR_OF_DAY = 2,
DAY_OF_WEEK = 3
MONTH_OF_YEAR = 4
SeasonConversion = collections.namedtuple(
'SeasonConversion', ['num', 'duration'])
_SEASONAL_PROTOTYPES = collections.OrderedDict({
SeasonTypes.SECOND_OF_MINUTE: SeasonConversion(num=60, duration=1),
SeasonTypes.MINUTE_OF_HOUR: SeasonConversion(num=60, duration=60),
SeasonTypes.HOUR_OF_DAY: SeasonConversion(num=24, duration=3600),
SeasonTypes.DAY_OF_WEEK: SeasonConversion(num=7, duration=86400)
})
def create_seasonal_structure(frequency, num_steps, min_cycles=2):
"""Creates a set of suitable seasonal structures for a time series.
Args:
frequency: a Pandas `pd.DateOffset` instance.
num_steps: Python `int` number of steps at the given frequency.
min_cycles: Python `int` minimum number of cycles to include an effect.
Returns:
A dictionary of SeasonConversion instances representing different
seasonal components.
Example 1: For data.index.freq: pd.DateOffset(hours=1)
Seasonal components:
{
SeasonTypes.HOUR_OF_DAY: SeasonConversion(num=24, duration=1),
SeasonTypes.DAY_OF_WEEK: SeasonConversion(num=7, duration=24)
}
Example 2: For data.index.freq: pd.DateOffset(seconds=30)
Seasonal components:
{
SeasonTypes.SECOND_OF_MINUTE: SeasonConversion(num=2, duration=1),
SeasonTypes.MINUTE_OF_HOUR: SeasonConversion(num=60, duration=2),
SeasonTypes.HOUR_OF_DAY: SeasonConversion(num=24, duration=120),
SeasonTypes.DAY_OF_WEEK: SeasonConversion(num=7, duration=2880)
}
If the frequency is N times per year, for integer 2 <= N <= 12 (e.g.,
12 for monthly or 4 for quarterly), then a fixed structure of (N, 1)
will be created.
"""
num_periods = periods_per_year(frequency)
if num_periods is not None:
# Fixed structure for monthly or quarterly data.
return {
SeasonTypes.MONTH_OF_YEAR: SeasonConversion(num=num_periods, duration=1)
}
# Compute seasonal components by cycling through _SEASONAL_PROTOTYPES and
# filter out None components.
components = { # pylint: disable=g-complex-comprehension
k: make_component(v,
frequency=frequency,
num_steps=num_steps,
min_cycles=min_cycles)
for k, v in _SEASONAL_PROTOTYPES.items()}
return {k: v for (k, v) in components.items() if v is not None}
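# Hedged sketch (not part of the original module): mirrors Example 1 in the
# docstring above. Requires pandas; the number of steps (four weeks of
# hourly data) is an arbitrary choice that satisfies min_cycles.
def _seasonal_structure_demo():
  import pandas as pd  # deferred, as elsewhere in this module
  structure = create_seasonal_structure(
      frequency=pd.DateOffset(hours=1), num_steps=24 * 7 * 4)
  # Expected: {HOUR_OF_DAY: SeasonConversion(num=24, duration=1),
  #            DAY_OF_WEEK: SeasonConversion(num=7, duration=24)}
  return structure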
def make_component(season_tuple, frequency, num_steps, min_cycles=2):
"""Make a seasonal component from a template component.
This is a helper function to the _create_seasonal_structure() method. It
takes a SeasonConversion instance from _SEASONAL_PROTOTYPES and
creates a seasonal component based on the number of observations
`num_steps` in the data and the time series frequency `freq_sec`. A
custom seasonal component is created if it fulfills 4 conditions:
  Condition 1: time series must cover at least `min_cycles` full cycles.
Condition 2: full cycle must be a multiple of the granularity.
Condition 3: if the season is longer than the granularity, it must be a
multiple of the granularity.
Condition 4: number of seasons must be greater than 1.
Args:
season_tuple: an `SeasonConversion` instance the number of
seasons, and season duration for a template seasonal component e.g.
(60, 1) for seconds-of-minute or (60, 60) for minute-of-hour.
See _SEASONAL_PROTOTYPES for more details.
frequency: a `pd.DateOffset` instance.
num_steps: Python `int` number of steps at the given frequency.
min_cycles: Python `int` minimum number of cycles to include an effect.
Returns:
An `SeasonConversion` instance, where num and duration
is the inferred structure for the seasonal component. If a seasonal
component can not be created it returns None for that component.
"""
freq_sec = freq_to_seconds(frequency)
if not freq_sec:
return None
num_seasons = season_tuple.num
duration_seconds = season_tuple.duration
# None component returned if no component can be created below.
component = None
  # Condition 1: time series must cover at least min_cycles full cycles.
minimum_observations = ((num_seasons * duration_seconds * min_cycles) /
freq_sec)
cond1 = num_steps >= minimum_observations
# Condition 2: full cycle must be a multiple of the granularity.
cond2 = (num_seasons * duration_seconds) % freq_sec == 0
# Condition 3: if the season is longer than the granularity, it must be a
# multiple of the granularity.
cond3 = ((duration_seconds <= freq_sec) or
(duration_seconds % freq_sec == 0))
if cond1 and cond2 and cond3:
nseasons = min(num_seasons * duration_seconds /
freq_sec, num_seasons)
season_duration = max(duration_seconds / freq_sec, 1)
# Condition 4: number of seasons must be greater than 1.
cond4 = ((nseasons > 1) and (nseasons <= num_seasons))
if cond4:
component = SeasonConversion(
num=int(nseasons),
duration=int(season_duration))
return component
def _design_matrix_for_one_seasonal_effect(num_steps, duration, period, dtype):
current_period = np.int32(np.arange(num_steps) / duration) % period
return np.transpose([
ps.where(current_period == p, # pylint: disable=g-complex-comprehension
ps.ones([], dtype=dtype),
ps.zeros([], dtype=dtype))
for p in range(period)])
def build_fixed_effects(num_steps,
seasonal_structure=None,
covariates=None,
dtype=tf.float32):
"""Builds a design matrix treating seasonality as fixed-effects regression."""
if seasonal_structure is None:
seasonal_structure = {}
if seasonal_structure:
design_matrix = ps.concat(
[
_design_matrix_for_one_seasonal_effect(
num_steps, seasonal_effect.duration, seasonal_effect.num, dtype)
for seasonal_effect in seasonal_structure.values()
], axis=-1)
else:
design_matrix = ps.ones([num_steps, 1], dtype=dtype)
if covariates:
design_matrix = ps.concat(
[design_matrix] +
[tf.convert_to_tensor(x)[..., :num_steps, :] for x in covariates],
axis=-1)
return design_matrix
def freq_to_seconds(freq):
"""Converts time series DateOffset frequency to seconds."""
if not freq:
return None
if not is_fixed_duration(freq):
return None
freq_secs = 0.
for kwds_unit, kwds_value in freq.kwds.items():
switch_to_seconds = {
'weeks': kwds_value * 60 * 60 * 24 * 7,
'days': kwds_value * 60 * 60 * 24,
'hours': kwds_value * 60 * 60,
'minutes': kwds_value * 60,
'seconds': kwds_value
}
freq_secs += switch_to_seconds[kwds_unit]
return freq_secs
def periods_per_year(frequency):
"""Counts number of steps that equal a year, if defined and 2 <= N <= 12."""
# pylint: disable=unused-import,g-import-not-at-top
import pandas as pd # Defer import to avoid a package-level Pandas dep.
# pylint: enable=unused-import,g-import-not-at-top
if is_fixed_duration(frequency):
return None # No fixed duration divides both leap and non-leap years.
start = pd.Timestamp('1900-01-01')
# Align the start date with any constraints imposed by the frequency, e.g.,
# `pd.offsets.MonthEnd()`.
start = (start + frequency) - frequency
end = start + pd.DateOffset(years=1)
for num_steps in range(2, 13):
if start + num_steps * frequency == end:
return num_steps
return None
def is_fixed_duration(frequency):
"""Determines if a `pd.DateOffset` represents a fixed number of seconds."""
# pylint: disable=unused-import,g-import-not-at-top
import pandas as pd # Defer import to avoid a package-level Pandas dep.
# pylint: enable=unused-import,g-import-not-at-top
# Most Pandas offsets define `self.nanos` if and only if they are
# fixed-duration (this is checked below), but `pd.DateOffset` doesn't do
# this for some reason, so handle this case explicitly.
if type(frequency) == pd.DateOffset: # pylint: disable=unidiomatic-typecheck
if frequency.kwds.get('months', 0) != 0:
return False
if frequency.kwds.get('years', 0) != 0:
return False
return True
# Handle custom frequencies like `pd.offsets.MonthsEnd()`.
try:
frequency.nanos
except ValueError:
return False
return True
# sohwaje/oci-ansible-collection
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_migration_job_actions
short_description: Perform actions on a Job resource in Oracle Cloud Infrastructure
description:
- Perform actions on a Job resource in Oracle Cloud Infrastructure
- "For I(action=abort), note: Deprecated. Use the new resource model APIs instead.
Aborts a Migration Job (either Evaluation or Migration)."
- "For I(action=resume), note: Deprecated. Use the new resource model APIs instead.
Resume a migration Job."
version_added: "2.9.0"
author: Oracle (@oracle)
options:
job_id:
description:
- The OCID of the job
type: str
aliases: ["id"]
required: true
wait_after:
description:
- Name of a migration phase. The Job will wait after executing this
phase until Resume Job endpoint is called again.
- Applicable only for I(action=resume).
type: str
choices:
- "ODMS_VALIDATE_TGT"
- "ODMS_VALIDATE_SRC"
- "ODMS_VALIDATE_GG_HUB"
- "ODMS_VALIDATE_DATAPUMP_SETTINGS"
- "ODMS_VALIDATE_DATAPUMP_SETTINGS_SRC"
- "ODMS_VALIDATE_DATAPUMP_SETTINGS_TGT"
- "ODMS_VALIDATE"
- "ODMS_PREPARE"
- "ODMS_INITIAL_LOAD_EXPORT"
- "ODMS_DATA_UPLOAD"
- "ODMS_INITIAL_LOAD_IMPORT"
- "ODMS_POST_INITIAL_LOAD"
- "ODMS_PREPARE_REPLICATION_TARGET"
- "ODMS_MONITOR_REPLICATION_LAG"
- "ODMS_SWITCHOVER"
- "ODMS_CLEANUP"
action:
description:
- The action to perform on the Job.
type: str
required: true
choices:
- "abort"
- "resume"
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Perform action abort on job
oci_database_migration_job_actions:
job_id: "ocid1.job.oc1..xxxxxxEXAMPLExxxxxx"
action: abort
- name: Perform action resume on job
oci_database_migration_job_actions:
job_id: "ocid1.job.oc1..xxxxxxEXAMPLExxxxxx"
action: resume
"""
RETURN = """
job:
description:
- Details of the Job resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The OCID of the Migration Job.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- Name of the job.
returned: on success
type: str
sample: display_name_example
migration_id:
description:
- The OCID of the Migration that this job belongs to.
returned: on success
type: str
sample: "ocid1.migration.oc1..xxxxxxEXAMPLExxxxxx"
type:
description:
- The job type.
returned: on success
type: str
sample: EVALUATION
time_created:
description:
- The time the Migration Job was created. An RFC3339 formatted datetime string
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The time the Migration Job was last updated. An RFC3339 formatted datetime string
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
progress:
description:
- ""
returned: on success
type: complex
contains:
current_status:
description:
- Current status of the job.
returned: on success
type: str
sample: PENDING
current_phase:
description:
- Current phase of the job.
returned: on success
type: str
sample: ODMS_VALIDATE_TGT
phases:
description:
- List of phase status for the job.
returned: on success
type: complex
contains:
name:
description:
- Phase name
returned: on success
type: str
sample: ODMS_VALIDATE_TGT
status:
description:
- Phase status
returned: on success
type: str
sample: PENDING
duration_in_ms:
description:
- Duration of the phase in milliseconds
returned: on success
type: int
sample: 56
progress:
description:
- Percent progress of job phase.
returned: on success
type: int
sample: 56
unsupported_objects:
description:
- Database objects not supported.
returned: on success
type: complex
contains:
type:
description:
- Type of unsupported object
returned: on success
type: str
sample: GOLDEN_GATE
owner:
description:
- Owner of the object (regular expression is allowed)
returned: on success
type: str
sample: owner_example
object_name:
description:
- Name of the object (regular expression is allowed)
returned: on success
type: str
sample: object_name_example
lifecycle_state:
description:
- The current state of the migration job.
returned: on success
type: str
sample: ACCEPTED
lifecycle_details:
description:
- A message describing the current state in more detail. For example, can be used to provide actionable information
for a resource in Failed state.
returned: on success
type: str
sample: lifecycle_details_example
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
system_tags:
description:
- "Usage of system tag keys. These predefined keys are scoped to namespaces.
Example: `{\\"orcl-cloud\\": {\\"free-tier-retained\\": \\"true\\"}}`"
returned: on success
type: dict
sample: {}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"migration_id": "ocid1.migration.oc1..xxxxxxEXAMPLExxxxxx",
"type": "EVALUATION",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"progress": {
"current_status": "PENDING",
"current_phase": "ODMS_VALIDATE_TGT",
"phases": [{
"name": "ODMS_VALIDATE_TGT",
"status": "PENDING",
"duration_in_ms": 56,
"progress": 56
}]
},
"unsupported_objects": [{
"type": "GOLDEN_GATE",
"owner": "owner_example",
"object_name": "object_name_example"
}],
"lifecycle_state": "ACCEPTED",
"lifecycle_details": "lifecycle_details_example",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"system_tags": {}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.database_migration import DatabaseMigrationClient
from oci.database_migration.models import ResumeJobDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class JobActionsHelperGen(OCIActionsHelperBase):
"""
Supported actions:
abort
resume
"""
@staticmethod
def get_module_resource_id_param():
return "job_id"
def get_module_resource_id(self):
return self.module.params.get("job_id")
def get_get_fn(self):
return self.client.get_job
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_job, job_id=self.module.params.get("job_id"),
)
def abort(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.abort_job,
call_fn_args=(),
call_fn_kwargs=dict(job_id=self.module.params.get("job_id"),),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
def resume(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, ResumeJobDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.resume_job,
call_fn_args=(),
call_fn_kwargs=dict(
job_id=self.module.params.get("job_id"),
resume_job_details=action_details,
),
waiter_type=oci_wait_utils.LIFECYCLE_STATE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
JobActionsHelperCustom = get_custom_class("JobActionsHelperCustom")
class ResourceHelper(JobActionsHelperCustom, JobActionsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=True
)
module_args.update(
dict(
job_id=dict(aliases=["id"], type="str", required=True),
wait_after=dict(
type="str",
choices=[
"ODMS_VALIDATE_TGT",
"ODMS_VALIDATE_SRC",
"ODMS_VALIDATE_GG_HUB",
"ODMS_VALIDATE_DATAPUMP_SETTINGS",
"ODMS_VALIDATE_DATAPUMP_SETTINGS_SRC",
"ODMS_VALIDATE_DATAPUMP_SETTINGS_TGT",
"ODMS_VALIDATE",
"ODMS_PREPARE",
"ODMS_INITIAL_LOAD_EXPORT",
"ODMS_DATA_UPLOAD",
"ODMS_INITIAL_LOAD_IMPORT",
"ODMS_POST_INITIAL_LOAD",
"ODMS_PREPARE_REPLICATION_TARGET",
"ODMS_MONITOR_REPLICATION_LAG",
"ODMS_SWITCHOVER",
"ODMS_CLEANUP",
],
),
action=dict(type="str", required=True, choices=["abort", "resume"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="job",
service_client_class=DatabaseMigrationClient,
namespace="database_migration",
)
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
|
<reponame>beevans/integrated-manager-for-lustre<filename>chroma_api/log.py
# Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from chroma_core.lib.util import normalize_nid
from chroma_api.utils import DateSerializer
from tastypie import fields
from chroma_api.authentication import AnonymousAuthentication, PatchedDjangoAuthorization
from chroma_core.models.log import LogMessage, MessageClass
from chroma_api.chroma_model_resource import ChromaModelResource
class LogAuthorization(PatchedDjangoAuthorization):
"""
custom authorization class for log retrieval
Only users in the superusers and filesystem_administrators groups are
allowed to retrieve non-Lustre messages
"""
def read_list(self, object_list, bundle):
request = bundle.request
if (
request.user.is_authenticated()
and request.user.groups.filter(name__in=["filesystem_administrators", "superusers"]).exists()
):
return object_list
else:
# Lustre messages have a leading space
return object_list.filter(message_class__in=[MessageClass.LUSTRE, MessageClass.LUSTRE_ERROR])
class LogResource(ChromaModelResource):
"""
syslog messages collected by the manager server.
"""
substitutions = fields.ListField(
null=True,
help_text="List of dictionaries describing substrings which "
"may be used to decorate the 'message' attribute by adding "
"hyperlinks. Each substitution has `start`, `end`, `label` "
"and `resource_uri` attributes.",
)
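    # Illustrative shape of one substitution entry built by _substitutions()
    # below (the values shown are hypothetical):
    #   {"start": 10, "end": 18, "label": "oss1.example.com",
    #    "resource_uri": "/api/host/1/"}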
message_class = fields.CharField(
attribute="message_class",
help_text="Unicode string. One of %s" % MessageClass.strings(),
enumerations=MessageClass.strings(),
)
def dehydrate_substitutions(self, bundle):
return self._substitutions(bundle.obj)
class Meta:
queryset = LogMessage.objects.all()
filtering = {
"fqdn": ChromaModelResource.ALL_FILTER_STR,
"datetime": ChromaModelResource.ALL_FILTER_DATE,
"message": ChromaModelResource.ALL_FILTER_STR,
"message_class": ChromaModelResource.ALL_FILTER_ENUMERATION,
"tag": ChromaModelResource.ALL_FILTER_STR,
}
serializer = DateSerializer()
authorization = LogAuthorization()
authentication = AnonymousAuthentication()
ordering = ["datetime", "fqdn"]
list_allowed_methods = ["get"]
detail_allowed_methods = ["get"]
def dehydrate_message_class(self, bundle):
return MessageClass.to_string(bundle.obj.message_class)
def build_filters(self, filters=None, **kwargs):
# TODO: make the UI filter on FQDN to avoid the need for this mangling
host_id = filters.get("host_id", None)
if host_id is not None:
del filters["host_id"]
from chroma_core.models import ManagedHost
host = ManagedHost.objects.get(id=host_id)
filters["fqdn"] = host.fqdn
if "message_class__in" in filters:
msg_classes = filters.get("message_class__in")
if isinstance(msg_classes, basestring):
msg_classes = [msg_classes]
filters.set("message_class__in", [MessageClass.from_string(s).__str__() for s in msg_classes])
if "message_class" in filters:
filters["message_class"] = MessageClass.from_string(filters["message_class"])
return super(LogResource, self).build_filters(filters, **kwargs)
def _substitutions(self, obj):
message = obj.message
from chroma_api import api_log
from chroma_api.urls import api
from chroma_core.models import ManagedHost, ManagedTarget
import re
substitutions = []
def substitute(obj, match, group=1):
resource_uri = api.get_resource_uri(obj)
substitutions.append(
{
"start": match.start(group),
"end": match.end(group),
"label": obj.get_label(),
"resource_uri": resource_uri,
}
)
# TODO: detect other NID types (cray?)
        nid_regex = re.compile(r"(\d{1,3}\.){3}\d{1,3}@(tcp|ib)(_\d+)?")
        target_regex = re.compile(r"[^\w](\w{1,8}-(MDT|OST)[\da-f]{4})")
for match in nid_regex.finditer(message):
nid = match.group(0)
nid = normalize_nid(nid)
try:
host = ManagedHost.get_by_nid(nid)
except ManagedHost.DoesNotExist:
api_log.warn("No host has NID %s" % nid)
continue
except ManagedHost.MultipleObjectsReturned:
api_log.warn("Multiple hosts have NID %s" % nid)
continue
if host.state != "removed":
substitute(host, match, 0)
for match in target_regex.finditer(message):
target_name = match.group(1)
for target in ManagedTarget.objects.filter(name=target_name)[:1]:
substitute(target, match)
return sorted(substitutions, key=lambda sub: sub["start"])
|
# Big Data, Neural Networks and Marketing: the key to success?
# Research project (TR)
# <NAME> - <NAME>
#
#
#
# Copyright (c) 2021, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY MARC VERGÉS ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.callbacks import EarlyStopping
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import instaloader
from contrasenyes import usuari, contrasenya
def profile_preferences_to_NN(user):
L = instaloader.Instaloader()
L.login(usuari, contrasenya)
list_to_append_csv = []
none = 0
creators_celebrities = 0
personal_gods = 0
local_events = 0
professional_services = 0
restaurants = 0
non_profits = 0
general_interest = 0
publishers = 0
transportation_and_accomodation = 0
business_and_utility = 0
home_services = 0
auto_dealers = 0
food_and_personal_goods = 0
government_agencies = 0
content_apps = 0
grocery = 0
entities = 0
lifestyle_services = 0
geography = 0
profile = instaloader.Profile.from_username(L.context, user)
preferences = []
for followee in profile.get_followees():
preferences.append(followee.business_category_name)
print(followee.username + " - " + str(followee.business_category_name))
        if followee.business_category_name is None:
none += 1
if followee.business_category_name == "Creators & Celebrities":
creators_celebrities += 1
if followee.business_category_name == "Personal Goods & General Merchandise Stores":
personal_gods += 1
if followee.business_category_name == "Local Events":
local_events += 1
if followee.business_category_name == "Professional Services":
professional_services += 1
if followee.business_category_name == "Restaurants":
restaurants += 1
if followee.business_category_name == "Non-Profits & Religious Organizations":
non_profits += 1
if followee.business_category_name == "General Interest":
general_interest += 1
if followee.business_category_name == "Publishers":
publishers += 1
if followee.business_category_name == "Transportation & Accomodation Services":
transportation_and_accomodation += 1
if followee.business_category_name == "Business & Utility Services":
business_and_utility += 1
if followee.business_category_name == "Home Services":
home_services += 1
if followee.business_category_name == "Auto Dealers":
auto_dealers += 1
if followee.business_category_name == "Food & Personal Goods":
food_and_personal_goods += 1
if followee.business_category_name == "Government Agencies":
government_agencies += 1
if followee.business_category_name == "Content & Apps":
content_apps += 1
if followee.business_category_name == "Grocery & Convenience Stores":
grocery += 1
if followee.business_category_name == "Entities":
entities += 1
if followee.business_category_name == "Lifestyle Services":
lifestyle_services += 1
if followee.business_category_name == "Geography":
geography += 1
print(preferences)
print("None: " + str(none))
print("Creators & Celebrities: " + str(creators_celebrities))
print("Personal Goods & General Merchandise Stores: " + str(personal_gods))
print("Local Events: " + str(local_events))
print("Professional Services: " + str(professional_services))
print("Restaurants: " + str(restaurants))
print("Non-Profits & Religious Organizations: " + str(non_profits))
print("General Interest: " + str(general_interest))
print("Publishers: " + str(publishers))
print("Transportation & Accomodation Services: " + str(transportation_and_accomodation))
print("Business & Utility Services: " + str(business_and_utility))
print("Home Services: " + str(home_services))
print("Auto Dealers: " + str(auto_dealers))
print("Food & Personal Goods: " + str(food_and_personal_goods))
print("Government Agencies: " + str(government_agencies))
print("Content & Apps: " + str(content_apps))
print("Grocery & Convenience Stores: " + str(grocery))
print("Entities: " + str(entities))
print("Lifestyle Services: " + str(lifestyle_services))
print("Geography: " + str(geography))
followers = 0
following = 0
for follower in profile.get_followers():
followers += 1
for follower in profile.get_followees():
following += 1
return preferences
def neural_network(list):
# url = 'https://gist.githubusercontent.com/curran/a08a1080b88344b0c8a7/raw/639388c2cbc2120a14dcf466e85730eb8be498bb/iris.csv'
df = pd.read_csv("data_set3.csv")
df = df.sample(frac=1).reset_index(drop=True)
Y = df['Tematica']
print(Y) # output
X = df.drop(['Tematica'], axis=1)
    print(X)  # input, i.e. the dataset
print(X.shape)
print(Y.shape)
X = np.array(X)
Y.head()
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
dummy_y = np_utils.to_categorical(encoded_Y, 10)
print(encoded_Y)
print(dummy_y)
model = Sequential()
    model.add(Dense(16, input_shape=(X.shape[1],), activation='relu'))  # input layer; input shape is (features,)
    model.add(Dense(16, activation='relu'))  # hidden layer
model.add(Dense(10, activation='softmax'))
model.summary()
# compile the model
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
                  # categorical_crossentropy is used here instead of binary_crossentropy (multi-class problem)
metrics=['accuracy'])
es = keras.callbacks.EarlyStopping(monitor='val_loss',
mode='min',
patience=10,
                                       restore_best_weights=True)  # important - otherwise you just return the last weights...
'''
# now we just update our model fit call
history = model.fit(X,
dummy_y,
callbacks=[es],
epochs=200, # you can set this to a big number!
batch_size=1,
shuffle=True,
validation_split=0.2,
verbose=1)
es = keras.callbacks.EarlyStopping(monitor='val_loss',
mode='min',
patience=10,
restore_best_weights=True) # important - otherwise you just return the last weigths...
'''
# now we just update our model fit call
history = model.fit(X,
dummy_y,
callbacks=[es],
epochs=50, # you can set this to a big number!
batch_size=2,
shuffle=True,
validation_split=0.2,
verbose=1)
history_dict = history.history
# learning curve
# accuracy
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
# loss
loss = history_dict['loss']
val_loss = history_dict['val_loss']
# range of X (no. of epochs)
epochs = range(1, len(acc) + 1)
# plot
# "r" is for "solid red line"
plt.plot(epochs, acc, 'r', label='Training accuracy')
# b is for "solid blue line"
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
preds = model.predict(X) # see how the model did!
    print(preds[0])  # the prediction is spread across the ten output nodes and they sum to 1
print(np.sum(preds[0])) # sum it up! Should be 1
## [9.9999988e-01 1.3509347e-07 6.7064638e-16]
## 1.0
# Almost a perfect prediction
# actual is left, predicted is top
# names can be found by inspecting Y
matrix = confusion_matrix(dummy_y.argmax(axis=1), preds.argmax(axis=1))
matrix
## array([[50, 0, 0],
## [ 0, 46, 4],
## [ 0, 1, 49]])
# more detail on how well things were predicted
print(classification_report(dummy_y.argmax(axis=1), preds.argmax(axis=1)))
    return model.predict(list, batch_size=1, verbose=1)  # prediction for the supplied feature vector(s)
|
#
# This file is part of LiteHyperBus
#
# Copyright (c) 2019 <NAME> <<EMAIL>>
# Copyright (c) 2019-2021 <NAME> <<EMAIL>>
# Copyright (c) 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from migen.genlib.misc import timeline
from litex.build.io import DifferentialOutput
from litex.soc.interconnect import wishbone
# HyperRAM -----------------------------------------------------------------------------------------
class HyperRAM(Module):
"""HyperRAM
Provides a very simple/minimal HyperRAM core that should work with all FPGA/HyperRam chips:
- FPGA vendor agnostic.
- no setup/chip configuration (use default latency).
This core favors portability and ease of use over performance.
"""
def __init__(self, pads, latency=6):
self.pads = pads
self.bus = bus = wishbone.Interface()
# # #
clk = Signal()
clk_phase = Signal(2)
cs = Signal()
ca = Signal(48)
ca_active = Signal()
sr = Signal(48)
dq = self.add_tristate(pads.dq) if not hasattr(pads.dq, "oe") else pads.dq
rwds = self.add_tristate(pads.rwds) if not hasattr(pads.rwds, "oe") else pads.rwds
dw = len(pads.dq) if not hasattr(pads.dq, "oe") else len(pads.dq.o)
assert dw in [8, 16]
# Drive rst_n, cs_n, clk from internal signals ---------------------------------------------
if hasattr(pads, "rst_n"):
self.comb += pads.rst_n.eq(1)
self.comb += pads.cs_n[0].eq(~cs)
assert len(pads.cs_n) <= 2
if len(pads.cs_n) == 2:
self.comb += pads.cs_n[1].eq(1)
if hasattr(pads, "clk"):
self.comb += pads.clk.eq(clk)
else:
self.specials += DifferentialOutput(clk, pads.clk_p, pads.clk_n)
# Clock Generation (sys_clk/4) -------------------------------------------------------------
self.sync += clk_phase.eq(clk_phase + 1)
cases = {}
cases[1] = clk.eq(cs) # Set pads clk on 90° (if cs is set)
cases[3] = clk.eq(0) # Clear pads clk on 270°
self.sync += Case(clk_phase, cases)
# Data Shift Register (for write and read) -------------------------------------------------
dqi = Signal(dw)
self.sync += dqi.eq(dq.i) # Sample on 90° and 270°
self.sync += [
If((clk_phase == 0) | (clk_phase == 2), # Shift on 0° and 180°
# During Command-Address, only D[7:0] are used
If(ca_active,
sr.eq(Cat(dqi[:8], sr[:-8]))
).Else(
sr.eq(Cat(dqi, sr[:-dw]))
)
)
]
self.comb += [
            bus.dat_r.eq(sr), # To Wishbone
If(ca_active,
dq.o.eq(sr[-8:]), # To HyperRAM, 8-bits mode
).Else(
dq.o.eq(sr[-dw:]), # To HyperRAM, 16-bits mode
)
]
# Command generation -----------------------------------------------------------------------
self.comb += [
ca[47].eq(~self.bus.we), # R/W#
ca[45].eq(1), # Burst Type (Linear)
]
if dw == 8:
self.comb += [
ca[16:45].eq(self.bus.adr[2:]), # Row & Upper Column Address
ca[1:3].eq(self.bus.adr[0:]), # Lower Column Address
ca[0].eq(0), # Lower Column Address
]
else:
self.comb += [
ca[16:45].eq(self.bus.adr[3:]), # Row & Upper Column Address
ca[1:3].eq(self.bus.adr[1:]), # Lower Column Address
ca[0].eq(self.bus.adr[0]), # Lower Column Address
]
# Latency count starts from the middle of the command (it's where -4 comes from).
# In fixed latency mode (default), latency is 2*Latency count.
# Because we have 4 sys clocks per ram clock:
lat = (latency * 8) - 4
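        # Worked example (an assumed reading of the formula above): with the
        # default latency=6, fixed latency is 2*6 = 12 HyperRAM clocks, i.e.
        # 12*4 = 48 sys clocks; subtracting the 4 sys clocks already counted
        # from the middle of the command gives lat = 6*8 - 4 = 44.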
# Sequencer --------------------------------------------------------------------------------
# Command.
# --------
dt_seq = [
# DT, Action
(3, []),
(12, [cs.eq(1), dq.oe.eq(1), sr.eq(ca), ca_active.eq(1)]), # Command: 6 clk
(lat, [dq.oe.eq(0), ca_active.eq(0)]), # Latency
]
# Write/Read.
# -----------
rwdso = Signal(2)
self.comb += rwds.o.eq(rwdso)
if dw == 8:
dt_seq += [
(2, [dq.oe.eq(self.bus.we), # Write/Read data byte: 2 clk
sr[:16].eq(0),
sr[16:].eq(self.bus.dat_w),
rwds.oe.eq(self.bus.we),
rwdso[0].eq(~bus.sel[3])]),
(2, [rwdso[0].eq(~bus.sel[2])]), # Write/Read data byte: 2 clk
(2, [rwdso[0].eq(~bus.sel[1])]), # Write/Read data byte: 2 clk
(2, [rwdso[0].eq(~bus.sel[0])]), # Write/Read data byte: 2 clk
]
if dw == 16:
dt_seq += [
(2, [dq.oe.eq(self.bus.we), # Write/Read data byte: 2 clk
sr[:16].eq(0),
sr[16:].eq(self.bus.dat_w),
rwds.oe.eq(self.bus.we),
rwdso[1].eq(~bus.sel[3]),
rwdso[0].eq(~bus.sel[2])]),
(2, [rwdso[1].eq(~bus.sel[1]),
rwdso[0].eq(~bus.sel[0])]), # Write/Read data byte: 2 clk
]
# End.
# ----
dt_seq += [
(2, [cs.eq(0), rwds.oe.eq(0), dq.oe.eq(0)]),
(1, [bus.ack.eq(1)]),
(1, [bus.ack.eq(0)]),
(0, [])
]
# Convert delta-time sequencer to time sequencer
t_seq = []
t_seq_start = (clk_phase == 1)
t = 0
for dt, a in dt_seq:
t_seq.append((t, a))
t += dt
self.sync += timeline(bus.cyc & bus.stb & t_seq_start, t_seq)
def add_tristate(self, pad):
t = TSTriple(len(pad))
self.specials += t.get_tristate(pad)
return t
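# Minimal integration sketch (assumptions: a LiteX SoC with a "hyperram" pad
# group and a free Wishbone region; the names and origin below are hypothetical
# and not taken from this file):
#
#   self.submodules.hyperram = HyperRAM(platform.request("hyperram"), latency=6)
#   self.bus.add_slave("hyperram", self.hyperram.bus,
#                      SoCRegion(origin=0x2000_0000, size=8 * 1024 * 1024))
#
# The core then answers Wishbone reads/writes on `self.hyperram.bus`.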
|
<gh_stars>1-10
import re
# The available regex functions in the Python re module fall into the following
# three categories:
# 1. Searching functions
# 2. Substitution functions
# 3. Utility functions
# Searching functions scan a search string for one or more matches of the
# specified regex:
# re.search(<regex>, <string>, flags=0) - Scans a string for a regex match
print(re.search(r'(\d+)', 'foo123bar'))
print(re.search(r'[a-zA-Z]+', '123FOO456'))
print(re.search(r'\d+', 'foo.bar'))
# The function returns a match object if it finds a match and None otherwise.
# re.match(<regex>, <string>, flags=0) - Looks for a regex match at the
# beginning of a string. This is identical to re.search(), except that
# re.search() returns a match if <regex> matches anywhere in <string>, whereas
# re.match() returns a match only if <regex> matches at the beginning of
# <string>
print()
print(re.match(r'\d+', '123foobar'))
print(re.match(r'\d+', 'foo123bar'))
# re.fullmatch(<regex>, <string>, flags=0) - Looks for a regex match on an
# entire string. This is similar to re.search() and re.match(), but
# re.fullmatch() returns a match only if <regex> matches <string> in its
# entirety
print()
print(re.fullmatch(r'\d+', '123foo'))
print(re.fullmatch(r'\d+', 'foo123'))
print(re.fullmatch(r'\d+', '123'))
print(re.search(r'^\d+$', '123'))
# The re.search() call, in which the \d+ regex is explicitly anchored at the
# start and end of the search string, is functionally equivalent to the re.fullmatch() call above
# re.findall(<regex>, <string>, flags=0) - Returns a list of all matches of a
# regex in a string. re.findall(<regex>, <string>) returns a list of all
# non-overlapping matches of <regex> in <string>. It scans the search string
# from left to right and returns all matches in the order found
print()
print(re.findall(r'\w+', ' foo,,,,bar:%$baz//|'))
# If <regex> contains a capturing group, then the return list contains only
# contents of the group, not the entire match
print(re.findall(r'#(\w+)#', '#foo#.#bar#.#baz#'))
# In this case, the specified regex is #(\w+)#. The matching strings are
# '#foo#', '#bar#', and '#baz#'. But the hash (#) characters don’t appear in the
# return list because they’re outside the grouping parentheses
print(re.findall(r'(\w+),(\w+)', 'foo,bar,baz,qux,quux,corge'))
# If <regex> contains more than one capturing group, then re.findall() returns a
# list of tuples containing the captured groups. The length of each tuple is
# equal to the number of groups specified
# re.finditer(<regex>, <string>, flags=0) - Returns an iterator that yields
# regex matches. Scans <string> for non-overlapping matches of <regex> and
# returns an iterator that yields the match objects from any it finds. It scans
# the search string from left to right and returns matches in the order it finds
# them
print()
for i in re.finditer(r'\w+', ' foo,,,,bar:%$baz//|'):
print(i)
# re.findall() returns a list, whereas re.finditer() returns an iterator.
# The items in the list that re.findall() returns are the actual matching
# strings, whereas the items yielded by the iterator that re.finditer() returns
# are match objects
# Substitution Functions replace portions of a search string that match a
# specified regex
# re.sub(<regex>, <repl>, <string>, count=0, flags=0) - Returns a new string
# that results from performing replacements on a search string. Finds the
# leftmost non-overlapping occurrences of <regex> in <string>, replaces each
# match as indicated by <repl>, and returns the result. <string> remains
# unchanged
print()
s = 'foo.123.bar.789.baz'
print(re.sub(r'\d+', '#', s))
print(re.sub('[a-z]+', '(*)', s))
# Substitution by Function - If you specify <repl> as a function, then re.sub()
# calls that function for each match found. It passes each corresponding match
# object as an argument to the function to provide information about the match.
# The function return value then becomes the replacement string
def f(match_obj):
s = match_obj.group(0) # The matching string
# s.isdigit() returns True if all characters in s are digits
if s.isdigit():
return str(int(s) * 10)
else:
return s.upper()
print()
print(re.sub(r'\w+', f, 'foo.10.bar.20.baz.30'))
# Limiting the Number of Replacements. If you specify a positive integer for the
# optional count parameter, then re.sub() performs at most that many
# replacements
print(re.sub(r'\w+', 'xxx', 'foo.bar.baz.qux', count=2))
# re.subn(<regex>, <repl>, <string>, count=0, flags=0) - Returns a new string
# that results from performing replacements on a search string and also returns
# the number of substitutions made
print()
print(re.subn(r'\w+', 'xxx', 'foo.bar.baz.qux'))
# Utility Functions - There are two remaining regex functions in the Python re
# module that you’ve yet to cover
# re.split(<regex>, <string>, maxsplit=0, flags=0) - Splits a string into
# substrings.
print()
print(re.split(r'\s*[,;/]\s*', 'foo,bar ; baz / qux corge'))
# splits the specified string into substrings delimited by a comma (,),
# semicolon (;), or slash (/) character, surrounded by any amount of whitespace
print(re.split(r'(\s*[,;/]\s*)', 'foo,bar ; baz / qux'))
# If <regex> contains capturing groups, then the return list includes the
# matching delimiter strings as well
print(re.split(r'(?:\s*[,;/]\s*)', 'foo,bar ; baz / qux'))
# If you need to use groups but don’t want the delimiters included in the
# return list, then you can use noncapturing groups
# If the optional maxsplit argument is present and greater than zero, then
# re.split() performs at most that many splits. The final element in the return
# list is the remainder of <string> after all the splits have occurred
# Explicitly specifying maxsplit=0 is equivalent to omitting it entirely. If
# maxsplit is negative, then re.split() returns <string> unchanged
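# Added illustration of the maxsplit behaviour described above (a small,
# assumed example in the same style as the ones before it)
print()
print(re.split(r'\s*[,;/]\s*', 'foo,bar ; baz / qux', maxsplit=1))
print(re.split(r'\s*[,;/]\s*', 'foo,bar ; baz / qux', maxsplit=-1))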
# Refer: https://realpython.com/regex-python-part-2/#re-module-functions
|
<gh_stars>100-1000
import datetime
import math
import random
import gym
import numpy as np
from gym import spaces
from gym.utils import seeding
from stable_baselines3.common.vec_env import DummyVecEnv
from finrl_meta.env_fx_trading.util.log_render import render_to_file
from finrl_meta.env_fx_trading.util.plot_chart import TradingChart
from finrl_meta.env_fx_trading.util.read_config import EnvConfig
class tgym(gym.Env):
"""forex/future/option trading gym environment
1. Three action space (0 Buy, 1 Sell, 2 Nothing)
2. Multiple trading pairs (EURUSD, GBPUSD...) under same time frame
3. Timeframe from 1 min to daily as long as use candlestick bar (Open, High, Low, Close)
4. Use StopLose, ProfitTaken to realize rewards. each pair can configure it own SL and PT in configure file
5. Configure over night cash penalty and each pair's transaction fee and overnight position holding penalty
6. Split dataset into daily, weekly or monthly..., with fixed time steps, at end of len(df). The business
logic will force to Close all positions at last Close price (game over).
7. Must have df column name: [(time_col),(asset_col), Open,Close,High,Low,day] (case sensitive)
8. Addition indicators can add during the data process. 78 available TA indicator from Finta
9. Customized observation list handled in json config file.
10. ProfitTaken = fraction_action * max_profit_taken + SL.
11. SL is pre-fixed
12. Limit order can be configure, if limit_order == True, the action will preset buy or sell at Low or High of the bar,
with a limit_order_expiration (n bars). It will be triggered if the price go cross. otherwise, it will be drop off
13. render mode:
human -- display each steps realized reward on console
file -- create a transaction log
graph -- create transaction in graph (under development)
14.
15. Reward, we want to incentivize profit that is sustained over long periods of time.
At each step, we will set the reward to the account balance multiplied by
some fraction of the number of time steps so far.The purpose of this is to delay
rewarding the agent too fast in the early stages and allow it to explore
sufficiently before optimizing a single strategy too deeply.
It will also reward agents that maintain a higher balance for longer,
rather than those who rapidly gain money using unsustainable strategies.
16. Observation_space contains all of the input variables we want our agent
to consider before making, or not making a trade. We want our agent to “see”
the forex data points (Open price, High, Low, Close, time serial, TA) in the game window,
as well a couple other data points like its account balance, current positions,
and current profit.The intuition here is that for each time step, we want our agent
to consider the price action leading up to the current price, as well as their
own portfolio’s status in order to make an informed decision for the next action.
17. reward is forex trading unit Point, it can be configure for each trading pair
"""
metadata = {'render.modes': ['graph', 'human', 'file', 'none']}
def __init__(self, df, env_config_file='./neo_finrl/env_fx_trading/config/gdbusd-test-1.json') -> None:
assert df.ndim == 2
super(tgym, self).__init__()
self.cf = EnvConfig(env_config_file)
self.observation_list = self.cf.env_parameters("observation_list")
self.balance_initial = self.cf.env_parameters("balance")
self.over_night_cash_penalty = self.cf.env_parameters("over_night_cash_penalty")
self.asset_col = self.cf.env_parameters("asset_col")
self.time_col = self.cf.env_parameters("time_col")
self.random_start = self.cf.env_parameters("random_start")
self.log_filename = self.cf.env_parameters("log_filename") + datetime.datetime.now(
).strftime('%Y%m%d%H%M%S') + '.csv'
self.df = df
self.df["_time"] = df[self.time_col]
self.df["_day"] = df["weekday"]
self.assets = df[self.asset_col].unique()
self.dt_datetime = df[self.time_col].sort_values().unique()
self.df = self.df.set_index(self.time_col)
self.visualization = False
# --- reset value ---
self.equity_list = [0] * len(self.assets)
self.balance = self.balance_initial
self.total_equity = self.balance + sum(self.equity_list)
self.ticket_id = 0
self.transaction_live = []
self.transaction_history = []
self.transaction_limit_order = []
self.current_draw_downs = [0.0] * len(self.assets)
self.max_draw_downs = [0.0] * len(self.assets)
self.max_draw_down_pct = sum(self.max_draw_downs) / self.balance * 100
self.current_step = 0
self.episode = -1
self.current_holding = [0] * len(self.assets)
self.tranaction_open_this_step = []
self.tranaction_close_this_step = []
self.current_day = 0
self.done_information = ''
self.log_header = True
# --- end reset ---
self.cached_data = [
self.get_observation_vector(_dt) for _dt in self.dt_datetime
]
self.cached_time_serial = ((self.df[["_time", "_day"]].sort_values("_time")) \
.drop_duplicates()).values.tolist()
self.reward_range = (-np.inf, np.inf)
self.action_space = spaces.Box(low=0,
high=3,
shape=(len(self.assets),))
        # the first 3 values: balance, current_holding, max_draw_down_pct
_space = 3 + len(self.assets) \
+ len(self.assets) * len(self.observation_list)
self.observation_space = spaces.Box(low=-np.inf,
high=np.inf,
shape=(_space,))
print(
f'initial done:\n'
f'observation_list:{self.observation_list}\n '
f'assets:{self.assets}\n '
f'time serial: {min(self.dt_datetime)} -> {max(self.dt_datetime)} length: {len(self.dt_datetime)}'
)
self._seed()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _history_df(self, i):
pass
def _take_action(self, actions, done):
# action = math.floor(x),
# profit_taken = math.ceil((x- math.floor(x)) * profit_taken_max - stop_loss_max )
# _actions = np.floor(actions).astype(int)
# _profit_takens = np.ceil((actions - np.floor(actions)) *self.cf.symbol(self.assets[i],"profit_taken_max")).astype(int)
_action = 2
_profit_taken = 0
rewards = [0] * len(self.assets)
self.tranaction_open_this_step = []
self.tranaction_close_this_step = []
        # need to handle multiple assets
for i, x in enumerate(actions):
self._o = self.get_observation(self.current_step, i, "Open")
self._h = self.get_observation(self.current_step, i, "High")
self._l = self.get_observation(self.current_step, i, "Low")
self._c = self.get_observation(self.current_step, i, "Close")
self._t = self.get_observation(self.current_step, i, "_time")
self._day = self.get_observation(self.current_step, i, "_day")
_action = math.floor(x)
rewards[i] = self._calculate_reward(i, done)
if self.cf.symbol(self.assets[i], "limit_order"):
self._limit_order_process(i, _action, done)
if _action in (0, 1) and not done \
and self.current_holding[i] < self.cf.symbol(self.assets[i], "max_current_holding"):
# generating PT based on action fraction
_profit_taken = math.ceil(
(x - _action) * self.cf.symbol(self.assets[i], "profit_taken_max")) + self.cf.symbol(self.assets[i],
"stop_loss_max")
self.ticket_id += 1
if self.cf.symbol(self.assets[i], "limit_order"):
transaction = {
"Ticket": self.ticket_id,
"Symbol": self.assets[i],
"ActionTime": self._t,
"Type": _action,
"Lot": 1,
"ActionPrice": self._l if _action == 0 else self._h,
"SL": self.cf.symbol(self.assets[i], "stop_loss_max"),
"PT": _profit_taken,
"MaxDD": 0,
"Swap": 0.0,
"CloseTime": "",
"ClosePrice": 0.0,
"Point": 0,
"Reward": -self.cf.symbol(self.assets[i], "transaction_fee"),
"DateDuration": self._day,
"Status": 0,
"LimitStep": self.current_step,
"ActionStep": -1,
"CloseStep": -1,
}
self.transaction_limit_order.append(transaction)
else:
transaction = {
"Ticket": self.ticket_id,
"Symbol": self.assets[i],
"ActionTime": self._t,
"Type": _action,
"Lot": 1,
"ActionPrice": self._c,
"SL": self.cf.symbol(self.assets[i], "stop_loss_max"),
"PT": _profit_taken,
"MaxDD": 0,
"Swap": 0.0,
"CloseTime": "",
"ClosePrice": 0.0,
"Point": 0,
"Reward": -self.cf.symbol(self.assets[i], "transaction_fee"),
"DateDuration": self._day,
"Status": 0,
"LimitStep": self.current_step,
"ActionStep": self.current_step,
"CloseStep": -1,
}
self.current_holding[i] += 1
self.tranaction_open_this_step.append(transaction)
self.balance -= self.cf.symbol(self.assets[i], "transaction_fee")
self.transaction_live.append(transaction)
return sum(rewards)
def _calculate_reward(self, i, done):
_total_reward = 0
_max_draw_down = 0
for tr in self.transaction_live:
if tr["Symbol"] == self.assets[i]:
_point = self.cf.symbol(self.assets[i], "point")
# cash discount overnight
if self._day > tr["DateDuration"]:
tr["DateDuration"] = self._day
tr["Reward"] -= self.cf.symbol(self.assets[i], "over_night_penalty")
if tr["Type"] == 0: # buy
# stop loss trigger
_sl_price = tr["ActionPrice"] - tr["SL"] / _point
_pt_price = tr["ActionPrice"] + tr["PT"] / _point
if done:
p = (self._c - tr["ActionPrice"]) * _point
self._manage_tranaction(tr, p, self._c, status=2)
_total_reward += p
elif self._l <= _sl_price:
self._manage_tranaction(tr, -tr["SL"], _sl_price)
_total_reward += -tr["SL"]
self.current_holding[i] -= 1
elif self._h >= _pt_price:
self._manage_tranaction(tr, tr["PT"], _pt_price)
_total_reward += tr["PT"]
self.current_holding[i] -= 1
else: # still open
self.current_draw_downs[i] = int(
(self._l - tr["ActionPrice"]) * _point)
_max_draw_down += self.current_draw_downs[i]
if (
self.current_draw_downs[i] < 0
and tr["MaxDD"] > self.current_draw_downs[i]
):
tr["MaxDD"] = self.current_draw_downs[i]
elif tr["Type"] == 1: # sell
# stop loss trigger
_sl_price = tr["ActionPrice"] + tr["SL"] / _point
_pt_price = tr["ActionPrice"] - tr["PT"] / _point
if done:
p = (tr["ActionPrice"] - self._c) * _point
self._manage_tranaction(tr, p, self._c, status=2)
_total_reward += p
elif self._h >= _sl_price:
self._manage_tranaction(tr, -tr["SL"], _sl_price)
_total_reward += -tr["SL"]
self.current_holding[i] -= 1
elif self._l <= _pt_price:
self._manage_tranaction(tr, tr["PT"], _pt_price)
_total_reward += tr["PT"]
self.current_holding[i] -= 1
else:
self.current_draw_downs[i] = int(
(tr["ActionPrice"] - self._h) * _point)
_max_draw_down += self.current_draw_downs[i]
if (
self.current_draw_downs[i] < 0
and tr["MaxDD"] > self.current_draw_downs[i]
):
tr["MaxDD"] = self.current_draw_downs[i]
if _max_draw_down > self.max_draw_downs[i]:
self.max_draw_downs[i] = _max_draw_down
return _total_reward
def _limit_order_process(self, i, _action, done):
for tr in self.transaction_limit_order:
if tr["Symbol"] == self.assets[i]:
if tr["Type"] != _action or done:
self.transaction_limit_order.remove(tr)
tr["Status"] = 3
tr["CloseStep"] = self.current_step
self.transaction_history.append(tr)
elif (tr["ActionPrice"] >= self._l and _action == 0) or (tr["ActionPrice"] <= self._h and _action == 1):
tr["ActionStep"] = self.current_step
self.current_holding[i] += 1
self.balance -= self.cf.symbol(self.assets[i], "transaction_fee")
self.transaction_limit_order.remove(tr)
self.transaction_live.append(tr)
self.tranaction_open_this_step.append(tr)
elif tr["LimitStep"] + self.cf.symbol(self.assets[i], "limit_order_expiration") > self.current_step:
tr["CloseStep"] = self.current_step
tr["Status"] = 4
self.transaction_limit_order.remove(tr)
self.transaction_history.append(tr)
def _manage_tranaction(self, tr, _p, close_price, status=1):
self.transaction_live.remove(tr)
tr["ClosePrice"] = close_price
tr["Point"] = int(_p)
tr["Reward"] = int(tr["Reward"] + _p)
tr["Status"] = status
tr["CloseTime"] = self._t
self.balance += int(tr["Reward"])
self.total_equity -= int(abs(tr["Reward"]))
self.tranaction_close_this_step.append(tr)
self.transaction_history.append(tr)
def step(self, actions):
# Execute one time step within the environment
self.current_step += 1
done = (self.balance <= 0
or self.current_step == len(self.dt_datetime) - 1)
if done:
self.done_information += f'Episode: {self.episode} Balance: {self.balance} Step: {self.current_step}\n'
self.visualization = True
reward = self._take_action(actions, done)
if self._day > self.current_day:
self.current_day = self._day
self.balance -= self.over_night_cash_penalty
if self.balance != 0:
self.max_draw_down_pct = abs(
sum(self.max_draw_downs) / self.balance * 100)
# no action anymore
obs = ([self.balance, self.max_draw_down_pct] +
self.current_holding +
self.current_draw_downs +
self.get_observation(self.current_step))
return np.array(obs).astype(np.float32), reward, done, {
"Close": self.tranaction_close_this_step
}
def get_observation(self, _step, _iter=0, col=None):
if (col is None):
return self.cached_data[_step]
if col == '_day':
return self.cached_time_serial[_step][1]
elif col == '_time':
return self.cached_time_serial[_step][0]
col_pos = -1
for i, _symbol in enumerate(self.observation_list):
if _symbol == col:
col_pos = i
break
assert col_pos >= 0
return self.cached_data[_step][_iter * len(self.observation_list) +
col_pos]
def get_observation_vector(self, _dt, cols=None):
cols = self.observation_list
v = []
for a in self.assets:
subset = self.df.query(
f'{self.asset_col} == "{a}" & {self.time_col} == "{_dt}"')
assert not subset.empty
v += subset.loc[_dt, cols].tolist()
assert len(v) == len(self.assets) * len(cols)
return v
def reset(self):
# Reset the state of the environment to an initial state
self.seed()
if self.random_start:
self.current_step = random.choice(
range(int(len(self.dt_datetime) * 0.5)))
else:
self.current_step = 0
self.equity_list = [0] * len(self.assets)
self.balance = self.balance_initial
self.total_equity = self.balance + sum(self.equity_list)
self.ticket_id = 0
self.transaction_live = []
self.transaction_history = []
self.transaction_limit_order = []
self.current_draw_downs = [0.0] * len(self.assets)
self.max_draw_downs = [0.0] * len(self.assets)
self.max_draw_down_pct = sum(self.max_draw_downs) / self.balance * 100
self.episode = -1
self.current_holding = [0] * len(self.assets)
self.tranaction_open_this_step = []
self.tranaction_close_this_step = []
self.current_day = 0
self.done_information = ''
self.log_header = True
self.visualization = False
_space = (
[self.balance, self.max_draw_down_pct] +
[0] * len(self.assets) +
[0] * len(self.assets) +
self.get_observation(self.current_step))
return np.array(_space).astype(np.float32)
def render(self, mode='human', title=None, **kwargs):
# Render the environment to the screen
if mode in ('human', 'file'):
printout = mode == 'human'
pm = {
"log_header": self.log_header,
"log_filename": self.log_filename,
"printout": printout,
"balance": self.balance,
"balance_initial": self.balance_initial,
"tranaction_close_this_step": self.tranaction_close_this_step,
"done_information": self.done_information
}
render_to_file(**pm)
if self.log_header: self.log_header = False
elif mode == 'graph' and self.visualization:
print('plotting...')
p = TradingChart(self.df, self.transaction_history)
p.plot()
def close(self):
pass
def get_sb_env(self):
e = DummyVecEnv([lambda: self])
obs = e.reset()
return e, obs
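# Minimal usage sketch (assumptions: `df` is a prepared OHLC DataFrame with the
# columns listed in point 7 of the class docstring plus a "weekday" column, and
# the JSON config path exists; neither is shipped with this module):
#
#   env = tgym(df, env_config_file='./neo_finrl/env_fx_trading/config/gdbusd-test-1.json')
#   vec_env, obs = env.get_sb_env()
#   from stable_baselines3 import PPO
#   model = PPO("MlpPolicy", vec_env, verbose=1)
#   model.learn(total_timesteps=10_000)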
|
<gh_stars>1-10
#!/usr/bin/python3
import nltk
import os, argparse, json, re, math, statistics, sys
### from: http://www.aclweb.org/anthology/P89-1010.pdf
# How to calculate PMI:
# What is "mutual information"? According to [Fano (1961), p. 28], if
# two points (words), x and y, have probabilities P(x) and P(y), then
# their mutual information, I(x,y), is defined to be
# I(x,y) = log2 (P(x,y) / (P(x) P(y)))
# Informally, mutual information compares the probability of observing
# x and y together (the joint probability) with the probabilities of
# observing x and y independently (chance). If there is a genuine
# association between x and y, then the joint probability P(x,y) will be
# much larger than chance P(x) P(y), and consequently I(x,y) >> 0. If
# there is no interesting relationship between x and y, then P(x,y) ~
# P(x) P(y), and thus, I(x,y) ~ 0. If x and y are in complementary
# distribution, then P(x,y) will be much less than P(x) P(y), forcing
# I(x,y) << O.
# In our application, word probabilities, P(x) and P(y), are estimated
# by counting the number of observations of x and y in a corpus, f(x)
# and f(y), and normalizing by N, the size of the corpus. (Our
# examples use a number of different corpora with different sizes: 15
# million words for the 1987 AP corpus, 36 million words for the 1988
# AP corpus, and 8.6 million tokens for the tagged corpus.) Joint
# probabilities, P(x,y), are estimated by counting the number of times
# that x is followed by y in a window of w words fw(x,y), and
# normalizing by N.
# The window size parameter allows us to look at different
# scales. Smaller window sizes will identify fixed expressions
# (idioms) and other relations that hold over short ranges; larger
# window sizes will highlight semantic concepts and other
# relationships that hold over larger scales. For the remainder of
# this paper, the window size, w, will be set to 5 words as a
# compromise; this setting is large enough to show some of the constraints
# between verbs and arguments, but not so large that it would wash out
# constraints that make use of strict adjacency.
### from: https://www.aaai.org/ocs/index.php/AAAI/AAAI16/paper/view/11963
# The PMI solver formalizes a way of computing and applying such
# associational knowledge. Given a question q and an answer option ai,
# it uses pointwise mutual information (Church and Hanks 1989) to
# measure the strength of the associations between parts of q and
# parts of ai. Given a large corpus C, PMI for two n-grams x and y is
# defined as:
# PMI (x, y) = log p(x, y) p(x)p(y)
# Here p(x, y) is the joint probability that x and y occur together in
# the corpus C, within a certain window of text (we use a 10 word
# window). The term p(x)p(y), on the other hand, represents the
# probability with which x and y would occur together if they were
# statistically independent. The ratio of p(x, y) to p(x)p(y) is thus
# the ratio of the observed co-occurrence to the expected
# co-occurrence. The larger this ratio, the stronger the association
# between x and y.
# We extract unigrams, bigrams, trigrams, and skip-bigrams from the
# question q and each answer option ai. We use the SMART stop word
# list (Salton 1971) to filter the extracted n-grams, but allow
# trigrams to have a stop word as their middle word. The answer with
# the largest average PMI, calculated over all pairs of question
# n-grams and answer option n-grams, is the best guess for the PMI
# solver.
# need to remove stopwords
def split(s, stopwords=None):
split = [ x.lower() for x in re.sub(r'\W+', ' ', s).split() ]
if stopwords:
sw_set = set(stopwords)
return [ x.strip() for x in split if x not in sw_set ]
return [ x.strip() for x in split ]
def count_occurrences(x, corpus, normalized=True):
"Count occurrences of n-gram X in CORPUS."
total_words = 0
total_occurrences = 0
for (sentence,sentence_len) in corpus:
total_occurrences += len(re.findall(x, sentence))
total_words += sentence_len
if normalized:
return total_occurrences / total_words
return total_occurrences
def count_co_occurrences(x,y, corpus):
x_y = " ".join([x,y])
return count_occurrences(x_y, corpus)
def pmi(x,y,corpus):
"""Compute PMI of X and Y in CORPUS; here X and Y are strings
representing n-grams (each gram separated by space) and CORPUS is an
array of strings. For this experiment we are considering the window
size the extension of each string."""
px = count_occurrences(x, corpus)
py = count_occurrences(y, corpus)
pxy = count_co_occurrences(x, y, corpus)
if pxy == 0:
return 0
return math.log( pxy / (px*py), 2)
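if __name__ == '__main__':
    # Small self-contained demo (the sentences below are made up and are not
    # part of the original script): the corpus format expected by
    # count_occurrences() is a list of (sentence, word_count) tuples.
    sentences = [
        "he ordered strong tea with his breakfast",
        "she drinks strong tea every single morning",
        "the lab runs powerful computers all night",
    ]
    demo_corpus = [(s, len(s.split())) for s in sentences]
    print(pmi("strong", "tea", demo_corpus))        # positive: the words co-occur
    print(pmi("strong", "computers", demo_corpus))  # 0: never adjacent, so pxy == 0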
|
import hashlib
import json
import os
from time import time
from typing import Dict, Optional, Union
from flask import Flask, render_template, request
from flask.typing import ResponseReturnValue
APP_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_DIR = os.path.join(APP_DIR, "static")
DATA_DIR = os.path.join(APP_DIR, "data")
HASHING_ALGORITHM = hashlib.sha3_512
app = Flask(__name__, static_folder=STATIC_DIR)
@app.errorhandler(404)
def page_not_found(e=None) -> ResponseReturnValue:
return "Invalid address!", 404
@app.errorhandler(403)
def unauthorized(e=None) -> ResponseReturnValue:
return "Unauthorized!", 403
@app.route("/")
def index() -> ResponseReturnValue:
return render_template("index.html")
def _fresh_note(name: str) -> ResponseReturnValue:
context = {"name": name, "authorized": True}
return render_template("notes.html", context=context)
@app.route("/<name>", methods=["GET"])
def get_note(name) -> ResponseReturnValue:
context = {"name": name}
note_path = os.path.join(DATA_DIR, f"{name}.json")
note_existed = os.path.isfile(note_path)
if not note_existed:
return _fresh_note(name)
with open(note_path, "r") as inf:
data = json.loads(inf.read())
if (ttl := data.get("ttl")) and ttl < time():
os.remove(note_path)
return _fresh_note(name)
if data.get("password"):
context["message"] = "Note is password-protected"
return render_template("notes.html", context=context)
context["authorized"] = True
context["content"] = data["content"]
context["ttl"] = int(data["ttl"] - time()) if data["ttl"] is not None else ""
return render_template("notes.html", context=context)
@app.route("/<name>", methods=["POST"])
def post_note(name) -> ResponseReturnValue:
context = {"name": name}
note_path = os.path.join(DATA_DIR, f"{name}.json")
note_existed = os.path.isfile(note_path)
content = request.form.get("content")
get_ttl = request.form.get("ttl", "")
try:
ttl = int(get_ttl)
except ValueError:
ttl = None
else:
if ttl <= 0:
return _fresh_note(name)
ttl_timestamp = ttl + time() if ttl is not None else None
password = ""
if (request_password := request.form.get("password")):
        password = HASHING_ALGORITHM(request_password.encode()).hexdigest()
if not note_existed:
return _post_new_note(content, context, note_path, password, ttl_timestamp)
return _post_existing_note(content, context, note_path, password, ttl_timestamp)
def _post_existing_note(
content: Optional[str],
context: Dict[str, Union[str, bool, None]],
note_path: str,
password: str,
ttl: Optional[int],
) -> ResponseReturnValue:
with open(note_path, "r") as inf:
data = json.loads(inf.read())
if (data_ttl := data["ttl"]) is not None and data_ttl < time():
os.remove(note_path)
return _fresh_note(context["name"])
if (previous_password := data["password"]) and (previous_password != password):
return unauthorized()
context["authorized"] = True
if content is None:
context["content"] = data["content"]
return render_template("notes.html", context=context)
data = {"content": content, "password": password, "ttl": ttl}
with open(note_path, "w") as ouf:
ouf.write(json.dumps(data))
context["message"] = "Note saved"
context["content"] = content
context["ttl"] = context["ttl"] = int(ttl - time()) if ttl is not None else ""
return render_template("notes.html", context=context)
def _post_new_note(
content: Optional[str],
context: Dict[str, Union[str, bool, None]],
note_path: str,
password: str,
ttl: Optional[int],
) -> ResponseReturnValue:
data = {"content": content, "password": password, "ttl": ttl}
with open(note_path, "w") as ouf:
ouf.write(json.dumps(data))
context["message"] = "Note saved"
context["authorized"] = True
context["content"] = content
context["ttl"] = int(ttl - time()) if ttl is not None else ""
return render_template("notes.html", context=context)
if __name__ == "__main__":
# Only for debugging while developing
app.run(host="0.0.0.0", debug=True, port=8001)
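# Example interaction sketch (the note name, port and values are hypothetical;
# the routes and form fields come from the handlers above):
#
#   curl http://localhost:8001/mynote
#   curl -X POST -d "content=hello&ttl=60&password=s3cret" http://localhost:8001/mynote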
|
# -*- coding:utf-8 -*-
import sys
sys.path.append("..")
import time
import requests
from bs4 import BeautifulSoup
from tools.Date_Process import time_process
from tools.Emoji_Process import filter_emoji
from tools.Mysql_Process import mysqlHelper
from tools import Cookie_Process
from tools.Mysql_Process import get_db
url_template = 'https://weibo.cn/{}?page={}'  # URL template of the Weibo endpoint to access
flag = 0
"""抓取关键词某一页的数据"""
def fetch_weibo_data(wb_userid, wb_username, page_id):
    cookie = Cookie_Process.read_cookie()  # read the cookie stored in the file
cookies = {
"Cookie": cookie}
    # Fetch the user's Weibo information via 'https://weibo.cn/%d'
url_weibo = "https://weibo.cn/%s?page=%d" % (wb_userid, page_id)
r_weibo = requests.get(url_weibo, cookies=cookies)
soup_weibo = BeautifulSoup(r_weibo.text, 'lxml')
all_contents = soup_weibo.select('.c')[1:-2]
wb_count = 0
    mblog = []  # store the processed Weibo posts
for card in all_contents:
wb_id = str(card.get('id')).split("_")[1]
        wb_content = filter_emoji(card.select_one('.ctt').text)  # Weibo content
temp_href = card.select('a')
for href in temp_href:
if 'comment' in href.get('href') and '原文评论' not in href.text:
wb_commentnum = href.text[3:-1]
if 'attitude' in href.get('href'):
wb_likenum = href.text[2:-1]
if 'repost' in href.get('href'):
wb_forwardnum = href.text[3:-1]
        wb_createtime = time_process(card.select_one('.ct').text.split('\xa0')[0])  # Weibo creation time
        print('Username: ' + wb_username)
        print('User ID: ' + wb_userid)
        print('Weibo ID: ' + wb_id)
        print('Weibo content: ' + wb_content)
        print('Comment count: ' + wb_commentnum)
        print('Like count: ' + wb_likenum)
        print('Repost count: ' + wb_forwardnum)
        print('Creation time: ' + wb_createtime)
print('------------------------------\n')
        blog = {'wb_userid': wb_userid,  # build one Weibo record
'wb_username': wb_username,
'wb_id': wb_id,
'wb_content': wb_content,
'wb_createtime': wb_createtime,
'wb_forwardnum': wb_forwardnum,
'wb_commentnum': wb_commentnum,
'wb_likenum': wb_likenum
}
mblog.append(blog)
        wb_count = wb_count + 1  # number of Weibo posts on this page
global flag
if (wb_count > 0):
print("---------- 第%s页微博爬取完成 ---------- " % page_id + "当前页微博数:" + str(wb_count))
flag = 0
else:
flag = 1
print("********** 第%s页微博开始被反爬,程序自动睡眠5秒后进行爬取...... " % page_id)
time.sleep(5)
print()
print()
return mblog
"""抓取关键词多页的数据"""
def fetch_pages(user_id):
    cookie = Cookie_Process.read_cookie()  # read the cookie stored in the file
cookies = {
"Cookie": cookie}
    # Get the username and total page count from the first Weibo page at 'https://weibo.cn/%d'
url_user = "https://weibo.cn/%s?page=%d" % (user_id, 1)
r_user = requests.get(url_user, cookies=cookies)
soup_user = BeautifulSoup(r_user.text, 'lxml')
    # Check whether the user has posted any Weibo; if not, return
panduan_weibo = soup_user.select_one('.tc').text[3:-1]
if panduan_weibo == '0':
        print('This user has no Weibo posts!')
return
user_contents = soup_user.select_one('.ut').select('.ctt')
temp_user = user_contents[0].text.split()
    wb_username = temp_user[0]  # get the Weibo username
# print(wb_username)
try:
        page_num = int(soup_user.select_one('.pa').text.split()[1].split('/')[1][:-1])  # total number of Weibo pages
        print('--------- Total Weibo pages: ' + str(page_num) + ' ---------\n')
except Exception as e:
page_num = 1
    mblogs = []  # results for this run are buffered in a list first, then written to the database
page_id = 1
while page_id <= page_num:
try:
            mblogs.extend(fetch_weibo_data(user_id, wb_username, page_id))  # fetch Weibo info page by page
if (flag == 1):
continue
except Exception as e:
print(e)
        if (page_id % 50 == 0):  # commit and insert into the database every N pages
            # save to the MySQL database
mh = mysqlHelper(get_db()[0], get_db()[1], get_db()[2], get_db()[3], get_db()[4], int(get_db()[5]))
sql = "insert into user_weibo(wb_userid,wb_username,wb_id,wb_content,wb_createtime,wb_forwardnum,wb_commentnum,wb_likenum) values(%s,%s,%s,%s,%s,%s,%s,%s)"
mh.open();
for i in range(len(mblogs)):
mh.cud(sql, (mblogs[i]['wb_userid'], mblogs[i]['wb_username'], mblogs[i]['wb_id'],
filter_emoji(mblogs[i]['wb_content']),
mblogs[i]['wb_createtime'], mblogs[i]['wb_forwardnum'], mblogs[i]['wb_commentnum'],
mblogs[i]['wb_likenum']))
mh.tijiao();
mh.close()
            mblogs = []  # clear the list after committing to the database
page_id = page_id + 1
    if len(mblogs) > 0:  # commit the remaining data to the database
        # save to the MySQL database
mh = mysqlHelper(get_db()[0], get_db()[1], get_db()[2], get_db()[3], get_db()[4], int(get_db()[5]))
sql = "insert into user_weibo(wb_userid,wb_username,wb_id,wb_content,wb_createtime,wb_forwardnum,wb_commentnum,wb_likenum) values(%s,%s,%s,%s,%s,%s,%s,%s)"
mh.open();
for i in range(len(mblogs)):
mh.cud(sql, (
mblogs[i]['wb_userid'], mblogs[i]['wb_username'], mblogs[i]['wb_id'], filter_emoji(mblogs[i]['wb_content']),
mblogs[i]['wb_createtime'], mblogs[i]['wb_forwardnum'], mblogs[i]['wb_commentnum'],
mblogs[i]['wb_likenum']))
mh.tijiao();
mh.close()
if __name__ == '__main__':
Cookie_Process.write_cookie()
    user_id = input("Enter the user ID to search: ")
time_start = time.time()
fetch_pages(user_id)
time_end = time.time()
    print('All data crawled successfully; elapsed seconds:', (time_end - time_start))
|
<gh_stars>0
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base handler class."""
import abc
import json
import os
import subprocess
import sys
import tempfile
from typing import Any, Collection, Dict, List, Text
import click
from six import with_metaclass
from tfx.dsl.components.base import base_driver
from tfx.dsl.io import fileio
from tfx.tools.cli import labels
from tfx.utils import io_utils
class BaseHandler(with_metaclass(abc.ABCMeta, object)):
"""Base Handler for CLI.
Attributes:
flags_dict: A dictionary with flags provided in a command.
"""
def __init__(self, flags_dict: Dict[Text, Any]):
self.flags_dict = flags_dict
self._handler_home_dir = self._get_handler_home()
@abc.abstractmethod
def create_pipeline(self) -> None:
"""Creates pipeline for the handler."""
pass
@abc.abstractmethod
def update_pipeline(self) -> None:
"""Updates pipeline for the handler."""
pass
@abc.abstractmethod
def list_pipelines(self) -> None:
"""List all the pipelines in the environment."""
pass
@abc.abstractmethod
def delete_pipeline(self) -> None:
"""Deletes pipeline for the handler."""
pass
@abc.abstractmethod
def compile_pipeline(self) -> None:
"""Compiles pipeline for the handler."""
pass
@abc.abstractmethod
def create_run(self) -> None:
"""Runs a pipeline for the handler."""
pass
@abc.abstractmethod
def delete_run(self) -> None:
"""Deletes a run."""
pass
@abc.abstractmethod
def terminate_run(self) -> None:
"""Stops a run."""
pass
@abc.abstractmethod
def list_runs(self) -> None:
"""Lists all runs of a pipeline."""
pass
@abc.abstractmethod
def get_run(self) -> None:
"""Checks run status."""
pass
def _check_pipeline_dsl_path(self) -> None:
"""Check if pipeline dsl path exists."""
pipeline_dsl_path = self.flags_dict[labels.PIPELINE_DSL_PATH]
if not fileio.exists(pipeline_dsl_path):
sys.exit('Invalid pipeline path: {}'.format(pipeline_dsl_path))
def _check_dsl_runner(self) -> None:
"""Check if runner in dsl is same as engine flag."""
engine_flag = self.flags_dict[labels.ENGINE_FLAG]
with open(self.flags_dict[labels.PIPELINE_DSL_PATH], 'r') as f:
dsl_contents = f.read()
runner_names = {
labels.AIRFLOW_ENGINE: 'AirflowDagRunner',
labels.KUBEFLOW_ENGINE: 'KubeflowDagRunner',
labels.BEAM_ENGINE: 'BeamDagRunner',
labels.LOCAL_ENGINE: 'LocalDagRunner',
}
if runner_names[engine_flag] not in dsl_contents:
sys.exit('{} runner not found in dsl.'.format(engine_flag))
def _extract_pipeline_args(self) -> Dict[Text, Any]:
"""Get pipeline args from the DSL.
Returns:
Python dictionary with pipeline details extracted from DSL.
"""
    # TODO(b/157599419): Consider using a better way to extract pipeline info:
    # e.g. pipeline name/root. Currently we rely on consulting an env var when
    # creating the Pipeline object, which is brittle.
pipeline_dsl_path = self.flags_dict[labels.PIPELINE_DSL_PATH]
if os.path.isdir(pipeline_dsl_path):
      sys.exit('Provide a path to the pipeline DSL file, not a directory.')
# Create an environment for subprocess.
temp_env = os.environ.copy()
# Create temp file to store pipeline_args from pipeline dsl.
temp_file = tempfile.mkstemp(prefix='cli_tmp_', suffix='_pipeline_args')[1]
# Store temp_file path in temp_env.
# LINT.IfChange
temp_env[labels.TFX_JSON_EXPORT_PIPELINE_ARGS_PATH] = temp_file
# LINT.ThenChange(
# ../../../orchestration/beam/beam_dag_runner.py,
# ../../../orchestration/local/local_dag_runner.py,
# )
# Run dsl with mock environment to store pipeline args in temp_file.
self._subprocess_call([sys.executable, pipeline_dsl_path], env=temp_env)
if os.stat(temp_file).st_size != 0:
# Load pipeline_args from temp_file for TFX pipelines
with open(temp_file, 'r') as f:
pipeline_args = json.load(f)
else:
# For non-TFX pipelines, extract pipeline name from the dsl filename.
pipeline_args = {
labels.PIPELINE_NAME:
os.path.basename(pipeline_dsl_path).split('.')[0]
}
# Delete temp file
io_utils.delete_dir(temp_file)
return pipeline_args
def _get_handler_home(self) -> Text:
"""Sets handler home.
Returns:
Path to handler home directory.
"""
engine_flag = self.flags_dict[labels.ENGINE_FLAG]
handler_home_dir = engine_flag.upper() + '_HOME'
if handler_home_dir in os.environ:
return os.environ[handler_home_dir]
return os.path.join(os.environ['HOME'], 'tfx', engine_flag, '')
def _get_deprecated_handler_home(self) -> Text:
"""Sets old handler home for compatibility.
Returns:
Path to handler home directory.
"""
engine_flag = self.flags_dict[labels.ENGINE_FLAG]
handler_home_dir = engine_flag.upper() + '_HOME'
if handler_home_dir in os.environ:
return os.environ[handler_home_dir]
return os.path.join(os.environ['HOME'], engine_flag, '')
def _subprocess_call(self,
command: List[Text],
env: Dict[Text, Any] = None) -> None:
return_code = subprocess.call(command, env=env)
if return_code != 0:
sys.exit('Error while running "{}" '.format(' '.join(command)))
def _format_table(self, header: Collection[Any],
data: Collection[Collection[Any]]):
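    # Renders `header` and `data` as a text grid. Illustrative example (not from
    # the original source): _format_table(['name', 'status'], [['p1', 'running']])
    # returns:
    #   +======+=========+
    #   | name | status  |
    #   +======+=========+
    #   | p1   | running |
    #   +======+=========+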
def _format_as_strings(items):
return [f' {item} ' for item in items]
header = _format_as_strings(header)
data = [_format_as_strings(row) for row in data]
max_widths = [len(s) for s in header]
for row in data:
max_widths = [
max(c_max, len(item)) for c_max, item in zip(max_widths, row)
]
def _make_line(record, widths, sep='|', fill=' '):
return (sep + sep.join(
item.ljust(width, fill) for item, width in zip(record, widths)) +
sep + '\n')
empty_data = [''] * len(header) # empty data for horizontal line
double_separator = _make_line(empty_data, max_widths, '+', '=')
single_separator = _make_line(empty_data, max_widths, '+', '-')
result = double_separator
result += _make_line(header, max_widths)
result += double_separator
result += single_separator.join(
_make_line(record, max_widths) for record in data)
result += double_separator
return result
def _check_pipeline_existence(self,
pipeline_name: Text,
required: bool = True) -> None:
"""Check if pipeline folder exists and if not, exit system.
Args:
pipeline_name: Name of the pipeline.
      required: Set to True if the pipeline must already exist; set to False if it must not exist.
"""
handler_pipeline_path = os.path.join(self._handler_home_dir, pipeline_name)
# Check if pipeline folder exists.
exists = fileio.exists(handler_pipeline_path)
if required and not exists:
      # Check the pipeline directory layout used prior to TFX 0.25 and move
      # files to the new location automatically.
old_handler_pipeline_path = os.path.join(
self._get_deprecated_handler_home(), pipeline_name)
if fileio.exists(old_handler_pipeline_path):
fileio.makedirs(os.path.dirname(handler_pipeline_path))
fileio.rename(old_handler_pipeline_path, handler_pipeline_path)
engine_flag = self.flags_dict[labels.ENGINE_FLAG]
handler_home_variable = engine_flag.upper() + '_HOME'
click.echo(
('[WARNING] Pipeline "{pipeline_name}" was found in "{old_path}", '
           'but the location where TFX stores pipeline information has moved '
           'since TFX 0.25.0.\n'
           '[WARNING] Your files in "{old_path}" were automatically moved to '
'the new location, "{new_path}".\n'
'[WARNING] If you want to keep the files at the old location, set '
'`{handler_home}` environment variable to "{old_handler_home}".'
).format(
pipeline_name=pipeline_name,
old_path=old_handler_pipeline_path,
new_path=handler_pipeline_path,
handler_home=handler_home_variable,
old_handler_home=self._get_deprecated_handler_home()),
err=True)
else:
sys.exit('Pipeline "{}" does not exist.'.format(pipeline_name))
elif not required and exists:
sys.exit('Pipeline "{}" already exists.'.format(pipeline_name))
def get_schema(self):
pipeline_name = self.flags_dict[labels.PIPELINE_NAME]
# Check if pipeline exists.
self._check_pipeline_existence(pipeline_name)
# Path to pipeline args.
pipeline_args_path = os.path.join(self._handler_home_dir,
self.flags_dict[labels.PIPELINE_NAME],
'pipeline_args.json')
# Get pipeline_root.
with open(pipeline_args_path, 'r') as f:
pipeline_args = json.load(f)
self._read_schema_from_pipeline_root(pipeline_name,
pipeline_args[labels.PIPELINE_ROOT])
def _read_schema_from_pipeline_root(self, pipeline_name, pipeline_root):
# Check if pipeline root created. If not, it means that the user has not
# created a run yet or the pipeline is still running for the first time.
if not fileio.exists(pipeline_root):
sys.exit(
'Create a run before inferring schema. If pipeline is already running, then wait for it to successfully finish.'
)
# If pipeline_root exists, then check if SchemaGen output exists.
components = fileio.listdir(pipeline_root)
if 'SchemaGen' not in components:
sys.exit(
'Either SchemaGen component does not exist or pipeline is still running. If pipeline is running, then wait for it to successfully finish.'
)
# Get the latest SchemaGen output.
component_output_dir = os.path.join(pipeline_root, 'SchemaGen')
schema_dir = os.path.join(component_output_dir, 'schema')
schemagen_outputs = fileio.listdir(schema_dir)
latest_schema_folder = max(schemagen_outputs, key=int)
# Copy schema to current dir.
latest_schema_uri = base_driver._generate_output_uri( # pylint: disable=protected-access
component_output_dir, 'schema', int(latest_schema_folder))
latest_schema_path = os.path.join(latest_schema_uri, 'schema.pbtxt')
curr_dir_path = os.path.join(os.getcwd(), 'schema.pbtxt')
io_utils.copy_file(latest_schema_path, curr_dir_path, overwrite=True)
# Print schema and path to schema
click.echo('Path to schema: {}'.format(curr_dir_path))
click.echo('*********SCHEMA FOR {}**********'.format(pipeline_name.upper()))
with open(curr_dir_path, 'r') as f:
click.echo(f.read())
|
import csv
import sys
import os
import mapdamage
import pysam
import itertools
import math
import logging
import time
def phred_pval_to_char(pval):
"""Transforming error rate to ASCII character using the Phred scale"""
return chr(int(round(-10*math.log10(abs(pval)))+33))
def phred_char_to_pval(ch):
"""Transforming ASCII character in the Phred scale to the error rate"""
return 10**(-(float(ord(ch))-float(33))/10)
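# Quick sanity check (illustrative): a Phred score of 30 corresponds to an error
# rate of 1e-3, so phred_pval_to_char(0.001) == '?' (ASCII 63) and
# phred_char_to_pval('?') == 0.001.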
def get_corr_prob(folder, rescale_length_5p, rescale_length_3p):
"""
Reads the damage probability correction file, returns a
dictionary with this structure
position (one based) - CT - probability
- GA - probability
"""
full_path = os.path.join(folder,"Stats_out_MCMC_correct_prob.csv")
if not os.path.isfile(full_path):
sys.exit("Missing file, the file \n\tStats_out_MCMC_correct_prob.csv\nshould be in the folder\n\t"+folder+"\nDid you run the MCMC estimates of the parameters?")
try:
fi_handle = csv.DictReader(open(full_path))
corr_prob = {}
        for line in fi_handle:
            position = int(line["Position"])
            if position in corr_prob:
                sys.exit('This file has multiple position definitions %s, line %d: %s' %
                         (folder, fi_handle.line_num, corr_prob[position]))
            else:
                corr_prob[position] = {'C.T': float(line["C.T"]), 'G.A': float(line["G.A"])}
        # Exclude probabilities for positions outside of the user-specified region
        for key in list(corr_prob.keys()):
            if key < -rescale_length_3p or key > rescale_length_5p:
                corr_prob.pop(key)
return corr_prob
except csv.Error as e:
sys.exit('File %s, line %d: %s' % (os.path.join(folder,"Stats_out_MCMC_correct_prob.csv"), \
fi_handle.line_num, e))
def corr_this_base(corr_prob, nt_seq, nt_ref, pos, length,direction="both"):
"""
The position specific damaging correction, using the input
corr_prob dictionary holding the damage correcting values
nt_seq nucleotide in the sequence
nt_ref nucleotide in the reference
pos relative position from the 5' end
length length of the sequence
direction which end to consider the rescaling
returns the correction probability for this particular set
"""
if (pos == 0):
# not using 0 based indexing
raise SystemError
if ( nt_seq == "T" and nt_ref == "C" ):
# an C to T transition
subs = "C.T"
elif( nt_seq == "A" and nt_ref == "G" ):
# an G to A transition
subs = "G.A"
else:
# other transitions/transversions are not affected by damage
return 0
back_pos = pos-length-1
# position from 3' end
    if pos in corr_prob:
p5_corr = corr_prob[pos][subs]
# correction from 5' end
else:
p5_corr = 0
    if back_pos in corr_prob:
p3_corr = corr_prob[back_pos][subs]
# correction from 3' end
else:
p3_corr = 0
if direction == "forward":
return p5_corr
elif direction == "backward":
return p3_corr
elif direction == "both":
if pos < abs(back_pos) :
# then we use the forward correction
return p5_corr
else :
# else the backward correction
return p3_corr
else:
# this should not happen
raise SystemExit("Abnormal direction in the rescaling procedure")
def initialize_subs():
"""Initialize a substitution table, to track the expected substitution counts"""
per_qual = dict(zip(range(0,130),[0]*130))
subs = {"CT-before":per_qual.copy(),\
"TC-before":per_qual.copy(),\
"GA-before":per_qual.copy(),\
"AG-before":per_qual.copy(),\
"CT-after":per_qual.copy(),\
"TC-after":per_qual.copy(),\
"GA-after":per_qual.copy(),\
"AG-after":per_qual.copy(),\
"A":0,\
"C":0,\
"G":0,\
"T":0,\
"CT-pvals":0.0,\
"CT-pvals_before":0.0,\
"TC-pvals":0.0,\
"GA-pvals":0.0,\
"GA-pvals_before":0.0,\
"AG-pvals":0.0,\
}
return subs
def record_subs(subs,nt_seq,nt_ref,nt_qual,nt_newqual,prob_corr):
""" record the expected substitution change, prob_corr is the excact version for nt_qual"""
if ( nt_seq == "T" and nt_ref == "C"):
sub_type = "CT"
subs["CT-pvals"] += prob_corr
subs["CT-pvals_before"] += 1-phred_char_to_pval(nt_qual)
elif ( nt_seq == "A" and nt_ref == "G"):
sub_type = "GA"
subs["GA-pvals"] += prob_corr
subs["GA-pvals_before"] += 1-phred_char_to_pval(nt_qual)
elif ( nt_seq == "C" and nt_ref == "T"):
sub_type = "TC"
subs["TC-pvals"] += 1-phred_char_to_pval(nt_qual)
if (nt_qual != nt_newqual):
raise SystemError("Internal error: rescaling qualities for the wrong transitions")
elif ( nt_seq == "G" and nt_ref == "A"):
sub_type = "AG"
subs["AG-pvals"] += 1-phred_char_to_pval(nt_qual)
if (nt_qual != nt_newqual):
raise SystemError("Internal error: rescaling qualities for the wrong transitions")
else:
sub_type = "NN"
if (sub_type != "NN"):
# record only transitions
subs[sub_type+"-before"][int(ord(nt_qual))-33] += 1
subs[sub_type+"-after"][int(ord(nt_newqual))-33] += 1
if (nt_ref in ["A","C","G","T"]):
subs[nt_ref] += 1
def qual_summary_subs(subs):
"""Calculates summary statistics for the substition table subs"""
for i in ["CT-before","TC-before","GA-before","AG-before","CT-after","TC-after","GA-after","AG-after"]:
for lv in [0,10,20,30,40]:
for qv in subs[i]:
if qv >= lv :
                    key = i + "-Q" + str(lv)
                    if key in subs:
subs[key] += subs[i][qv]
else:
subs[key] = subs[i][qv]
def print_subs(subs):
"""Print the substition table"""
print("\tThe expected substition frequencies before and after scaling using the scaled qualities as probalities:")
if subs["C"]!=0:
# the special case of no substitutions
print("\tCT\t"+str(subs["CT-pvals_before"]/subs["C"])+"\t\t"+str(subs["CT-pvals"]/subs["C"]))
else:
print("\tCT\tNA\t\tNA")
if subs["T"]!=0:
print("\tTC\t"+str(subs["TC-pvals"]/subs["T"])+"\t\t"+str(subs["TC-pvals"]/subs["T"]))
else:
print("\tTC\tNA\t\tNA")
if subs["G"]!=0:
print("\tGA\t"+str(subs["GA-pvals_before"]/subs["G"])+"\t\t"+str(subs["GA-pvals"]/subs["G"]))
else:
print("\tGA\tNA\t\tNA")
if subs["A"]!=0:
print("\tAG\t"+str(subs["AG-pvals"]/subs["A"])+"\t\t"+str(subs["AG-pvals"]/subs["A"]))
else:
print("\tAG\tNA\t\tNA")
print("\tQuality metrics before and after scaling")
print("\tCT-Q0 \t"+str(subs["CT-before-Q0"])+"\t\t"+str(subs["CT-after-Q0"]))
print("\tCT-Q10 \t"+str(subs["CT-before-Q10"])+"\t\t"+str(subs["CT-after-Q10"]))
print("\tCT-Q20 \t"+str(subs["CT-before-Q20"])+"\t\t"+str(subs["CT-after-Q20"]))
print("\tCT-Q30 \t"+str(subs["CT-before-Q30"])+"\t\t"+str(subs["CT-after-Q30"]))
print("\tCT-Q40 \t"+str(subs["CT-before-Q40"])+"\t\t"+str(subs["CT-after-Q40"]))
print("\tGA-Q0 \t"+str(subs["GA-before-Q0"])+"\t\t"+str(subs["GA-after-Q0"]))
print("\tGA-Q10 \t"+str(subs["GA-before-Q10"])+"\t\t"+str(subs["GA-after-Q10"]))
print("\tGA-Q20 \t"+str(subs["GA-before-Q20"])+"\t\t"+str(subs["GA-after-Q20"]))
print("\tGA-Q30 \t"+str(subs["GA-before-Q30"])+"\t\t"+str(subs["GA-after-Q30"]))
print("\tGA-Q40 \t"+str(subs["GA-before-Q40"])+"\t\t"+str(subs["GA-after-Q40"]))
def rescale_qual_read(bam, read, ref, corr_prob,subs, debug = False,direction="both"):
"""
bam a pysam bam object
read a pysam read object
ref a pysam fasta ref file
reflengths a dictionary holding the length of the references
    subs             a dictionary holding the corrected number of substitutions before and after rescaling
corr_prob dictionary from get_corr_prob
returns a read with rescaled quality score
Iterates through the read and reference, rescales the quality
according to corr_prob
"""
if not debug:
# no need to log when unit testing
logger = logging.getLogger(__name__)
raw_seq = read.query
# external coordinates 5' and 3' , 0-based offset
coordinate = mapdamage.align.get_coordinates(read)
# fetch reference name, chromosome or contig names
chrom = bam.getrname(read.tid)
refseq = ref.fetch(chrom, min(coordinate), max(coordinate)).upper()
# add gaps to qualities and mask read and reference nucleotides if below desired threshold
(seq, qual, refseq) = mapdamage.align.align_with_qual(read.cigar, \
raw_seq, read.qqual, -100, refseq)
length_read = len(raw_seq)
length_align = len(seq)
# reverse complement read and reference when mapped reverse strand
if read.is_reverse:
refseq = mapdamage.seq.revcomp(refseq)
seq = mapdamage.seq.revcomp(seq)
qual = qual[::-1]
new_qual = [-100]*length_read
pos_on_read = 0
number_of_rescaled_bases = 0.0
    for (i, nt_seq, nt_ref, nt_qual) in zip(range(length_align), seq, refseq, qual):
# rescale the quality according to the triplet position,
# pair of the reference and the sequence
if ((nt_seq == "T" and nt_ref =="C") or (nt_seq == "A" and nt_ref =="G")):
# need to rescale this subs.
pdam = 1 - corr_this_base(corr_prob, nt_seq, nt_ref, pos_on_read + 1, length_read,direction=direction)
pseq = 1 - phred_char_to_pval(nt_qual)
newp = pdam*pseq # this could be numerically unstable
newq = phred_pval_to_char(1-newp)
number_of_rescaled_bases += 1-pdam
else:
# don't rescale, other bases
newp = 1 - phred_char_to_pval(nt_qual)
newq = nt_qual
if pos_on_read < length_read:
new_qual[pos_on_read] = newq
record_subs(subs,nt_seq,nt_ref,nt_qual,new_qual[pos_on_read],newp)
if nt_seq != "-":
pos_on_read += 1
# done with the aligned portion of the read
else:
if not debug:
logger.warning("Warning: The aligment of the read is longer than the actual read %s",(read.qname))
break
new_qual = "".join(new_qual)
if read.is_reverse:
new_qual = new_qual[::-1]
if (read.cigar[0][0] == 4):
# check for soft clipping at forward end
new_qual = read.qual[0:read.cigar[0][1]] + new_qual
if (read.cigar[-1][0] == 4):
# the same backwards
new_qual = new_qual + read.qual[-read.cigar[-1][1]:]
read.qual = new_qual
# truncate this to 5 digits
number_of_rescaled_bases = float("%.5f" % number_of_rescaled_bases)
if read.has_tag("MR"):
raise SystemExit("Read: %s already has a MR tag, can't rescale" % read)
read.set_tag("MR", number_of_rescaled_bases, 'f')
return read
def rescale_qual(ref, options,debug=False):
"""
ref a pysam fasta ref file
bam_filename name of a BAM/SAM file to read
fi file containing the csv with correction probabilities
reflengths dictionary with the reference lengths
options options from the command line parsing
Iterates through BAM file, makes a new BAM file with rescaled qualities.
"""
if not debug:
# no need to log when unit testing
logger = logging.getLogger(__name__)
logger.info("Rescaling BAM: '%s' -> '%s'" % (options.filename, options.rescale_out))
start_time = time.time()
# open SAM/BAM files
bam = pysam.Samfile(options.filename)
if debug:
write_mode = "wh"
else:
write_mode = "wb"
bam_out = pysam.Samfile(options.rescale_out,write_mode, template = bam)
corr_prob = get_corr_prob(options.folder,
rescale_length_5p=options.rescale_length_5p,
rescale_length_3p=options.rescale_length_3p)
subs = initialize_subs()
first_pair = True
number_of_non_proper_pairs = 0
for hit in bam:
if hit.is_unmapped:
pass
elif not hit.qual and not debug:
logger.warning("Cannot rescale base PHRED scores for read '%s'; no scores assigned." % hit.qname)
elif hit.is_paired :
if first_pair and not debug:
# assuming the ends are non-overlapping
logger.warning("Warning! Assuming the pairs are non-overlapping, facing inwards and correctly paired.")
first_pair=False
#5p --------------> 3p
#3p <-------------- 5p
# pair 1 (inwards)
#5p ---->
# <---- 5p
# A B
            # pair 2 (outwards): this happens if the reference is reverse-complemented; this is not supported
# ----> 3p
#3p <----
# A B
# Correct outwards pairs from the 3p and inwards pairs with the 5p end
if ((not hit.is_reverse) and hit.mate_is_reverse and (hit.pnext>hit.pos) and hit.tid==hit.mrnm):
# the inwards case mate A
hit = rescale_qual_read(bam, hit, ref, corr_prob,subs,direction="forward",debug=debug)
elif (hit.is_reverse and (not hit.mate_is_reverse) and (hit.pnext<hit.pos) and hit.tid==hit.mrnm):
# the inwards case mate B
hit = rescale_qual_read(bam, hit, ref, corr_prob,subs,direction="forward",debug=debug)
else:
number_of_non_proper_pairs += 1
# cannot do much with conflicting pairing information
else:
hit = rescale_qual_read(bam, hit, ref, corr_prob,subs,debug=debug)
bam_out.write(hit)
if number_of_non_proper_pairs!=0 and not debug:
logger.warning("Number of non-rescaled reads due to improper pairing: %d" % number_of_non_proper_pairs)
if (subs["TC-before"] != subs["TC-after"] or subs["AG-before"] != subs["AG-after"]):
sys.exit("Qualities for T.C and A.G transitions should not change in the rescaling, please contact the authors.")
qual_summary_subs(subs)
bam.close()
bam_out.close()
if not options.quiet:
print_subs(subs)
if not debug:
logger.debug("Rescaling completed in %f seconds" % (time.time() - start_time,))
|
import shutil
import subprocess
import sys
import time
from os import path
from InquirerPy import inquirer
from InquirerPy.base import Choice
from Classes import Directories, ANSI, Statistics
from Classes.Logger import Logger
from Classes.Movie import Movie
from definitions import const, helpers
upload_limit = 6
def check_name():
check = Directories.downloads.contains("title.mkv")
if check and not check.is_locked():
Directories.downloads.print()
name = inquirer.text(message="Please rename title.mkv in Downloaded: ",
raise_keyboard_interrupt=False).execute()
if name:
check.rename(name)
def change_name():
check_name()
# Give selection prompt
selected: Movie = inquirer.select(
message="Which movie would you like to rename?",
choices=[Choice(movie, name=movie.name) for movie in helpers.get_all_movie()] +
[Choice(value=None, name="Exit")],
show_cursor=False,
raise_keyboard_interrupt=False
).execute()
if not selected:
return
name = inquirer.text(message=f"Please provide the new name for {selected.name}: ",
raise_keyboard_interrupt=False, default=selected.remove_extension()).execute()
if not name:
return
confirmed = inquirer.confirm(
message=f"Are you sure you want to rename {selected.name} to {name}{selected.get_extension()}?",
raise_keyboard_interrupt=False).execute()
if confirmed:
selected.rename(name)
def get_external_info():
total, used, free = shutil.disk_usage(const.external_drive)
print("Total: %d GB" % (total // (2 ** 30)))
print("Free: %d GB" % (free // (2 ** 30)))
print("Used: %d GB" % (used // (2 ** 30)))
def get_dir_info():
get_external_info()
print()
Directories.downloads.print()
Directories.queued.print()
Directories.ready.print()
def sort():
sort_downloaded()
clean_compression_queue()
def sort_downloaded():
check_name()
print("Sorting movies in Downloaded...")
for movie in Directories.downloads.get_movies():
if movie.is_locked():
continue
if movie.size < const.upload_limit:
movie.move_to_upload()
else:
movie.move_to_compression()
print()
def clean_compression_queue():
print("Cleaning movies in Ready for Compression...")
for movie in Directories.queued.get_movies():
if movie.is_compressed():
movie.delete()
print()
def run_compression():
logger = Logger()
eta = Statistics.compression.estimate(Directories.queued.get_size())
logger.log_and_print(f"Compressing movies in Ready for Compression... [ETA: {eta}]")
queued = Directories.queued.get_movies()
total_tasks = len(queued)
current_task = 0
master_start_time = time.time()
log_cache = []
for movie in queued:
target = 0
current_task += 1
output_path = Directories.ready.append(movie.name).replace(".mkv", ".mp4")
handbrake_command = [r"HandBrakeCLI.exe", "-i", f"{movie.path}", "-o",
f"{output_path}", "-e", "x264", "-q", "20", "-B", "160"]
start_time = time.time()
process = subprocess.Popen(handbrake_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True)
for line in process.stdout:
if helpers.process_compression_output(movie.name, current_task, total_tasks, line, target, logger):
target += 10
compressed_movie_size = helpers.convert_to_gb(path.getsize(output_path))
run_time = helpers.run_time(start_time)
Statistics.compression.add_stat(movie.size, run_time)
output_log = f"Compressed {movie.name} from {movie.size} GB to {compressed_movie_size} " \
f"GB in {helpers.format_time(run_time)}"
log_cache.append(output_log)
print()
logger.log_and_print(
f"Completed {total_tasks} compression(s) in {helpers.format_time(helpers.run_time(master_start_time))}... "
f"[ETA: {eta}]")
for log in log_cache:
logger.log_and_print(log)
print()
def upload_to_nas():
num_uploads = Directories.ready.get_movies_cnt()
uploads_left = Directories.ready.get_movies_cnt()
size_total = Directories.ready.get_size()
eta = Statistics.upload.estimate(size_total)
print(f"Uploading movies in Ready for Upload... [ETA: {eta}]\n")
start_time = time.time()
for idx, movie in enumerate(Directories.ready.get_movies()):
print(
f"{ANSI.up_char(idx + 1)}{uploads_left} movie(s) left to upload - [{size_total} GB]{ANSI.clr_char()}{ANSI.down_char(idx)}")
uploads_left = uploads_left - 1
if movie.is_locked():
continue
movie.upload_to_nas()
Statistics.upload.add_stat(movie.size, helpers.run_time(start_time))
size_total = size_total - movie.size
print(
f"{ANSI.up_char(num_uploads + 1)}{uploads_left} movie(s) left to upload - [{size_total} GB]{ANSI.clr_char()}{ANSI.down_char(num_uploads)}")
print(f"\nUploaded {num_uploads} movies in {helpers.format_time(helpers.run_time(start_time))}\n")
def dev_func():
Statistics.compression.print_stat()
print()
Statistics.upload.print_stat()
sys.exit()
def mark_failure():
check_name()
if Directories.downloads.get_movies_cnt() > 0:
last_rip = Directories.downloads.get_movies()[0]
last_rip_name = last_rip.remove_extension()
confirmed = inquirer.confirm(message=f"Are you sure you want to mark {last_rip_name} as a failure?",
raise_keyboard_interrupt=False).execute()
if confirmed:
if helpers.write_failure(last_rip_name):
last_rip.delete()
else:
custom = inquirer.text(message="What is the name of the failure?: ",
raise_keyboard_interrupt=False).execute()
if custom:
helpers.write_failure(custom)
else:
custom = inquirer.text(message="What is the name of the failure?: ", raise_keyboard_interrupt=False).execute()
if custom:
helpers.write_failure(custom)
def mark_series():
series = inquirer.text(message="What is the name of the series?: ", raise_keyboard_interrupt=False).execute()
if series:
helpers.write_series(series)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Models declaration for application ``django_mailbox``.
"""
from email.encoders import encode_base64
from email.message import Message as EmailMessage
from email.utils import formatdate, parseaddr, parsedate_tz, parsedate_to_datetime
from quopri import encode as encode_quopri
import base64
import email
import logging
import mimetypes
import os.path
import sys
import uuid
import six
from urllib import parse
import django
from django.conf import settings as django_settings
from django.core.files.base import ContentFile
from django.core.mail.message import make_msgid
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from model_utils.managers import InheritanceManager
from communication.utils import comm_utils
from communication.transports.harvest_transports import HarvestImapTransport, HarvestPop3Transport, HarvestGmailTransport, \
HarvestImapExchangeTransport
from communication.transports import transport_exceptions
from cryptographic_fields.fields import EncryptedCharField
from phonenumber_field.modelfields import PhoneNumberField
from oauth2client.contrib.django_util.models import CredentialsField
import assets
import crm
from crm.models import Person
from cedar_settings.models import GeneralSetting
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# For DRF Serializing See:
# http://www.django-rest-framework.org/api-guide/relations/#rest-framework-generic-relations
class ActiveObjectManager(models.Manager):
"""
Filters all objects that are not active.
Requires a boolean field called 'active'
"""
def get_queryset(self):
return super(ActiveObjectManager, self).get_queryset().filter(active=True)
class Communication(models.Model):
"""
Sort-of parent object for Communication Type (Phone, Fax, Message, etc.) objects.
Note: a post_delete signal is attached that will delete the comm_type instance when
a Communication instance is deleted. Since CommunicationRelation objects will cascade
we don't have to worry about those.
"""
subject = models.TextField(max_length=1000)
date = models.DateTimeField(verbose_name='Date & time of communication.')
from_contacts = models.ManyToManyField(crm.models.Person, related_name='from_contact')
to_contacts = models.ManyToManyField(crm.models.Person, related_name='to_contact')
# Generic foreign keys to communication types (Fax, PhoneCall, Message, etc.)
comm_type_ct = models.ForeignKey(ContentType)
comm_type_oid = models.PositiveIntegerField()
comm_type = GenericForeignKey('comm_type_ct', 'comm_type_oid')
class Meta:
ordering = ['-date']
def __str__(self):
return '{}: {}'.format(self.date, self.subject)
@classmethod
def create_communication(cls, subject, date, from_contacts, to_contacts, comm_type_obj):
"""
Takes a communication type object, creates a communication instance and creates a relation between the two.
:param subject:
:param date:
:param from_contacts:
:param to_contacts:
:param comm_type_obj: PhoneCall, Message, Fax, etc.
:return:
"""
comm = Communication(
subject=subject,
date=date,
)
comm.comm_type = comm_type_obj
comm.save()
if from_contacts:
comm.from_contacts = from_contacts
if to_contacts:
comm.to_contacts = to_contacts
comm.save()
return comm
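        # Illustrative usage (assumed instances, not from the original source):
        #   comm = Communication.create_communication(
        #       subject="Follow-up call", date=timezone.now(),
        #       from_contacts=[person_a], to_contacts=[person_b],
        #       comm_type_obj=phone_call)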
@classmethod
def create_related_communication(cls, subject, date, from_contacts, to_contacts, comm_type_obj, related_obj):
"""
Takes a communication type object, creates a communication instance and creates a relation between the two.
:param subject:
:param date:
:param from_contacts:
:param to_contacts:
:param comm_type_obj: PhoneCall, Message, Fax, etc.
:param related_obj: eg HER Prj, DEV Prj, etc.
:return:
"""
comm = Communication(
subject=subject,
date=date,
# comm_type=comm_type_obj
)
comm.comm_type = comm_type_obj
if from_contacts:
comm.from_contacts = from_contacts
if to_contacts:
comm.to_contacts = to_contacts
comm.save()
CommunicationRelation(
comm=comm,
related_object=related_obj
).save()
return comm
@classmethod
def get_communications_related_to(cls, related_object):
"""
Takes some object (eg development project instance) and returns communication objects
related to it.
:param related_object:
:return: communication queryset
"""
return Communication.objects.filter(
related_communication__related_object_oid=related_object.id,
related_communication__related_object_ct=ContentType.objects.get_for_model(related_object)
)
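        # Illustrative usage (assumes `project` is any saved related model instance):
        #   comms = Communication.get_communications_related_to(project)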
def get_absolute_url(self):
"""
:return:the url to the comm_type object not the parent communication object itself.
"""
return self.comm_type.get_absolute_url()
class CommunicationRelation(models.Model):
"""
Relates a communication instance to some other model in the database. The expectation
for now is that the related_object will be a development project or heritage project.
"""
comm = models.ForeignKey(Communication, related_name='related_communication')
related_object_ct = models.ForeignKey(ContentType)
related_object_oid = models.PositiveIntegerField()
related_object = GenericForeignKey('related_object_ct', 'related_object_oid')
def __str__(self):
return "{}: {}: {}".format(self.comm.date, self.comm.comm_type_ct, self.related_object)
class CommunicationAsset(assets.models.SecureAsset):
"""
This is a deprecated model - created prior to generic link assets.
"""
@property
def storage_string(self):
return "communication_assets"
objects = InheritanceManager()
class CommunicationFileRelation(models.Model):
"""
Provides a method for communication type instances to have a x-many
relationship with any (asset) model instance(s). The presumption here is that
the "asset" points to an implementation of the assets.SecureAsset class.
"""
asset_ct = models.ForeignKey(ContentType, related_name='communicationfilerelation_ct')
asset_oid = models.PositiveIntegerField()
asset = GenericForeignKey('asset_ct', 'asset_oid')
# Generic foreign keys to communication types (Fax, PhoneCall, Message, etc.)
comm_type_ct = models.ForeignKey(ContentType)
comm_type_oid = models.PositiveIntegerField()
comm_type = GenericForeignKey('comm_type_ct', 'comm_type_oid')
class CommunicationTypeAbstract(models.Model):
communication = GenericRelation(Communication, content_type_field='comm_type_ct', object_id_field='comm_type_oid')
class Meta:
abstract = True
class MailAccount(models.Model):
protocol_choices = (
('pop3', 'pop3'),
('imap', 'imap'),
('imap-exchange', 'imap-exchange'),
('gmail', 'imap-gmail')
)
email_address = models.EmailField(help_text="Email address for this account. May differ from username.")
username = models.CharField(
max_length=100,
help_text="Username required for login to the mail server.")
password = EncryptedCharField(max_length=50, blank=True, null=True)
server_address = models.CharField(
max_length=300,
verbose_name="Address of the server",
help_text="Address of the mail server. Eg: www.example.com, 192.168.5.1, etc.")
protocol = models.CharField(
max_length=20,
choices=protocol_choices,
default='imap',
help_text="If you use gmail SSL must be enabled."
)
ssl = models.BooleanField(default=False)
def get_folders(self):
"""
Queries server via a temp mailbox for folder names
:return: list of foldernames on the server.
"""
# use a temporary mailbox for it's connection:
m = Mailbox(mail_account=self)
return m.get_mail_folders()
def update_folders(self):
"""
Creates mailboxes for each folder returned from the server.
:return: list of names of created folders
"""
new = []
for folder in self.get_folders():
mbx, created = Mailbox.objects.get_or_create(folder_name=folder, mail_account=self)
if created:
new.append(mbx)
return new
def harvest_mail(self):
comm_utils.harvest_mailboxes(self.mailbox_set.filter(active=True))
def __str__(self):
return '{} - {}'.format(self.username, self.server_address)
class Meta:
permissions = (
("harvest_mail_account", "Can run mailharvest on mail account"),
)
class Mailbox(models.Model):
folder_name = models.CharField(max_length=300,
default='INBOX',
help_text='This is the un-url-quoted folder name')
active = models.BooleanField(
_(u'Active'),
help_text=(_(
"Check this e-mail inbox for new e-mail messages during polling "
"cycles. This checkbox does not have an effect upon whether "
"mail is collected here when this mailbox receives mail from a "
"pipe, and does not affect whether e-mail messages can be "
"dispatched from this mailbox. "
)),
blank=True,
default=False,
)
mail_account = models.ForeignKey(MailAccount)
incoming = models.BooleanField(
default=True,
verbose_name="Is Incoming",
help_text="False if this is an outgoing mailbox (e.g. 'Sent Mail'), True if otherwise.")
# hierarchy_delimiter = models.CharField(
# max_length=1,
# blank=True,
# null=True,
# verbose_name='IMAP folder hierarchy delimiter. Set automatically by the mailaccount when folders (mailboxes) are created.')
objects = models.Manager()
@property
def uri_template(self):
return '{protocol}://{user}:{password}@{server_address}?folder={folder}'
@property
def uri(self):
"""
Most important property of mailbox. Everything derives from this.
:return:
"""
if self.mail_account.ssl:
protocol = self.mail_account.protocol + "+ssl"
else:
protocol = self.mail_account.protocol
password = None
if self.mail_account.password:
password = parse.quote(self.mail_account.password)
return self.uri_template.format(
protocol=protocol,
user=parse.quote(self.mail_account.username),
password=password,
server_address=self.mail_account.server_address,
folder=parse.quote(self.folder_name)
)
@property
def uri_sani_pretty(self):
"""
Same as uri property but with user/pass excluded and things unquoted.
:return:
"""
return self.uri_template.format(
protocol=self.mail_account.protocol,
user="username",
password="password",
server_address=self.mail_account.server_address,
folder=self.folder_name
)
@property
def _protocol_info(self):
return parse.urlparse(self.uri)
@property
def _query_string(self):
return parse.parse_qs(self._protocol_info.query)
@property
def _domain(self):
return self._protocol_info.hostname
@property
def folder(self):
"""Returns the folder to fetch mail from."""
# return parse.quote(self.folder_name)
folder = self._query_string.get('folder', None)[0]
# see BUG: https://bugs.python.org/issue13940
# if there are special characters we should quote them ourselves:
# folder = '"{}"'.format(folder)
return folder
@property
def folder_pretty(self):
        # TODO: implement a field to store the IMAP folder hierarchy delimiter. For now, assume it is a "."
f = self.folder
return f.split('.')[-1]
@property
def name(self):
return '{}__{}'.format(self.mail_account.username, self.folder)
@property
def port(self):
"""Returns the port to use for fetching messages."""
return self._protocol_info.port
@property
def username(self):
"""Returns the username to use for fetching messages."""
return parse.unquote(self._protocol_info.username)
@property
def password(self):
"""Returns the password to use for fetching messages."""
return parse.unquote(self._protocol_info.password)
@property
def from_email(self):
return self.mail_account.email_address
@property
def location(self):
"""Returns the location (domain and path) of messages."""
        return (self._domain if self._domain else '') + self._protocol_info.path
@property
def type(self):
"""Returns the 'transport' name for this mailbox."""
scheme = self._protocol_info.scheme.lower()
if '+' in scheme:
return scheme.split('+')[0]
return scheme
@property
def use_ssl(self):
"""Returns whether or not this mailbox's connection uses SSL."""
return '+ssl' in self._protocol_info.scheme.lower()
@property
def use_tls(self):
"""Returns whether or not this mailbox's connection uses STARTTLS."""
return '+tls' in self._protocol_info.scheme.lower()
@property
def archive(self):
"""Returns (if specified) the folder to archive messages to."""
archive_folder = self._query_string.get('archive', None)
if not archive_folder:
return None
return archive_folder[0]
def get_connection(self):
"""
Decides on the transport required and initiates the connection.
:return:
"""
# Define method-level variable that connect_to_transport() can reference outside of its own scope.
# I have doubts that this will work when connect_to_transport() is executed in its own process.
transport = None
if not self.uri:
transport = None
elif self.type == 'imap':
transport = HarvestImapTransport(
self.location,
port=self.port if self.port else None,
ssl=self.use_ssl,
tls=self.use_tls,
archive=self.archive,
folder=self.folder
)
elif self.type == 'imap-exchange':
transport = HarvestImapExchangeTransport(
self.location,
port=self.port if self.port else None,
ssl=self.use_ssl,
tls=self.use_tls,
archive=self.archive,
folder=self.folder
)
elif self.type == 'gmail':
mail_account = self.mail_account
credentials = mail_account.gmailcredential.credential
transport = HarvestGmailTransport(
self.location,
port=self.port if self.port else None,
ssl=True,
archive=self.archive,
credentials=credentials,
folder=self.folder
)
elif self.type == 'pop3':
transport = HarvestPop3Transport(
self.location,
port=self.port if self.port else None,
ssl=self.use_ssl
)
else:
logger.error("Error choosing mail transport class for mailbox:", str(self))
transport = None
if transport is None:
logger.error("A valid transport class could not be determined for this mailbox:", str(self))
else:
try:
default_timeout = GeneralSetting.objects.get('communication__mailharvest_get_connection_timeout')
transport.connect(self.username, self.password, default_timeout)
except (transport_exceptions.ConnectionError,
transport_exceptions.LoginError,
transport_exceptions.TimeoutError) as e:
                # The transport will have already logged the error. We should give some user feedback here.
# TODO: present to user some error message about failing to connect to the mail server.
return None
return transport
def get_mail_folders(self):
"""
Connect to this transport and fetch imap folders
:param:
:return:
"""
folders = []
connection = self.get_connection()
if not connection:
return folders
else:
folders = connection.list_folders()
return folders
def get_message_ids(self):
"""
Connect to this transport and fetch message Ids
:return:
"""
connection = self.get_connection()
message_ids = []
if not connection:
return message_ids
for thing in connection.list_message_ids():
message_ids.append(thing)
print("message id:", thing)
return message_ids
def get_message_uids(self):
"""
Connect to this transport and fetch message UIds
:return:
"""
connection = self.get_connection()
message_uids = []
if not connection:
return message_uids
for thing in connection.list_message_uids():
message_uids.append(thing)
return message_uids
def harvest_mail(self):
"""Connect to this transport and fetch new messages."""
new_mail = comm_utils.harvest_mailbox(self)
return new_mail
def _get_dehydrated_message(self, msg, record):
"""
Gets the various message pieces of msg (EmailMessage) and stores as Attachments related to record (Message).
Called by Mailbox._process_message()
Calls itself resursively if the message is MULTIPART.
:param msg: EmailMessage object instance
:param record: communications.Message model instance
:return: EmailMessage object (NOT communication.Message model)
"""
settings = comm_utils.get_settings()
new = EmailMessage()
if msg.is_multipart():
for header, value in msg.items():
new[header] = value
for part in msg.get_payload():
new.attach(
self._get_dehydrated_message(part, record)
)
        # cedar8 is not using the strip_unallowed_mimetypes setting, so this code is never executed.
elif settings['strip_unallowed_mimetypes'] and not msg.get_content_type() in settings['allowed_mimetypes']:
for header, value in msg.items():
new[header] = value
# Delete header, otherwise when attempting to deserialize the
# payload, it will be expecting a body for this.
del new['Content-Transfer-Encoding']
new[settings['altered_message_header']] = (
'Stripped; Content type %s not allowed' % (
msg.get_content_type()
)
)
new.set_payload('')
elif (
(msg.get_content_type() not in settings['text_stored_mimetypes'])
or
('attachment' in msg.get('Content-Disposition', ''))
):
filename = None
raw_filename = msg.get_filename()
if raw_filename:
filename = comm_utils.convert_header_to_unicode(raw_filename)
if not filename:
extension = mimetypes.guess_extension(msg.get_content_type())
else:
_, extension = os.path.splitext(filename)
if not extension:
extension = '.bin'
filename_safe = uuid.uuid4().hex + extension
if not filename:
filename = filename_safe
# create a blank attachment instance and copy in some email stuff:
attachment = MessageAttachment()
attachment.message = record
for key, value in msg.items():
attachment[key] = value
content_file = ContentFile(six.BytesIO(msg.get_payload(decode=True)).getvalue())
# test the attachment before saving. If it's an image, inline, and small, don't save it.
# If we return "new" without setting the EmailMessage's 'attachment_interpolation_header' then I think we're cool.
# TODO Implement Message Attachment Blacklist HERE.
try:
if attachment['Content-Disposition'].startswith('inline') and attachment['Content-Type'].startswith('image'):
min_img_size_KB = GeneralSetting.objects.get('communication__mailharvest_min_inline_img_size_KB')
if content_file.size/1000 < min_img_size_KB:
logger.warning("SKIP attachment: inline image size {} KB is smaller than min image size setting of {} KB."
.format(content_file.size/1000, min_img_size_KB))
placeholder = EmailMessage()
# return placeholder without the interpolation header:
return placeholder
except KeyError:
# we tried to check a header that wasn't there. that's ok, keep on keepin' on.
logger.warning("checking for attachment headers - we referenced a header that wasn't there. that's probably ok.")
# if we've made it here, continue with saving and relating the attachment.
attachment.save()
'''
create temporary CommunicationAssets. "Temporary" because
the message hasn't yet been parsed for harvest codes.
            these will be transferred into related asset classes at that point (if related
            instances are found; otherwise they will remain CommunicationAssets).
'''
comm_asset = CommunicationAsset()
comm_asset.name = filename
comm_asset.save()
comm_asset.file.save(filename_safe, content_file)
attachment.create_file_relation(comm_asset)
placeholder = EmailMessage()
placeholder[
settings['attachment_interpolation_header']
] = str(attachment.pk)
new = placeholder
else:
content_charset = msg.get_content_charset()
if not content_charset:
content_charset = 'ascii'
try:
# Make sure that the payload can be properly decoded in the
# defined charset, if it can't, let's mash some things
# inside the payload :-\
msg.get_payload(decode=True).decode(content_charset)
except LookupError:
logger.warning(
"Unknown encoding %s; interpreting as ASCII!",
content_charset
)
msg.set_payload(
msg.get_payload(decode=True).decode(
'ascii',
'ignore'
)
)
except ValueError:
logger.warning(
"Decoding error encountered; interpreting %s as ASCII!",
content_charset
)
msg.set_payload(
msg.get_payload(decode=True).decode(
'ascii',
'ignore'
)
)
new = msg
return new
def _process_message(self, message):
"""
        Core of message processing: parses a raw email into a Message record,
        stores its attachments, and creates any related Communication objects.
        :param message: an email.message.Message instance
:return: msg (Message)
"""
msg = Message()
settings = comm_utils.get_settings()
# Get message-id - this is a critical piece, it shouldn't be conditional.
# if 'message-id' in message:
msg.message_id = message['message-id'].strip()
if not msg.message_id:
raise AttributeError("Harvest acquired a message without a message id. Message: {}".format(message))
if Message.objects.filter(mailbox=self, message_id=msg.message_id).exists():
logger.error("Problem. A message was about to be processed that already exists in C8. Message id: {}".format(msg.message_id))
return
# Set owning mailbox:
msg.mailbox = self
# Get message date:
date = parsedate_to_datetime(message.get('date'))
if settings['store_original_message']:
msg.eml.save(
'%s.eml' % uuid.uuid4(),
ContentFile(message.as_string()),
save=False
)
# Get message subject:
if 'subject' in message:
msg.subject = comm_utils.convert_header_to_unicode(message['subject'])[0:255]
else:
msg.subject = '[No subject]'
# Get senders:
if 'from' in message:
msg.from_header = comm_utils.convert_header_to_unicode(message['from'])
from_contacts = crm.models.Person.objects.filter(email__in=msg.from_address)
else:
from_contacts = Person.objects.none()
# Get receivers
if 'to' in message:
msg.to_header = comm_utils.convert_header_to_unicode(message['to'])
to_contacts = crm.models.Person.objects.filter(email__in=msg.to_addresses)
elif 'Delivered-To' in message:
msg.to_header = comm_utils.convert_header_to_unicode(message['Delivered-To'])
to_contacts = crm.models.Person.objects.filter(email__in=msg.to_addresses)
else:
to_contacts = Person.objects.none()
msg.save()
message = self._get_dehydrated_message(message, msg)
msg.set_body(message.as_string())
if message['in-reply-to']:
try:
msg.in_reply_to = Message.objects.filter(
message_id=message['in-reply-to'].strip()
)[0]
except IndexError:
pass
msg.save()
# Process Message for harvest codes:
relations = comm_utils.parse_message_for_entity_relations(
harvest_prefixes=HarvestCodePrefix.objects.all(),
message=msg
)
'''
Use the message/object relations we made in the previous step to set up
the Communication, CommunicationRelation, HarvestPrefixRelations. Also,
if the message is related to something and it has attachments, then those
        attachments need to be converted from CommunicationAssets into whatever
        asset class the related object suggests (e.g. get_asset_class()).
'''
if relations:
# Create the communication object (one message, one comm, but possibly many relations):
comm = Communication.create_communication(
subject=msg.subject,
date=date,
from_contacts=from_contacts,
to_contacts=to_contacts,
comm_type_obj=msg
)
for relation in relations:
# This HMR step should be dropped in favour of the CR below - but I like it, so don't drop it.
HarvestMessageRelation.objects.create(
related_object=relation['related_object'],
message=relation['message'], # This should be the same as "msg" in this method scope.
harvest_code_prefix=relation['harvest_code_prefix'],
harvest_code_full=relation['harvest_code_full']
)
CommunicationRelation(
comm=comm,
related_object=relation['related_object']
).save()
# determine what do to with message attachments' CFRs:
if msg.attachments.count() > 0:
for attachment in msg.attachments.all():
# save a pointer to the original Communication File Relation
attachment_file_relation = attachment.file_relations.first()
try:
new_asset_instance = relation['related_object'].get_asset_class()
                        except Exception as err:
                            err_msg = "Could not get asset class for {} when processing message {}. Error: {}" \
                                .format(relation['related_object'], relation['message'].id, str(err))
                            logger.warning(msg=err_msg)
                            continue
# transfer attributes to new asset instance:
new_asset_instance.name = attachment_file_relation.asset.name
new_asset_instance.comment = "Harvested from email on {}. Related message id: {}" \
.format(timezone.now(), msg.id)
new_asset_instance.save()
content_file = ContentFile(attachment_file_relation.asset.file.read())
attachment_file_relation.asset.file.close()
new_asset_instance.file.save(attachment_file_relation.asset.file.name, content_file)
attachment.create_file_relation(new_asset_instance)
log_msg = "Created asset from email attachment for: {}, message id: {}, filename: {}" \
.format(relation['related_object'], relation['message'].id, new_asset_instance.name)
logger.info(msg=log_msg)
# Close the asset file
attachment_file_relation.asset.file.close()
# When the asset linking is done (ie after this loop) we need to clean up generic communications assets as they
# are existing on their own, not linked to projects.
else:
'''
If no relations were discovered then wipe the message.
This should only happen for Exchange server messages due to
            the fact that server-side searches are disabled.
'''
logger.warning("Wipe disabled for message id: {} in mailbox: {}".format(msg.message_id, msg.mailbox))
# msg.wipe()
'''
        CLEAN up lingering CommunicationAssets:
        by this point all linkages between message attachment assets and related projects should be done.
        Loop over message attachments again and delete any that are still generic CommunicationAssets.
'''
for attachment in msg.attachments.all():
for cfr in attachment.file_relations.all():
if isinstance(cfr.asset, CommunicationAsset):
cfr.asset.delete()
cfr.delete()
return msg
def __str__(self):
return self.folder
class Meta:
verbose_name_plural = "Mailboxes"
permissions = (
("harvest_mailbox", "Can run mailharvest on mailbox"),
)
class Message(CommunicationTypeAbstract):
mailbox = models.ForeignKey(
Mailbox,
related_name='messages',
verbose_name=_(u'Mailbox'),
)
# Kept subject in Message model to make parsing for Harvest Codes slightly easier.
# could be dropped if the parse method were refactored.
subject = models.TextField(
_(u'Subject'),
max_length=1000
)
message_id = models.TextField(
_(u'Message ID')
)
in_reply_to = models.ForeignKey(
'communication.Message',
related_name='replies',
blank=True,
null=True,
verbose_name=_(u'In reply to'),
)
from_header = models.CharField(
_('From header'),
max_length=255,
)
to_header = models.TextField(
_(u'To header'),
)
body = models.TextField(
_(u'Body'),
)
encoded = models.BooleanField(
_(u'Encoded'),
default=False,
help_text=_('True if the e-mail body is Base64 encoded'),
)
processed = models.DateTimeField(
_('Processed'),
auto_now_add=True
)
eml = models.FileField(
_(u'Raw message contents'),
blank=True,
null=True,
upload_to="messages",
help_text=_(u'Original full content of message')
)
harvest_code_prefixes = models.ManyToManyField('HarvestCodePrefix', through='HarvestMessageRelation', related_name='harvest_code_prefixes')
objects = models.Manager()
@property
def from_address(self):
"""Returns the address (as a list) from which this message was received
.. note::
This was once (and probably should be) a string rather than a list,
but in a pull request received long, long ago it was changed;
presumably to make the interface identical to that of
`to_addresses`.
"""
if self.from_header:
return [parseaddr(self.from_header)[1].lower()]
else:
return []
@property
def to_addresses(self):
"""Returns a list of addresses to which this message was sent."""
addresses = []
for address in self.to_header.split(','):
if address:
addresses.append(
parseaddr(
address
)[1].lower()
)
return addresses
# def reply(self, message):
# """Sends a message as a reply to this message instance.
#
# Although Django's e-mail processing will set both Message-ID
# and Date upon generating the e-mail message, we will not be able
# to retrieve that information through normal channels, so we must
# pre-set it.
#
# """
# if not message.from_email:
# if self.mailbox.from_email:
# message.from_email = self.mailbox.from_email
# else:
# message.from_email = django_settings.DEFAULT_FROM_EMAIL
# message.extra_headers['Message-ID'] = make_msgid()
# message.extra_headers['Date'] = formatdate()
# message.extra_headers['In-Reply-To'] = self.message_id.strip()
# message.send()
# return self.mailbox.record_outgoing_message(
# email.message_from_string(
# message.message().as_string()
# )
# )
@property
def text(self):
"""
Returns the message body matching content type 'text/plain'.
"""
return comm_utils.get_body_from_message(
self.get_email_object(), 'text', 'plain'
).replace('=\n', '').strip()
@property
def html(self):
"""
Returns the message body matching content type 'text/html'.
"""
return comm_utils.get_body_from_message(
self.get_email_object(), 'text', 'html'
).replace('\n', '').strip()
def _rehydrate(self, msg):
new = EmailMessage()
settings = comm_utils.get_settings()
if msg.is_multipart():
for header, value in msg.items():
new[header] = value
for part in msg.get_payload():
new.attach(
self._rehydrate(part)
)
elif settings['attachment_interpolation_header'] in msg.keys():
try:
attachment = MessageAttachment.objects.get(
pk=msg[settings['attachment_interpolation_header']]
)
for header, value in attachment.items():
new[header] = value
encoding = new['Content-Transfer-Encoding']
if encoding and encoding.lower() == 'quoted-printable':
# Cannot use `email.encoders.encode_quopri due to
# bug 14360: http://bugs.python.org/issue14360
output = six.BytesIO()
# att_file = attachment.file
# file_contents = att_file.read()
# att_file.close()
# I don't want it to do this very often, and who needs the attachments here anyways?
file_contents = ContentFile("File contents not read for optimization").read()
encode_quopri(
six.BytesIO(file_contents),
output,
quotetabs=True,
header=False,
)
new.set_payload(
output.getvalue().decode().replace(' ', '=20')
)
del new['Content-Transfer-Encoding']
new['Content-Transfer-Encoding'] = 'quoted-printable'
else:
# att_file = attachment.file # put in a var so the .file property doesn't loop so much.
# file_contents = att_file.read()
# att_file.close()
file_contents = ContentFile("File contents not read due for optimization").read()
new.set_payload(file_contents)
del new['Content-Transfer-Encoding']
encode_base64(new)
except MessageAttachment.DoesNotExist:
new[settings['altered_message_header']] = (
'Missing; Attachment %s not found' % (
msg[settings['attachment_interpolation_header']]
)
)
new.set_payload('')
else:
for header, value in msg.items():
new[header] = value
new.set_payload(
msg.get_payload()
)
return new
def get_body(self):
"""Returns the `body` field of this record.
This will automatically base64-decode the message contents
if they are encoded as such.
"""
if self.encoded:
return base64.b64decode(self.body.encode('ascii'))
return self.body.encode('utf-8')
def set_body(self, body):
"""Set the `body` field of this record.
This will automatically base64-encode the message contents to
circumvent a limitation in earlier versions of Django in which
no fields existed for storing arbitrary bytes.
"""
if six.PY3:
body = body.encode('utf-8')
self.encoded = True
self.body = base64.b64encode(body).decode('ascii')
def get_email_object(self):
"""Returns an `email.message.Message` instance representing the
contents of this message and all attachments.
See [email.Message.Message]_ for more information as to what methods
and properties are available on `email.message.Message` instances.
.. note::
Depending upon the storage methods in use (specifically --
whether ``DJANGO_MAILBOX_STORE_ORIGINAL_MESSAGE`` is set
            to ``True``), this may either create a "rehydrated" message
using stored attachments, or read the message contents stored
on-disk.
.. [email.Message.Message]: Python's `email.message.Message` docs
(https://docs.python.org/2/library/email.message.html)
"""
if self.eml:
self.eml.open()
body = self.eml.file.read()
else:
body = self.get_body()
if six.PY3:
flat = email.message_from_bytes(body)
else:
flat = email.message_from_string(body)
return self._rehydrate(flat)
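    # Read-path sketch: for a stored Message instance `m`, `m.get_email_object()` yields a
    # rehydrated `email.message.Message`, so e.g. `m.get_email_object()['Subject']` returns
    # the stored subject header.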
def wipe(self):
"""
        Deletes all fields and attachments associated with this message, leaving message_id intact.
        This allows us to keep a record of message-ids that have been downloaded without tracking
        every email that has ever been sent.
:return:
"""
for attachment in self.attachments.all():
# This attachment is attached only to this message.
attachment.delete()
wiped_text = 'wiped'
self.subject = wiped_text
self.in_reply_to = None
self.from_header = wiped_text
self.to_header = wiped_text
self.body = wiped_text
self.eml.delete()
# for field in self._meta.get_fields():
self.save()
def delete(self, *args, **kwargs):
"""Delete this message and all stored attachments."""
for attachment in self.attachments.all():
# This attachment is attached only to this message.
attachment.delete()
return super(Message, self).delete(*args, **kwargs)
def get_absolute_url(self):
return reverse('communication:message-detail', args=[str(self.id)])
def __str__(self):
return self.subject
class MessageAttachment(models.Model):
message = models.ForeignKey(
Message,
related_name='attachments',
null=True,
blank=True,
verbose_name=_('Message'),
)
headers = models.TextField(
_(u'Headers'),
null=True,
blank=True,
)
# files = models.Many(CommunicationFileRelation)
file_relations = GenericRelation(CommunicationFileRelation, content_type_field='comm_type_ct', object_id_field='comm_type_oid')
def _get_rehydrated_headers(self):
headers = self.headers
if headers is None:
return EmailMessage()
if sys.version_info < (3, 0):
try:
headers = headers.encode('utf-8')
except UnicodeDecodeError as e:
# headers = unicode(headers, 'utf-8').encode('utf-8')
logger.error("Unicode error at MessageAttachment._get_rehydrated_headers: {}".format(str(e)))
return email.message_from_string(headers)
def _set_dehydrated_headers(self, email_object):
self.headers = email_object.as_string()
def __delitem__(self, name):
rehydrated = self._get_rehydrated_headers()
del rehydrated[name]
self._set_dehydrated_headers(rehydrated)
def __setitem__(self, name, value):
rehydrated = self._get_rehydrated_headers()
rehydrated[name] = value
self._set_dehydrated_headers(rehydrated)
def get_filename(self):
"""
:return: original filename of this attachment.
"""
file_name = self._get_rehydrated_headers().get_filename()
if isinstance(file_name, six.string_types):
result = comm_utils.convert_header_to_unicode(file_name)
if result is None:
return file_name
return result
else:
return None
def items(self):
return self._get_rehydrated_headers().items()
def __getitem__(self, name):
value = self._get_rehydrated_headers()[name]
if value is None:
raise KeyError('Header %s does not exist' % name)
return value
def create_file_relation(self, asset):
"""
Takes an instantiated asset class, creates a file relation instance for it, and
saves a pointer to the file relation.
:param asset: an instantiated asset class
:return: communication file relation instance
"""
cfr = CommunicationFileRelation(asset=asset, comm_type=self)
cfr.save()
return cfr
def create_file_relations(self, assets):
"""
Takes a list of instantiated asset classes, creates file relation instances for them, and
saves a pointer to the file relations set.
        :param assets: a list of instantiated asset classes
:return: communication file relation queryset
"""
cfr_ids = []
for asset in assets:
cfr = self.create_file_relation(asset)
cfr_ids.append(cfr.id)
        # Return only the relations created above (rather than every relation on the message).
        return CommunicationFileRelation.objects.filter(id__in=cfr_ids)
@property
def file(self):
"""
Provides a short-cut for other mailbox code accessing the related asset's file contents (message hydration)
This should only be used by other mailbox code that handles reading file CONTENTS. Do not use this for writing!
Note if an asset is deleted, say on a Dev't project, then a CFR instance may not point to anything.
:return: file contents
"""
if self.asset is None:
return ContentFile("File {} not found.".format(self.get_filename()))
else:
return self.asset.file
# except Exception as err:
# msg = "Failed while trying to return file contents for message attachment id: {}: {}".format(self.id, str(err))
# logger.warning(msg=msg)
@property
def asset(self):
"""
Return the FIRST related ASSET that doesn't throw an exception.
:return: asset instance
"""
# for cfr in CommunicationFileRelation.objects.filter(comm_type=self.message):
for cfr in self.file_relations.all():
try:
# test if there is actually a file attached:
cfr.asset.file.name
return cfr.asset
except Exception as err:
msg = "Failed while trying to return the asset instance for message attachment id: {}: {}".format(self.id, str(err))
logger.warning(msg=msg)
        # If we've gotten here, it's because we've tried to get an asset attached to this
        # MessageAttachment but there are none.
return None
# raise MessageAttachment.DoesNotExist
def delete(self, using=None):
"""
Clean up file relation before deleting.
:param using:
:return:
"""
try:
for cfr in self.file_relations.all():
# Delete the asset:
try:
cfr.asset.delete()
except AttributeError as e:
msg = "MessageAttachment - self delete - tried to delete an asset in a CFR that was None." \
" Most likely the file was already deleted manually by a user."
logger.warning(msg)
# Delete the communication File Relation record:
cfr.delete()
except django.db.utils.ProgrammingError as err:
logger.error("Error in MessageAttachment.delete() method for id: {}. Error: \'{}\"".format(self.id, str(err)))
super(MessageAttachment, self).delete(using=using)
def __str__(self):
return str(self.get_filename())
class HarvestCodePrefix(models.Model):
prefix = models.CharField(
max_length=30,
verbose_name="Harvest Code Prefix")
active = models.BooleanField(default=True)
# Removed - too much to manage for now but could be useful in the future.
# mailboxes = models.ManyToManyField(Mailbox, verbose_name="Mailboxes that will be checked for this prefix.")
content_type = models.ForeignKey(ContentType, verbose_name="Model type that the prefix belongs to.")
objects = ActiveObjectManager()
admin_objects = models.Manager()
class Meta:
verbose_name_plural = 'Harvest code prefixes'
def __str__(self):
return self.prefix
class HarvestMessageRelation(models.Model):
"""
    Mailbox._process_message() calls the comm_utils.parse method, which creates these relations,
    but it would be more efficient if the Comm. objects were created directly without the HMRs in the middle.
"""
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
related_object = GenericForeignKey('content_type', 'object_id')
message = models.ForeignKey(Message, related_name='related_message')
harvest_code_prefix = models.ForeignKey(HarvestCodePrefix)
harvest_code_full = models.CharField(max_length=100)
def __str__(self):
return "{}:{}".format(self.harvest_code_full, self.content_type)
class HarvestHistory(models.Model):
mailbox = models.ForeignKey(Mailbox)
last_harvest = models.DateTimeField(default=timezone.now)
message = models.TextField(blank=True, null=True)
class Meta:
ordering = ['-last_harvest']
verbose_name_plural = 'harvest histories'
class PhoneCall(CommunicationTypeAbstract):
from_number = PhoneNumberField(blank=True, null=True)
to_number = PhoneNumberField(blank=True, null=True)
duration = models.FloatField(
blank=True,
null=True,
verbose_name='Duration of phone call in minutes.',
help_text='The duration of the phone call in decimal minutes. 1 minute, 30 seconds should be entered as "1.5"')
notes = models.TextField(blank=True, null=True)
def __str__(self):
"""
Try to get the from/to numbers first. Failing those, get numbers from contacts.
:return:
"""
from_num = self.from_number or ""
to_num = self.to_number or ""
if self.from_number is None:
if self.communication.first() is not None:
for contact in self.communication.first().from_contacts.all():
if contact.phone:
from_num = contact.phone
break
if self.to_number is None:
if self.communication.first() is not None:
for contact in self.communication.first().to_contacts.all():
if contact.phone:
to_num = contact.phone
break
return "from: {}, to: {}".format(from_num, to_num)
def get_absolute_url(self):
return reverse('communication:phonecall-detail', kwargs={'pk': self.id})
class Fax(CommunicationTypeAbstract):
from_number = PhoneNumberField(blank=True, null=True)
to_number = PhoneNumberField(blank=True, null=True)
# document = models.ForeignKey(CommunicationAsset, blank=True, null=True)
document = models.ForeignKey(CommunicationFileRelation, blank=True, null=True)
# document = models.OneToOneField('CommunicationFileRelation', through='CommunicationFileRelation', related_name='asset')
# harvest_code_prefixes = models.ManyToManyField('HarvestCodePrefix', through='HarvestMessageRelation', related_name='harvest_code_prefixes')
# document_ct = models.ForeignKey(ContentType)
# document_oid = models.PositiveIntegerField()
# document = GenericForeignKey('document_ct', 'document_oid')
notes = models.TextField(blank=True, null=True)
class Meta:
verbose_name_plural = 'faxes'
def __str__(self):
"""
Try to get the from/to numbers first. Failing those, get numbers from contacts.
:return:
"""
from_num = self.from_number or ""
to_num = self.to_number or ""
if self.from_number is None:
if self.communication.first() is not None:
for contact in self.communication.first().from_contacts.all():
if contact.phone:
from_num = contact.phone
break
if self.to_number is None:
if self.communication.first() is not None:
for contact in self.communication.first().to_contacts.all():
if contact.phone:
to_num = contact.phone
break
return "from: {}, to: {}".format(from_num, to_num)
def get_absolute_url(self):
return reverse('communication:fax-detail', args=[str(self.id)])
class Letter(CommunicationTypeAbstract):
document = models.ForeignKey(CommunicationFileRelation, blank=True, null=True)
notes = models.TextField(blank=True, null=True)
def __str__(self):
return "letter. {}".format(self.document.asset.file.name)
def get_absolute_url(self):
return reverse('communication:letter-detail', args=[str(self.id)])
# Google GMail OAuth2 helper models
class GmailCredential(models.Model):
mail_account = models.OneToOneField(MailAccount)
credential = CredentialsField()
def __str__(self):
return "credential for {}".format(self.mail_account)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import random
import decimal
from sympy import symbols, diff
wine = pd.read_csv("winequality-red.csv",delimiter=",")
def get_slope():
slope = []
for i in range (0,11):
slope.append(float(decimal.Decimal(random.randrange(-30, 30))/100))
return slope
def pre_qual(intercept,slope):
predict_qual = []
for j in range(0,1599):
predict_qual.append(intercept + np.sum(np.multiply(slope,wine.iloc[j,:-1])))
return predict_qual
def observed_qual():
observe = []
for i in range(0,1599):
observe.append(wine.iloc[:,11][i])
return observe
def get_mse(observe,predict):
    residual = np.subtract(predict,observe)
    square = np.multiply(residual,residual)
    total = np.sum(square)  # avoid shadowing the built-in `sum`
    return total/1600
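# Note: the loops in this script iterate over 1599 samples (the red-wine dataset size), so
# dividing by 1600 only approximates the mean; the same 1/1600 scaling is used in get_partials(),
# which keeps the gradient consistent with this objective.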
# def get_partials(observe,predict):
# p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, pi = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
# for i in range(0, 1599):
# partial_xi =
# pi = pi + partial_xi
# partial_x1 = np.subtract(observe,predict)*wine.iloc[i,:-1][0]
# p1 = p1 +
def get_partials(slope,intercept,observe):
    """Return the accumulated partial-derivative terms used for the gradient step:
    index 0 is the intercept term, indices 1-11 are the slope terms."""
    partials = [0.0] * 12
    for i in range(0, 1599):
        features = wine.iloc[i, :-1]
        # Prediction matches pre_qual(): intercept plus the weighted feature sum.
        error = (intercept + np.sum(np.multiply(slope, features))) - observe[i]
        partials[0] += error / 1600
        for k in range(11):
            partials[k + 1] += (error * features.iloc[k]) / 1600
    return partials
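# A vectorized sketch of the same gradient computation (hypothetical helper, not called by main();
# it relies on the module-level `wine` DataFrame and the numpy import above):
def get_partials_vectorized(slope, intercept, observe):
    X = wine.iloc[:1599, :-1].to_numpy()                    # (1599, 11) feature matrix
    errors = intercept + X @ np.asarray(slope, dtype=float) - np.asarray(observe, dtype=float)
    grad_intercept = errors.sum() / 1600
    grad_slopes = (X.T @ errors) / 1600
    return [grad_intercept] + list(grad_slopes)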
def update_step(partials,intercept,slope):
learning_rate = 0.01
lr = []
for i in range (0,12):
lr.append(learning_rate)
step_size = np.multiply(lr,partials)
# print(step_size)
newintercept = intercept - step_size[0]
newslope = np.subtract(slope,step_size[1:])
return newintercept,newslope
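# Each update_step() call applies one plain gradient-descent step with a fixed learning rate,
# using the partial terms returned by get_partials():
#   intercept <- intercept - lr * partials[0]
#   slope_k   <- slope_k   - lr * partials[k]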
def gradient_descent(intercept,slope,observe, predict):
for i in range (0,1000):
partials = get_partials(slope,intercept,observe)
mse = get_mse(observe,predict)
intercept,slope = update_step(partials,intercept,slope)
print(intercept)
print(slope)
print("This is iteration: ", i, "The mse is ", mse)
predict = pre_qual(intercept,slope)
def main():
intercept = 1.0
slope = get_slope()
predict = pre_qual(1,slope)
# print(predict)
observe = observed_qual()
# print(slope)
# partials = get_partials(slope, intercept, observe)
# print(partials)
# newintercept,newslope = update_step(partials,intercept,slope)
# print(newintercept,newslope)
    gradient_descent(intercept,slope,observe,predict)
if __name__ == '__main__':
    main()
|
<filename>web/tests.py
import datetime
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.urls import reverse
from django_dynamic_fixture import G
from accounts.models import Child, DemographicData, User
from studies.models import Lab, Study, StudyType
class ParticipantAccountViewsTestCase(TestCase):
def setUp(self):
self.participant_email = "<EMAIL>"
self.valid_password = "<PASSWORD>"
self.too_short_password = "<PASSWORD>"
# Participant Setup
self.participant = G(
User,
username=self.participant_email,
is_active=True,
is_researcher=False,
nickname="Participant family",
)
self.participant.set_password(self.valid_password)
self.participant.save()
self.demographic_orig = G(
DemographicData,
user=self.participant,
languages_spoken_at_home="French",
additional_comments="Original comments",
previous=None,
number_of_children="",
number_of_guardians_explanation="",
number_of_guardians="2",
number_of_books=75,
former_lookit_annual_income="",
lookit_referrer="Google",
)
self.demographic_current = G(
DemographicData,
user=self.participant,
languages_spoken_at_home="Spanish",
previous=self.demographic_orig,
number_of_children="",
number_of_guardians_explanation="",
number_of_guardians="2",
number_of_books=75,
former_lookit_annual_income="",
lookit_referrer="Google",
)
self.child = G(
Child,
user=self.participant,
existing_conditions=Child.existing_conditions.dyslexia,
languages_spoken=Child.languages_spoken.en,
birthday=datetime.date.today() - datetime.timedelta(days=10 * 365),
)
# Site fixture enabling login
self.fake_site = G(Site, id=1)
# FlatPage fixture enabling login redirect to work.
self.home_page = G(FlatPage, url="/")
self.home_page.sites.add(self.fake_site)
self.home_page.save()
def test_participant_signup_flow(self):
response = self.client.post(
reverse("web:participant-signup"),
{
"username": "<EMAIL>",
"password1": <PASSWORD>,
"password2": <PASSWORD>,
"nickname": "Testfamily",
},
follow=True,
)
# We're redirected successfully to demographic data update
self.assertEqual(
response.redirect_chain, [(reverse("web:demographic-data-update"), 302)]
)
self.assertEqual(response.status_code, 200)
# And are a logged-in user with the expected attributes for new participant
user = response.wsgi_request.user
self.assertFalse(user.is_anonymous)
self.assertTrue(user.is_authenticated)
self.assertFalse(user.is_researcher)
self.assertTrue(user.is_active)
self.assertEqual(user.nickname, "Testfamily")
self.assertEqual(user.username, "<EMAIL>")
self.assertFalse(user.labs.exists())
self.assertFalse(user.user_permissions.exists())
self.assertFalse(user.groups.exists())
self.assertFalse(user.children.exists())
self.assertFalse(user.demographics.exists())
def test_participant_password_requirements(self):
response = self.client.post(
reverse("web:participant-signup"),
{
"username": "<EMAIL>",
"password1": <PASSWORD>,
"password2": <PASSWORD>,
"nickname": "Testfamily",
},
follow=True,
)
# There are form errors...
self.assertNotEqual(len(response.context["form"].errors), 0)
self.assertIn("This password is too short.", response.content.decode("utf-8"))
# We stayed on the same page...
self.assertEqual(response.redirect_chain, [])
self.assertEqual(response.status_code, 200)
# And user isn't logged in
self.assertTrue(response.wsgi_request.user.is_anonymous)
self.assertFalse(response.wsgi_request.user.is_authenticated)
def test_participant_signup_mismatched_passwords(self):
response = self.client.post(
reverse("web:participant-signup"),
{
"username": "<EMAIL>",
"password1": <PASSWORD>,
"password2": <PASSWORD> + "q",
"nickname": "Testfamily",
},
follow=True,
)
# There are form errors...
self.assertIn("password2", response.context["form"].errors)
self.assertIn(
"The two password fields didn’t match.",
response.context["form"].errors["password2"],
)
# We stayed on the same page
self.assertEqual(response.redirect_chain, [])
self.assertEqual(response.status_code, 200)
# And user isn't logged in
self.assertTrue(response.wsgi_request.user.is_anonymous)
self.assertFalse(response.wsgi_request.user.is_authenticated)
def test_participant_signup_existing_user(self):
response = self.client.post(
reverse("web:participant-signup"),
{
"username": "<EMAIL>",
"password1": <PASSWORD>,
"password2": <PASSWORD>,
"nickname": "Testfamily",
},
follow=True,
)
# There are form errors...
self.assertIn("username", response.context["form"].errors)
self.assertIn(
"User with this Email address already exists.",
response.context["form"].errors["username"],
)
# We stayed on the same page
self.assertEqual(response.redirect_chain, [])
self.assertEqual(response.status_code, 200)
# And user isn't logged in
self.assertTrue(response.wsgi_request.user.is_anonymous)
self.assertFalse(response.wsgi_request.user.is_authenticated)
def test_participant_signup_invalid_email(self):
response = self.client.post(
reverse("web:participant-signup"),
{
"username": "participantmit.edu",
"password1": <PASSWORD>,
"password2": <PASSWORD>,
"nickname": "Testfamily",
},
follow=True,
)
# There are form errors...
self.assertIn("username", response.context["form"].errors)
self.assertIn(
"Enter a valid email address.", response.context["form"].errors["username"]
)
# We stayed on the same page
self.assertEqual(response.redirect_chain, [])
self.assertEqual(response.status_code, 200)
# And user isn't logged in
self.assertTrue(response.wsgi_request.user.is_anonymous)
self.assertFalse(response.wsgi_request.user.is_authenticated)
def test_participant_login_required_views_unauthenticated(self):
login_required_views = [
"web:demographic-data-update",
"web:children-list",
"web:email-preferences",
"web:studies-history",
"accounts:manage-account", # TODO: move to accounts tests
]
for url_name in login_required_views:
response = self.client.get(reverse(url_name), follow=True,)
# Redirected to login view with next set if unauthenticated
self.assertEqual(
response.redirect_chain,
[(f"{reverse('login')}?next={reverse(url_name)}", 302)],
f"Unauthenticated user not redirected to login from {url_name}",
)
self.assertEqual(response.status_code, 200)
def test_demographic_data_update_authenticated(self):
self.client.force_login(self.participant)
# Get the form to fill out; check that initial data includes values only
# from more recent demographic data
response = self.client.get(reverse("web:demographic-data-update"))
self.assertEqual(response.status_code, 200)
form = response.context["form"]
data = form.initial
self.assertEqual(data["languages_spoken_at_home"], "Spanish")
self.assertNotEqual(data["additional_comments"], "Original comments")
# Update data and save
data["languages_spoken_at_home"] = "Swahili"
cleaned_data = {key: val for (key, val) in data.items() if val is not None}
response = self.client.post(
reverse("web:demographic-data-update"), cleaned_data, follow=True
)
self.assertEqual(
response.redirect_chain, [(reverse("web:studies-list"), 302)],
)
self.assertEqual(response.status_code, 200)
# Make sure we can retrieve updated data
response = self.client.get(reverse("web:demographic-data-update"))
self.assertEqual(response.status_code, 200)
self.assertIn("Swahili", response.content.decode("utf-8")) # or
self.assertEqual(
response.context["form"].initial["languages_spoken_at_home"], "Swahili"
)
# Check we've created an additional demographicdata object for this
self.assertEqual(self.participant.demographics.count(), 3)
# TODO: ParticipantUpdateView
# - check can update password (participant, researcher)
# - check can update email but only to unused (otherwise reloads, no update), can update nickname
# - check can only get own data
# - check can disable/enable 2fa
# TODO: ChildrenListView
# - check all own children are there, can only see own children
# - check can add child
# - check if invalid data sent, reloads page & does not create child
# TODO: ChildUpdateView
# - check can get but only for own child, check can change name, check cannot change DOB
# TODO: ParticipantEmailPreferencesView
# - check can get but only for own, check can un-check one preference & save
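    # A minimal sketch for the ChildrenListView TODO above (assumes an authenticated participant
    # simply gets a 200 response for their own children list):
    #
    #   def test_children_list_authenticated(self):
    #       self.client.force_login(self.participant)
    #       response = self.client.get(reverse("web:children-list"))
    #       self.assertEqual(response.status_code, 200)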
class ParticipantStudyViewsTestCase(TestCase):
def setUp(self):
self.participant_email = "<EMAIL>"
self.valid_password = "<PASSWORD>"
self.too_short_password = "<PASSWORD>"
# Participant Setup
self.participant = G(
User,
username=self.participant_email,
is_active=True,
is_researcher=False,
nickname="Participant family",
)
self.participant.set_password(self.valid_password)
self.participant.save()
self.demographic_orig = G(
DemographicData,
user=self.participant,
languages_spoken_at_home="French",
additional_comments="Original comments",
previous=None,
number_of_children="",
number_of_guardians_explanation="",
number_of_guardians="2",
number_of_books=75,
former_lookit_annual_income="",
lookit_referrer="Google",
)
self.child = G(
Child,
user=self.participant,
existing_conditions=Child.existing_conditions.dyslexia,
languages_spoken=Child.languages_spoken.en,
birthday=datetime.date.today() - datetime.timedelta(days=10 * 365),
)
self.researcher = G(
User, is_active=True, is_researcher=True, given_name="Researcher"
)
self.lab = G(Lab, name="MIT")
self.study_type = G(StudyType, name="default", id=1)
small_gif = (
b"\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04"
b"\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02"
b"\x02\x4c\x01\x00\x3b"
)
self.thumbnail = SimpleUploadedFile(
name="small.gif", content=small_gif, content_type="image/gif"
)
self.public_active_study_1 = G(
Study,
creator=self.researcher,
shared_preview=False,
study_type=self.study_type,
name="PublicActiveStudy1",
lab=self.lab,
public=True,
image=self.thumbnail,
)
# Separately set state because it's set to "created" initially
self.public_active_study_1.save()
self.public_active_study_1.state = "active"
self.public_active_study_1.save()
self.public_active_study_2 = G(
Study,
creator=self.researcher,
shared_preview=False,
study_type=self.study_type,
name="PublicActiveStudy2",
lab=self.lab,
public=True,
image=self.thumbnail,
)
# Separately set state because it's set to "created" initially
self.public_active_study_2.save()
self.public_active_study_2.state = "active"
self.public_active_study_2.save()
self.private_active_study = G(
Study,
creator=self.researcher,
shared_preview=False,
study_type=self.study_type,
name="PrivateActiveStudy",
lab=self.lab,
public=False,
image=self.thumbnail,
)
# Separately set state because it's set to "created" initially
self.private_active_study.save()
self.private_active_study.state = "active"
self.private_active_study.save()
self.public_inactive_study = G(
Study,
creator=self.researcher,
shared_preview=False,
study_type=self.study_type,
name="PublicInactiveStudy",
lab=self.lab,
public=True,
image=self.thumbnail,
)
# Separately set state because it's set to "created" initially
self.public_inactive_study.save()
self.public_inactive_study.state = "submitted"
self.public_inactive_study.save()
def test_study_list_view(self):
response = self.client.get(reverse("web:studies-list"))
self.assertEqual(response.status_code, 200)
content = response.content.decode("utf-8")
# Make sure we see the two public, active studies, but not an inactive or private study
self.assertIn("PublicActiveStudy1", content)
self.assertIn("PublicActiveStudy2", content)
self.assertNotIn("PrivateActiveStudy", content)
self.assertNotIn("PublicInactiveStudy", content)
self.assertTrue(
response.context["study_list"]
.filter(uuid=self.public_active_study_1.uuid)
.exists()
)
self.assertTrue(
response.context["study_list"]
.filter(uuid=self.public_active_study_2.uuid)
.exists()
)
self.assertFalse(
response.context["study_list"]
.filter(uuid=self.private_active_study.uuid)
.exists()
)
self.assertFalse(
response.context["study_list"]
.filter(uuid=self.public_inactive_study.uuid)
.exists()
)
# TODO: StudyDetailView
# - check can see for public or private active study, unauthenticated or authenticated
# - check context[children] has own children
# TODO: StudiesHistoryView
# - check can see several sessions where consent frame was completed (but consent not marked), not for someone else's
# child, not for consent frame incomplete.
# TODO: ExperimentAssetsProxyView
# - check have to be authenticated, maybe that's it for now?
# TODO: ExperimentProxyView
# - check have to be authenticated, has to be own child
|
<reponame>jlangdev/falconpy
"""
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
overwatch_dashboard - Falcon Overwatch Dashboard API Interface Class
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
from ._util import force_default, handle_single_argument, process_service_request
from ._service_class import ServiceClass
from ._endpoint._overwatch_dashboard import _overwatch_dashboard_endpoints as Endpoints
class OverwatchDashboard(ServiceClass):
"""
The only requirement to instantiate an instance of this class
    is a valid token provided by the Falcon API SDK OAuth2 class, an
    existing instance of the authentication class as an object, or a
    valid set of credentials.
"""
@force_default(defaults=["parameters"], default_types=["dict"])
def aggregates_detections_global_counts(self: object, *args, parameters: dict = None, **kwargs) -> dict:
"""
Get the total number of detections pushed across all customers
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#
# /Overwatch%20Dashboard/AggregatesDetectionsGlobalCounts
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="AggregatesDetectionsGlobalCounts",
keywords=kwargs,
params=handle_single_argument(args, parameters, "filter")
)
def aggregates_events_collections(self: object, body: list) -> dict:
"""
Get OverWatch detection event collection info by providing an aggregate query
"""
# [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#
# /Overwatch%20Dashboard/AggregatesEventsCollections
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="AggregatesEventsCollections",
body=body
)
def aggregates_events(self: object, body: list) -> dict:
"""
Get aggregate OverWatch detection event info by providing an aggregate query
"""
# [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/Overwatch%20Dashboard/AggregatesEvents
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="AggregatesEvents",
body=body
)
@force_default(defaults=["parameters"], default_types=["dict"])
def aggregates_incidents_global_counts(self: object, *args, parameters: dict = None, **kwargs) -> dict:
"""
Get the total number of incidents pushed across all customers.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#
# /Overwatch%20Dashboard/AggregatesIncidentsGlobalCounts
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="AggregatesIncidentsGlobalCounts",
keywords=kwargs,
params=handle_single_argument(args, parameters, "filter")
)
@force_default(defaults=["parameters"], default_types=["dict"])
def aggregates_events_global_counts(self: object, *args, parameters: dict = None, **kwargs) -> dict:
"""
        Get the total number of OverWatch events across all customers.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#
# /Overwatch%20Dashboard/AggregatesOWEventsGlobalCounts
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="AggregatesOWEventsGlobalCounts",
keywords=kwargs,
params=handle_single_argument(args, parameters, "filter")
)
# These method names align to the operation IDs in the API but
# do not conform to snake_case / PEP8 and are defined here for
# backwards compatibility / ease of use purposes
AggregatesDetectionsGlobalCounts = aggregates_detections_global_counts
AggregatesEventsCollections = aggregates_events_collections
AggregatesEvents = aggregates_events
AggregatesIncidentsGlobalCounts = aggregates_incidents_global_counts
AggregatesOWEventsGlobalCounts = aggregates_events_global_counts
# The legacy name for this class does not conform to PascalCase / PEP8
# It is defined here for backwards compatibility purposes only.
Overwatch_Dashboard = OverwatchDashboard # pylint: disable=C0103
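# A minimal usage sketch (credentials are placeholders; any authentication mechanism accepted by
# ServiceClass should also work):
#
#   from falconpy import OverwatchDashboard
#   falcon = OverwatchDashboard(client_id="CLIENT_ID", client_secret="CLIENT_SECRET")
#   counts = falcon.aggregates_detections_global_counts(filter="")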
|
<reponame>MingheCao/pysot
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from pysot.core.config import cfg
from pysot.tracker.siamrpn_tracker import SiamRPNTracker
import torch
from tools.rubost_track import rubost_track
from tools.rubost_track import imageSimilarity_deeprank
import cv2
class SiamRPNRBTracker(SiamRPNTracker):
def __init__(self, model):
super(SiamRPNRBTracker, self).__init__(model)
self.longterm_state = False
self.frame_num = 0
self.state_update = True
self.visualize = True
self.visualize_gmm = False
self.save_img= False
self.CONFIDENCE_LOW = 0.985
self.instance_sizes = {x: cfg.TRACK.INSTANCE_SIZE +60 * x for x in range(10)}
self.state_update = False
self.state_cross = False
self.state_lost = False
self.state_lost_cnt=0
self.center_std_thre=2.3
self.similar_thre = 1.2
self.cross_count = 0
self.cross_count2=0
self.img_similar=imageSimilarity_deeprank.imageSimilarity_deeprank()
def init(self, img, bbox):
"""
args:
img(np.ndarray): BGR image
bbox: (x, y, w, h) bbox
"""
z_crop = self.get_z_crop(img, bbox)
self.model.template(z_crop)
self.zf_gt = self.model.zf
self.zf_global = self.model.zf
self.zf_bbox_global=self.img_similar.get_feature(
self.crop_bbox(img,bbox))
def get_subwindow_init(self, im, pos, model_sz, original_sz, s_z, avg_chans):
if isinstance(pos, float):
pos = [pos, pos]
sz = original_sz
im_sz = im.shape
c = (original_sz + 1) / 2
# context_xmin = round(pos[0] - c) # py2 and py3 round
context_xmin = np.floor(pos[0] - c + 0.5)
context_xmax = context_xmin + sz - 1
# context_ymin = round(pos[1] - c)
context_ymin = np.floor(pos[1] - c + 0.5)
context_ymax = context_ymin + sz - 1
left_pad = int(max(0., -context_xmin))
top_pad = int(max(0., -context_ymin))
right_pad = int(max(0., context_xmax - im_sz[1] + 1))
bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
r, c, k = im.shape
if any([top_pad, bottom_pad, left_pad, right_pad]):
size = (r + top_pad + bottom_pad, c + left_pad + right_pad, k)
te_im = np.zeros(size, np.uint8)
te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
if top_pad:
te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans
if bottom_pad:
te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans
if left_pad:
te_im[:, 0:left_pad, :] = avg_chans
if right_pad:
te_im[:, c + left_pad:, :] = avg_chans
im_patch = te_im[int(context_ymin):int(context_ymax + 1),
int(context_xmin):int(context_xmax + 1), :]
else:
im_patch = im[int(context_ymin):int(context_ymax + 1),
int(context_xmin):int(context_xmax + 1), :]
# patch
im_patch[0:int((original_sz - s_z) / 2), :, :] = avg_chans
im_patch[:, 0:int((original_sz - s_z) / 2), :] = avg_chans
im_patch[int((original_sz + s_z) / 2):original_sz, :, :] = avg_chans
im_patch[:, int((original_sz + s_z) / 2):original_sz, :] = avg_chans
#
if not np.array_equal(model_sz, original_sz):
im_patch = cv2.resize(im_patch, (model_sz, model_sz))
im_patch = im_patch.transpose(2, 0, 1)
im_patch = im_patch[np.newaxis, :, :, :]
im_patch = im_patch.astype(np.float32)
im_patch = torch.from_numpy(im_patch)
if cfg.CUDA:
im_patch = im_patch.cuda()
return im_patch
def get_z_crop(self, img, bbox):
self.center_pos = np.array([bbox[0] + (bbox[2] - 1) / 2,
bbox[1] + (bbox[3] - 1) / 2])
self.size = np.array([bbox[2], bbox[3]])
# calculate z crop size
w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
s_z = round(np.sqrt(w_z * h_z))
# calculate channle average
self.channel_average = np.mean(img, axis=(0, 1))
# get crop
z_crop = self.get_subwindow(img, self.center_pos,
cfg.TRACK.EXEMPLAR_SIZE,
s_z, self.channel_average)
return z_crop
def template(self, img, bbox):
z_crop = self.get_z_crop(img, bbox)
return self.model.template_rb(z_crop)
def template_raw(self, img, bbox):
z_crop = self.get_z_crop(img, bbox)
return self.model.template_rb_raw(z_crop)
def img_tensor2cpu(self,img):
return img.permute(2, 3, 1, 0).squeeze().cpu().detach().numpy().astype(np.uint8)
def bbox_cwh2xywh(self,bbox):
return [bbox[0] - bbox[2]/2, bbox[1] - bbox[3]/2,bbox[2],bbox[3]]
@torch.no_grad()
def template_upate(self, zf, weight):
if weight > 1 or weight < 0:
raise ValueError("weight must between 0 and 1.")
for idx in range(len(zf)):
self.zf_global[idx] = (1 - weight) * self.zf_global[idx] + weight * zf[idx]
self.model.zf = self.zf_global
def sample_scoremap(self, score_map):
num_sample = 2000
X = rubost_track.scoremap_sample_reject(score_map, num_sample)
state_sampling = False if len(X) <= int(num_sample / 100) else True
return X, state_sampling
def get_seg_gmm(self, gmm, labels):
label = np.unique(labels)
seg_gmm = []
wt_sum = []
for i, lb in enumerate(label):
idx = np.where(labels == lb)
mu = gmm.means_[idx]
weight = gmm.weights_[idx]
cov = gmm.covariances_[idx]
seg_gmm.append((idx, weight, mu, cov))
wt_sum.append(weight.sum())
wt_sum = np.array(wt_sum)
        index = np.argsort(wt_sum)[::-1]  # sort in descending order of total weight
seg_gmm_sort = []
for idx in index:
seg_gmm_sort.append(seg_gmm[idx])
return seg_gmm_sort
def cal_gms_meancov(self, X, gmm, seg_gmm):
Y_ = gmm.predict(X)
meancov = []
point_set = []
for idx, wt, mu, cov in seg_gmm:
wt_sum = wt.sum()
if wt_sum <= 0.1:
continue
points = np.empty((0, 2), float)
for lb in idx[0]:
points = np.vstack((points, X[Y_ == lb, :]))
mean = points.mean(axis=0)
if not len(points) ==1:
cov = np.cov(points.T)
v, w = np.linalg.eigh(cov)
std = np.sqrt(v[1])
else:
cov = np.zeros((2,2))
std= 0
meancov.append((mean, cov, std))
point_set.append(points)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.scatter(points[:,0],points[:,1])
# plt.plot(mean[0], mean[1], 'X', color='b')
# plt.xlim(-12.5, 12.5)
# plt.ylim(-12.5, 12.5)
return meancov, point_set
def get_respond_idx(self, score_map, point_set):
score_size = score_map.shape[0]
max_rep_idx = []
for i in range(len(point_set)):
if i > 2:
break
points = point_set[i] + score_size / 2
points = np.round(points).astype(np.int32)
z = score_map[points[:, 1], points[:, 0]]
pt = points[z.argmax(), :]
max_rep_idx.append(np.array([pt[1], pt[0]]))
return max_rep_idx
def segment_groups(self, score_map):
X, self.state_sampling = self.sample_scoremap(score_map)
if not self.state_sampling:
return 0
gmm = rubost_track.gmm_fit(X, 6)
labels = rubost_track.ChineseWhispers_gm(gmm, threhold=5, n_iter=5)
self.state_ngroups = len(np.unique(labels))
if self.state_ngroups <= 0:
raise ValueError("Groups must greater than 1.")
seg_gmm = self.get_seg_gmm(gmm, labels)
meancov, point_set = self.cal_gms_meancov(X, gmm, seg_gmm)
repond_idx = self.get_respond_idx(score_map, point_set)
        # Track the largest per-component std among the (at most) two dominant components.
        max_std = float('-inf')
        for _, _, std in meancov[:2]:
            if std > max_std:
                max_std = std
        self.center_std = max_std
        self.state_std = True if self.center_std < self.center_std_thre else False
if self.visualize_gmm:
center_label = labels[0]
rubost_track.plot_results_cw(X, gmm.predict(X), seg_gmm, meancov, '1,2,2', str(self.frame_num))
return repond_idx
def find_pbbox(self,score_nms,pred_bbox_nms,score_size,scale_z):
penalty = self.calc_penalty(pred_bbox_nms, scale_z)
pscore = self.penalize_score(score_nms, penalty, score_size, True)
best_idx = np.argmax(pscore)
best_score = score_nms[best_idx]
bbox = pred_bbox_nms[:, best_idx] / scale_z
lr = penalty[best_idx] * score_nms[best_idx] * cfg.TRACK.LR
cx = bbox[0] + self.center_pos[0]
cy = bbox[1] + self.center_pos[1]
width = self.size[0] * (1 - lr) + bbox[2] * lr
height = self.size[1] * (1 - lr) + bbox[3] * lr
pbbox = [cx, cy, width, height]
return penalty,pbbox
def find_proposal_bbox(self, penalty, score_nms,pred_bbox_nms, repond_idx, score_size, scale_z):
proposal_bbox = []
if repond_idx !=0: # sampling_state == True
for cord in repond_idx:
idx = np.ravel_multi_index(cord, (score_size, score_size))
bb = pred_bbox_nms[:, idx] / scale_z
lr = penalty[idx] * score_nms[idx] * cfg.TRACK.LR
cx = bb[0] + self.center_pos[0]
cy = bb[1] + self.center_pos[1]
width = self.size[0] * (1 - lr) + bb[2] * lr
height = self.size[1] * (1 - lr) + bb[3] * lr
proposal_bbox.append([cx , cy , width, height])
return proposal_bbox
def merge_bbox(self,img,pbbox,proposal_bbox):
def to_lurd(box):
return [box[0] - box[2] / 2,box[1] - box[3] / 2,
box[0] + box[2] / 2,box[1] + box[3] / 2]
filtered_bbox=[]
for bb in proposal_bbox:
iou_score=rubost_track.cal_iou(to_lurd(pbbox),to_lurd(bb))
# print('iou: %f' %(iou_score))
if iou_score <=0.76:
cx, cy, width, height = self._bbox_clip(bb[0], bb[1], bb[2],
bb[3], img.shape[:2])
filtered_bbox.append([cx - width/2,cy - height/2,width,height])
return filtered_bbox
def score_nms(self,outputs,score_size,anchors):
score = self._convert_score(outputs['cls'])
pred_bbox = self._convert_bbox(outputs['loc'], anchors)
best_score = score[np.argmax(score)]
index = np.argmax(score.reshape(5, -1), axis=0)
score_nms = score.reshape(5, -1)[index, np.arange(score_size * score_size)]
pred_bbox_nms = pred_bbox.reshape(4, 5, -1)[:, index, np.arange(score_size * score_size)]
return score_nms,pred_bbox_nms,best_score
def calc_penalty(self, pred_bbox, scale_z):
def change(r):
return np.maximum(r, 1. / r)
def sz(w, h):
pad = (w + h) * 0.5
return np.sqrt((w + pad) * (h + pad))
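        # Combined penalty (computed below): exp(-(r_c * s_c - 1) * PENALTY_K), so candidates
        # whose scale or aspect ratio deviate from the previous target size are down-weighted.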
# scale penalty
s_c = change(sz(pred_bbox[2, :], pred_bbox[3, :]) /
(sz(self.size[0] * scale_z, self.size[1] * scale_z)))
# ratio penalty
r_c = change((self.size[0] / self.size[1]) /
(pred_bbox[2, :] / pred_bbox[3, :]))
penalty = np.exp(-(r_c * s_c - 1) * cfg.TRACK.PENALTY_K)
return penalty
def penalize_score(self, score, penalty, score_size, update_state):
pscore = penalty * score
hanning = np.hanning(score_size)
window = np.outer(hanning, hanning)
window=window.flatten()
# window = np.tile(window.flatten(), self.anchor_num)
# window
pscore = pscore * (1 - cfg.TRACK.WINDOW_INFLUENCE) + \
window * cfg.TRACK.WINDOW_INFLUENCE
return pscore
def get_similar_state(self,img,pbbox,filtered_ppbbox):
hist_score1, hist_score2 = self.similar_compare_deep(img, pbbox, filtered_ppbbox)
hist_score = hist_score1 / np.array(hist_score2)
print('----------')
print(hist_score1)
print(hist_score2)
print('----------')
# print(hist_score)
idx = np.argmax(hist_score)
bbox = filtered_ppbbox[idx]
similar_state= False if any(hist_score > self.similar_thre) else True
return similar_state,bbox
def track(self, img):
"""
args:
img(np.ndarray): BGR image
return:
bbox(list):[x, y, width, height]
"""
instance_size = self.instance_sizes[0]
w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
s_z = np.sqrt(w_z * h_z)
scale_z = cfg.TRACK.EXEMPLAR_SIZE / s_z
score_size = (instance_size - cfg.TRACK.EXEMPLAR_SIZE) // \
cfg.ANCHOR.STRIDE + 1 + cfg.TRACK.BASE_SIZE
anchors = self.generate_anchor(score_size)
s_x = s_z * (instance_size / cfg.TRACK.EXEMPLAR_SIZE)
x_crop = self.get_subwindow(img, self.center_pos, instance_size,
round(s_x), self.channel_average)
outputs = self.model.track(x_crop)
score_nms,pred_bbox_nms,best_score=self.score_nms(outputs,score_size,anchors)
score_map = score_nms.reshape(score_size, score_size)
penalty, pbbox = self.find_pbbox(score_nms,pred_bbox_nms,score_size,scale_z)
repond_idx = self.segment_groups(score_map)
if self.visualize:
frame = rubost_track.visualize_tracking_heated(self.img_tensor2cpu(x_crop),score_map, instance_size,self.frame_num, best_score)
if best_score <= self.CONFIDENCE_LOW or self.state_sampling == False:
self.state_update = False
self.state_lost = True
self.state_lost_cnt+=1
proposal_bbox = self.find_proposal_bbox(penalty, score_nms,pred_bbox_nms, repond_idx, score_size, scale_z)
filtered_ppbbox = self.merge_bbox(img, pbbox, proposal_bbox)
if self.state_lost:
self.state_lost=False
return {'bbox': self.bbox,'s_x': s_x,'score_map': score_map}
if self.state_update:
self.template_upate(self.zf_gt, 0.0626)
if self.state_cross:
self.cross_count+=1
if self.cross_count2 >=6:
self.state_cross=False
self.cross_count2=0
if self.cross_count < 3 :
self.center_pos = np.array([pbbox[0], pbbox[1]])
self.size = np.array([pbbox[2], pbbox[3]])
cx, cy, width, height = self._bbox_clip(pbbox[0], pbbox[1], pbbox[2],
pbbox[3], img.shape[:2])
bbox = [cx - width / 2,
cy - height / 2,
width,
height]
self.bbox=bbox
return {'bbox': self.bbox, 's_x': s_x, 'score_map': score_map, 'proposal_bbox': filtered_ppbbox}
if len(filtered_ppbbox):
similar_state,bbox = self.get_similar_state(img, self.bbox_cwh2xywh(pbbox), filtered_ppbbox)
if not similar_state:
self.center_pos = np.array([bbox[0], bbox[1]])
self.size = np.array([bbox[2], bbox[3]])
self.state_cross=False
else:
self.center_pos = np.array([pbbox[0], pbbox[1]])
self.size = np.array([pbbox[2], pbbox[3]])
cx, cy, width, height = self._bbox_clip(pbbox[0], pbbox[1], pbbox[2],
pbbox[3], img.shape[:2])
bbox = [cx - width / 2,
cy - height / 2,
width,
height]
self.bbox=bbox
self.state_cross = False
self.cross_count = 0
else:
self.center_pos = np.array([pbbox[0], pbbox[1]])
self.size = np.array([pbbox[2], pbbox[3]])
cx, cy, width, height = self._bbox_clip(pbbox[0], pbbox[1], pbbox[2],
pbbox[3], img.shape[:2])
bbox = [cx - width / 2,
cy - height / 2,
width,
height]
self.bbox = bbox
self.cross_count2+=1
return {'bbox': self.bbox, 's_x': s_x, 'score_map': score_map, 'proposal_bbox': filtered_ppbbox}
self.center_pos = np.array([pbbox[0], pbbox[1]])
self.size = np.array([pbbox[2], pbbox[3]])
cx, cy, width, height = self._bbox_clip(pbbox[0], pbbox[1], pbbox[2],
pbbox[3], img.shape[:2])
self.bbox = [cx - width / 2,
cy - height / 2,
width,
height]
if self.state_std:
if not len(filtered_ppbbox):
self.state_update = True
else:
self.state_update = False
self.state_cross=True
if self.state_update:
self.template_upate(self.template(img, self.bbox), 0.0626)
# self.zf_bbox_global=self.feature_mix(self.zf_bbox_global,
# self.img_similar.get_feature(self.crop_bbox(img,self.bbox)),
# 0.0626)
if self.visualize:
rubost_track.put_text_update_std(frame,self.center_std,self.state_update)
return {'bbox': self.bbox,'s_x': s_x,'score_map': score_map,'proposal_bbox': filtered_ppbbox}
def similar_compare_deep(self,img,bbox,filtered_ppbbox):
score1 = self.img_similar.similarity_score(self.zf_bbox_global,
self.img_similar.get_feature(
self.crop_bbox(img,bbox)))
if self.visualize:
cv2.imshow('1', self.crop_bbox(img, bbox))
if self.save_img:
cv2.imwrite('/home/rislab/Workspace/pysot/rb_result/stds/' + str(self.frame_num) + '_1.jpg', self.crop_bbox(img, bbox))
cv2.waitKey(1)
score2 = []
for idx,fbbox in enumerate(filtered_ppbbox):
fbbox=np.array(fbbox)
fbbox[np.where(fbbox < 0 )]=0
score = self.img_similar.similarity_score(self.zf_bbox_global,
self.img_similar.get_feature(
self.crop_bbox(img, fbbox)))
score2.append(score)
if self.visualize:
cv2.imshow('2', self.crop_bbox(img, fbbox))
if self.save_img:
cv2.imwrite('/home/rislab/Workspace/pysot/rb_result/stds/' + str(self.frame_num) + '_'+str(idx)+'_2.jpg',
self.crop_bbox(img, fbbox))
cv2.waitKey(1)
return score1,score2
def crop_bbox(self,img,bbox):
bbox=np.array(bbox)
bbox[np.where(bbox < 0)] = 0
return img[int(bbox[1]):int(bbox[1] + bbox[3]),
int(bbox[0]):int(bbox[0] + bbox[2]), :]
def feature_mix(self,feature1,feature2,wt):
        return feature1*(1-wt) + feature2*wt
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 24 14:44:33 2022
@author: aknur
"""
import openseespy.opensees as ops
import pandas as pd
import csv
import os
import numpy as np
import random
import math
from functions import *
import column
import time
start_time = time.time()
pd.options.display.max_columns = None
pd.options.display.max_rows = None
# Create an instance of class Column
parameters = column.Column()
# Loading the dataset
df = pd.read_csv("nn_combo1.csv")
#df = df[1:]
# Adding columns to store gridlines
df['w_g']=0
df['d_g']=0
# Directory to store the output response files
directory ='.'
eps1U = -0.002 # strain at maximum strength of unconfined concrete from Eurocode
eps2U = -0.0035 # strain at ultimate stress from Eurocode
fy = parameters.fy/parameters.g_s # Yield stress
# Lists to track the checks
Mz_check = []
My_check = []
As_final = []
As_1 = []
As_2 = []
P_max = []
Spacing = []
numBar = []
# Iterating over the test case prediction designs
for index, row in df.iterrows():
print(index)
# Reading the predictions, put the dataset columns in the following order
colHeight,fc, P, My_red, Mz_red,colWidth, colDepth,As_total, w_g, d_g = row
fc = fc/parameters.g_c # Concrete design strength
# Rounding down the predicted geometry dimensions
colWidth = 0.05*math.floor(colWidth/0.05)
colDepth = 0.05*math.floor(colDepth/0.05)
P = -P
count = 0
passed_check = 0
while passed_check==0:
#passed_check=1
        # If rounding down made the design fail, round the depth up and retry
if count==1:
#colWidth = colWidth + 0.05
colDepth = colDepth + 0.05
print(colWidth, colDepth)
count = 2
count = 1
# Iterate over all rebar diameters starting from largest one
# since it is more practical from engineering perspective
for barDiam in parameters.d_rebars:
barDiam = int(1000*barDiam)/1000
As_bar = (math.pi)*(barDiam*barDiam)/4
A_core = colWidth*colDepth
As_min = 0.002*A_core
As_max = 0.04*A_core
# Check the steel area requirements
if As_total>=As_min and As_total<As_max:
As_1.append(index)
numBarsSec = math.ceil(As_total/As_bar)
# Check for the number of reinforcements
if numBarsSec>= 4:
numBar.append(index)
# Some variables derived from the parameters
coverY = colDepth/2.0 # The distance from the section z-axis to the edge of the cover concrete -- outer edge of cover concrete
coverZ = colWidth/2.0 # The distance from the section y-axis to the edge of the cover concrete -- outer edge of cover concrete
coreY = coverY - parameters.cover - barDiam/2 # The distance from the section z-axis to the edge of the core concrete -- edge of the core concrete/inner edge of cover concrete
coreZ = coverZ - parameters.cover - barDiam/2 # The distance from the section y-axis to the edge of the core concrete -- edge of the core concrete/inner edge of cover concrete
dist1 = coverY - parameters.cover/2
dist2 = coverZ - parameters.cover/2
# Scheme with parametrisation using no reinforcement area
listDivisorPairs = returnDivisorsPair(numBarsSec)
if (len(listDivisorPairs) == 1):
listDivisorPairs = returnDivisorsPair(numBarsSec-1)
list_w_g = [*range(2, math.ceil(numBarsSec/2)+1, 1)]
for w_g in list_w_g:
d_g = math.ceil(numBarsSec/2)-w_g+2
w_h = (colWidth-2*barDiam-2.5*parameters.cover)/colWidth
d_h = (colDepth-2*barDiam-2.5*parameters.cover)/colDepth
rebarZ = np.linspace(-coreZ, coreZ, w_g)
rebarY = np.linspace(-coreY, coreY, d_g)
spacingZ = (2*coreZ)/(w_g-1)
spacingY = (2*coreY)/(d_g-1)
# Checking for minimal spacing requirement
spacing_min=max(2*barDiam, barDiam+0.032+0.005, barDiam+0.020) #[m]
if (spacingZ > spacing_min or spacingY > spacing_min):
Spacing.append(index)
ops.wipe()
# Define model builder
ops.model('basic', '-ndm', 3, '-ndf', 6)
# Cover concrete (unconfined)
ops.uniaxialMaterial('Concrete01', parameters.IDcon, fc, eps1U, fc, eps2U)
# Reinforcement material matTag, Fy, E0, b)
ops.uniaxialMaterial('Steel01', parameters.IDreinf, fy, parameters.Es, parameters.Bs)
# Construct concrete fibers
ops.section('Fiber', parameters.SecTag, '-GJ', 1.0e6)
ops.patch('quadr', parameters.IDcon, parameters.num_fib, parameters.num_fib, -coreY, coreZ, -coreY, -coreZ, coreY, -coreZ, coreY, coreZ)
ops.patch('quadr', parameters.IDcon, 1, parameters.num_fib, -coverY, coverZ, -coreY, coreZ, coreY, coreZ, coverY, coverZ)
ops.patch('quadr', parameters.IDcon, 1, parameters.num_fib, -coreY, -coreZ, -coverY, -coverZ, coverY, -coverZ, coreY, -coreZ)
ops.patch('quadr', parameters.IDcon, parameters.num_fib, 1, -coverY, coverZ, -coverY, -coverZ, -coreY, -coreZ, -coreY, coreZ)
ops.patch('quadr', parameters.IDcon, parameters.num_fib, 1, coreY, coreZ, coreY, -coreZ, coverY, -coverZ, coverY, coverZ)
# Inserting rebars
hollowY = d_h*coverY
hollowZ = w_h*coverZ
rebars_YZ = np.empty((0,2))
for ii, Y in enumerate(rebarY):
for jj, Z in enumerate(rebarZ):
if (abs(Y) < hollowY and abs(Z) < hollowZ):
continue
rebars_YZ = np.vstack([rebars_YZ, [Y,Z]])
for ii in range(len(rebars_YZ)):
ops.fiber(*rebars_YZ[ii], As_bar, parameters.IDreinf)
# Check for number of rebars in final configuration
numTotRebars = len(rebars_YZ)
if (numTotRebars>4 or numTotRebars*As_bar>As_min) :
As_2.append(index)
eps = fy/parameters.Es # Steel yield strain
# Yield curvature
# d -- from cover to rebar
d_z = colDepth-parameters.cover-barDiam/2
Kz = eps/(0.7*d_z)
d_y = colWidth-parameters.cover-barDiam/2
Ky = eps/(0.7*d_y)
# Set axial load
As = As_bar * numTotRebars
Ac = colWidth*colDepth - As
Pmax = -parameters.alpha_coef*(parameters.nu_coef*(-fc)*Ac + fy*As)
Pmax = Pmax + parameters.unit_weight*colHeight*colDepth*colWidth
# Record the bar diameter
df.at[index, 'D_rebar'] = barDiam
numRebars = w_g*2+(d_g-2)*2
# Record number of rebars
df.at[index, 'numRebars'] = numRebars
                                # Check for final steel area
if -0.1*Pmax/fy < As or As>0.002*A_core:
# Record final steel area check pass
As_final.append(index)
# Check for axial load capacity
if Pmax<P:
# Record the axial load check pass
P_max.append(index)
strain1 = directory + 'strain1_.txt'
strain2 = directory + 'strain2_.txt'
strain3 = directory + 'strain3_.txt'
strain4 = directory + 'strain4_.txt'
strains = [strain1, strain2, strain3, strain4]
# Call the section analysis procedure
MomentCurvature(parameters, P, Kz, -1, 5, strains, dist1, dist2)
indeces = []
if os.path.getsize(strain1)>0:
strain1 = pd.read_csv(strain1, sep = ' ', header = None, )
filtered1 = strain1[strain1[2]>= -0.0035]
if len(filtered1)> 1:
indeces.append(list(filtered1.index)[-1])
if os.path.getsize(strain2)>0:
strain2 = pd.read_csv(strain2, sep = ' ', header = None, )
filtered2 = strain2[strain2[2]>= -0.0035]
if len(filtered2)> 1:
indeces.append(list(filtered2.index)[-1])
if os.path.getsize(strain3)>0:
strain3 = pd.read_csv(strain3, sep = ' ', header = None, )
filtered3 = strain3[strain3[2]>= -0.0035]
if len(filtered3)> 1:
indeces.append(list(filtered3.index)[-1])
if os.path.getsize(strain4)>0:
strain4 = pd.read_csv(strain4, sep = ' ', header = None, )
filtered4 = strain4[strain4[2]>= -0.0035]
if len(filtered4)> 1:
indeces.append(list(filtered4.index)[-1])
if len(indeces)>=1:
Moment_ult = min(indeces)
My_max = strain1.loc[Moment_ult, [0]]
My_max = My_max[0]
if My_max> My_red:
                                                # Record My pass check
My_check.append(index)
strain21 = directory + 'strain21_.txt'
strain22 = directory + 'strain22_.txt'
strain23 = directory + 'strain23_.txt'
strain24 = directory + 'strain24_.txt'
strains2 = [strain21, strain22, strain23, strain24]
# Call the section analysis procedure
MomentCurvature(parameters, P, Ky, My_red, 6, strains2, dist1, dist2)
indeces = []
if os.path.getsize(strain21)>0:
strain1 = pd.read_csv(strain21, sep = ' ', header = None)
filtered1 = strain1[strain1[2]>= -0.0035]
if len(filtered1)> 1:
indeces.append(list(filtered1.index)[-1])
if os.path.getsize(strain22)>0:
strain2 = pd.read_csv(strain22, sep = ' ', header = None)
filtered2 = strain2[strain2[2]>= -0.0035]
if len(filtered2)> 1:
indeces.append(list(filtered2.index)[-1])
if os.path.getsize(strain23)>0:
strain3 = pd.read_csv(strain23, sep = ' ', header = None)
filtered3 = strain3[strain3[2]>= -0.0035]
if len(filtered3)> 1:
indeces.append(list(filtered3.index)[-1])
if os.path.getsize(strain24)>0:
strain4 = pd.read_csv(strain24, sep = ' ', header = None)
filtered4 = strain4[strain4[2]>= -0.0035]
if len(filtered4)> 1:
indeces.append(list(filtered4.index)[-1])
if len(indeces)>=1:
Moment_ult = min(indeces)
Mz_max = strain1.loc[Moment_ult, [0]]
Mz_max = Mz_max[0]
# Check for Mz capacity
if Mz_max> Mz_red:
Mz_check.append(index)
#Record final design configuration
df.at[index, 'Width'] = colWidth
df.at[index, 'Depth'] = colDepth
df.at[index, 'w_g'] = w_g
df.at[index, 'd_g'] = d_g
numRebars = w_g*2+(d_g-2)*2
df.at[index, 'numRebars'] = numRebars
df.at[index, 'As_total'] = As
print("#################################")
passed_check = 1
break
else:
continue
break
print( len(set(As_1)), len(set(As_2)), len(set(As_final)), len(set(numBar)), len(set(P_max)), len(set(My_check)), len(set(Mz_check)))
# Compute the cost of the final designs
#df['fc'] = (-1)*df['fc']
fcs = np.asarray([-50.0, -45.0, -40.0, -35.0, -30.0, -25.0])
def price_concrete(row):
# Source for prices: https://jcbetons.lv/cenas-en/?lang=en
# + 21% VAT
if row['fc'] == fcs[0]:
return 232 # 95 EUR/m3 - assumed value
if row['fc'] == fcs[1]:
return 254 # 90 EUR/m3 - assumed value
if row['fc'] == fcs[2]:
return 275 # 85 EUR/m3 - assumed value
if row['fc'] == fcs[3]:
return 294
if row['fc'] == fcs[4]:
return 311
if row['fc'] == fcs[5]:
return 328
return -1
# Prices in EUR/m3 (steel taken as 1.38 EUR/kg x 7850 kg/m3 density)
df['price_s'] = 1.38*7850
df['price_c'] = df.apply(lambda row: price_concrete(row), axis=1)
df['price'] = df['colHeight']*((df['Width']*df['Depth'] - df['As_total'])*df['price_c'] + df['As_total']*df['price_s'])
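# Illustrative cost figure (assumed numbers): a 3 m high, 0.3 m x 0.3 m column with
# As_total = 0.002 m2 and fc = -35 MPa gives
# price = 3*((0.3*0.3 - 0.002)*294 + 0.002*1.38*7850) ~ 143 EUR.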
print("--- %s seconds ---" % (time.time() - start_time))
|
<reponame>prateek-77/rcan-it
import math
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from ._utils import conv3x3, conv1x1, get_activation
class ResidualBase(nn.Module):
def __init__(self, stochastic_depth: bool = False,
prob: float = 1.0, multFlag: bool = True) -> None:
super().__init__()
self.sd = stochastic_depth
if stochastic_depth:
self.prob = prob
self.multFlag = multFlag
def forward(self, x: torch.Tensor) -> torch.Tensor:
identity = x.clone()
return self._forward_train(x, identity) if self.training \
else self._forward_test(x, identity)
def _forward_train(self, x, identity) -> torch.Tensor:
if not self.sd: # no stochastic depth
res = self._forward_res(x)
return identity + res
if torch.rand(1) < self.prob: # no skip
for param in self.parameters():
param.requires_grad = True
res = self._forward_res(x)
return identity + res
# This block is skipped during training
for param in self.parameters():
param.requires_grad = False
return identity
def _forward_test(self, x, identity) -> torch.Tensor:
res = self._forward_res(x)
if self.sd and self.multFlag:
res *= self.prob
return identity + res
def _forward_res(self, _) -> torch.Tensor:
# Residual forward function should be
# defined in child classes.
raise NotImplementedError
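# Stochastic-depth behaviour of ResidualBase: during training the residual branch runs
# with probability `prob` (otherwise only the identity is returned and the block's
# parameters get requires_grad=False for that step); at test time the branch always runs
# and, when `multFlag` is True, the residual is scaled by `prob` so the expected output
# matches training.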
class PreActBasicBlock(ResidualBase):
def __init__(self, planes: int, stochastic_depth: bool = False,
act_mode: str = 'relu', prob: float = 1.0, multFlag: bool = True,
zero_inti_residual: bool = False, affine_init_w: float = 0.1,
**_) -> None:
super().__init__(stochastic_depth, prob, multFlag)
self.aff1 = Affine2d(planes, affine_init_w)
self.conv1 = conv3x3(planes, planes)
self.aff2 = Affine2d(planes, affine_init_w)
self.conv2 = conv3x3(planes, planes)
self.act = get_activation(act_mode)
if zero_inti_residual:
nn.init.constant_(self.aff2.weight, 0.0)
def _forward_res(self, x: torch.Tensor) -> torch.Tensor:
x = self.aff1(x)
x = self.act(x)
x = self.conv1(x)
x = self.aff2(x)
x = self.act(x)
x = self.conv2(x)
return x
class PreActBasicBlockDW(ResidualBase):
def __init__(self, planes: int, stochastic_depth: bool = False,
act_mode: str = 'relu', prob: float = 1.0, multFlag: bool = True,
zero_inti_residual: bool = False, affine_init_w: float = 0.1,
reduction: int = 8) -> None:
super().__init__(stochastic_depth, prob, multFlag)
self.aff1 = Affine2d(planes, affine_init_w)
self.conv1 = conv3x3(planes, planes, groups=planes)
self.se1 = SEBlock(planes, reduction, act_mode)
self.aff2 = Affine2d(planes, affine_init_w)
self.conv2 = conv3x3(planes, planes, groups=planes)
self.se2 = SEBlock(planes, reduction, act_mode)
self.act = get_activation(act_mode)
if zero_inti_residual:
nn.init.constant_(self.aff2.weight, 0.0)
def _forward_res(self, x: torch.Tensor) -> torch.Tensor:
x = self.aff1(x)
x = self.act(x)
x = self.conv1(x)
x = self.se1(x)
x = self.aff2(x)
x = self.act(x)
x = self.conv2(x)
x = self.se2(x)
return x
class PreActBottleneck(ResidualBase):
def __init__(self, planes: int, stochastic_depth: bool = False,
act_mode: str = 'relu', prob: float = 1.0, multFlag: bool = True,
zero_inti_residual: bool = False, affine_init_w: float = 0.1,
**_) -> None:
super().__init__(stochastic_depth, prob, multFlag)
self.aff1 = Affine2d(planes, affine_init_w)
self.conv1 = conv1x1(planes, planes)
self.aff2 = Affine2d(planes, affine_init_w)
self.conv2 = conv3x3(planes, planes)
self.aff3 = Affine2d(planes, affine_init_w)
self.conv3 = conv1x1(planes, planes)
self.act = get_activation(act_mode)
if zero_inti_residual:
nn.init.constant_(self.aff3.weight, 0.0)
def _forward_res(self, x: torch.Tensor) -> torch.Tensor:
x = self.aff1(x)
x = self.act(x)
x = self.conv1(x)
x = self.aff2(x)
x = self.act(x)
x = self.conv2(x)
x = self.aff3(x)
x = self.act(x)
x = self.conv3(x)
return x
class MBConvBlock(ResidualBase):
def __init__(self, planes: int, stochastic_depth: bool = False, act_mode: str = 'relu',
prob: float = 1.0, multFlag: bool = True, reduction: int = 8,
zero_inti_residual: bool = False, affine_init_w: float = 0.1) -> None:
super().__init__(stochastic_depth, prob, multFlag)
self.conv1 = conv1x1(planes, planes)
self.aff1 = Affine2d(planes, affine_init_w)
self.conv2 = conv3x3(planes, planes, groups=planes) # depth-wise
self.aff2 = Affine2d(planes, affine_init_w)
self.se = SEBlock(planes, reduction, act_mode)
self.conv3 = conv1x1(planes, planes)
self.aff3 = Affine2d(planes, affine_init_w)
self.act = get_activation(act_mode)
if zero_inti_residual:
nn.init.constant_(self.aff3.weight, 0.0)
def _forward_res(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv1(x)
x = self.aff1(x)
x = self.act(x)
x = self.conv2(x)
x = self.aff2(x)
x = self.act(x)
x = self.se(x)
x = self.conv3(x)
x = self.aff3(x) # no activation
return x
class EDSRBlock(ResidualBase):
def __init__(self, planes: int, bias: bool = True, act_mode: str = 'relu',
res_scale: float = 0.1, res_scale_learnable: bool = False,
stochastic_depth: bool = False, prob: float = 1.0, multFlag: bool = True, **_):
super().__init__(stochastic_depth, prob, multFlag)
if res_scale_learnable:
self.res_scale = Parameter(torch.ones(1))
nn.init.constant_(self.res_scale, res_scale)
else:
self.res_scale = res_scale
self.body = nn.Sequential(
conv3x3(planes, planes, bias=bias),
get_activation(act_mode),
conv3x3(planes, planes, bias=bias))
def _forward_res(self, x: torch.Tensor) -> torch.Tensor:
x = self.body(x).mul(self.res_scale)
return x
class RCANBlock(ResidualBase):
def __init__(self, planes: int, bias: bool = True, act_mode: str = 'relu',
res_scale: float = 0.1, reduction: int = 16, res_scale_learnable: bool = False,
stochastic_depth: bool = False, prob: float = 1.0, multFlag: bool = True,
normal_init_std: Optional[float] = None, **_):
super().__init__(stochastic_depth, prob, multFlag)
if res_scale_learnable:
self.res_scale = Parameter(torch.ones(1))
nn.init.constant_(self.res_scale, res_scale)
else:
self.res_scale = res_scale
self.body = nn.Sequential(
conv3x3(planes, planes, bias=bias),
get_activation(act_mode),
conv3x3(planes, planes, bias=bias),
SEBlock(planes, reduction, act_mode))
# normal initialization
if normal_init_std is not None:
for idx in [0, 2]:
nn.init.normal_(self.body[idx].weight, 0.0, normal_init_std)
def _forward_res(self, x: torch.Tensor) -> torch.Tensor:
x = self.body(x).mul(self.res_scale)
return x
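# RCANBlock is the residual channel-attention block: conv3x3 -> activation -> conv3x3 -> SE,
# scaled by res_scale (optionally a learnable scalar) before ResidualBase adds it to the
# identity path; when normal_init_std is given, both conv weights are re-initialised
# from N(0, normal_init_std).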
class RCANBlockDW(ResidualBase):
"""RCAN building block with depth-wise convolution for the second conv layer.
"""
def __init__(self, planes: int, bias: bool = True, act_mode: str = 'relu',
res_scale: float = 0.1, reduction: int = 16, res_scale_learnable: bool = False,
stochastic_depth: bool = False, prob: float = 1.0, multFlag: bool = True, **_):
super().__init__(stochastic_depth, prob, multFlag)
if res_scale_learnable:
self.res_scale = Parameter(torch.ones(1))
nn.init.constant_(self.res_scale, res_scale)
else:
self.res_scale = res_scale
self.body = nn.Sequential(
conv3x3(planes, planes, bias=bias),
get_activation(act_mode),
conv3x3(planes, planes, bias=bias, groups=planes),
SEBlock(planes, reduction, act_mode))
def _forward_res(self, x: torch.Tensor) -> torch.Tensor:
x = self.body(x).mul(self.res_scale)
return x
class RCANBlockAllDW(ResidualBase):
"""RCAN building block with depth-wise convolution for all conv layers. An
additional squeeze-and-excitation (SE) block is used for the cross-channel
communication.
"""
def __init__(self, planes: int, bias: bool = True, act_mode: str = 'relu',
res_scale: float = 0.1, reduction: int = 16, res_scale_learnable: bool = False,
stochastic_depth: bool = False, prob: float = 1.0, multFlag: bool = True, **_):
super().__init__(stochastic_depth, prob, multFlag)
if res_scale_learnable:
self.res_scale = Parameter(torch.ones(1))
nn.init.constant_(self.res_scale, res_scale)
else:
self.res_scale = res_scale
self.body = nn.Sequential(
conv3x3(planes, planes, bias=bias, groups=planes),
SEBlock(planes, reduction, act_mode),
get_activation(act_mode),
conv3x3(planes, planes, bias=bias, groups=planes),
SEBlock(planes, reduction, act_mode))
def _forward_res(self, x: torch.Tensor) -> torch.Tensor:
x = self.body(x).mul(self.res_scale)
return x
class SEBlock(nn.Module):
def __init__(self, planes: int, reduction: int = 8, act_mode: str = 'relu'):
super().__init__()
self.squeeze = nn.AdaptiveAvgPool2d(1)
self.excitation = nn.Sequential(
nn.Conv2d(planes, planes // reduction, kernel_size=1),
get_activation(act_mode),
nn.Conv2d(planes // reduction, planes, kernel_size=1),
nn.Sigmoid()
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
y = self.squeeze(x)
y = self.excitation(y)
return x * y
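# Squeeze-and-excitation: global average pooling squeezes each channel to a single value,
# the 1x1-conv bottleneck (planes -> planes//reduction -> planes) plus sigmoid turns that
# into a per-channel gate in [0, 1], and the input is rescaled channel-wise. Shape sketch
# for an N x C x H x W input: the gate is N x C x 1 x 1 and broadcasts over H and W.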
class MeanShift(nn.Conv2d):
def __init__(self, rgb_range, rgb_mean=(0.4488, 0.4371, 0.4040),
rgb_std=(1.0, 1.0, 1.0), sign=-1):
super(MeanShift, self).__init__(3, 3, kernel_size=1)
std = torch.Tensor(rgb_std)
self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)
self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std
for p in self.parameters():
p.requires_grad = False
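# Fixed 1x1 convolution that shifts RGB values by the dataset mean: with sign=-1 it
# computes (x - rgb_range*rgb_mean)/rgb_std per channel, and with sign=+1 it adds the
# mean back. Its parameters are frozen, so the layer is never trained.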
class Affine2d(nn.Module):
def __init__(self, planes: int, init_w: float = 0.1) -> None:
super().__init__()
self.weight = Parameter(torch.ones(1, planes, 1, 1))
self.bias = Parameter(torch.zeros(1, planes, 1, 1))
nn.init.constant_(self.weight, init_w)
def forward(self, x):
return x * self.weight + self.bias
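# Per-channel affine transform y = weight * x + bias (one scalar pair per channel),
# used here as a lightweight, statistics-free substitute for the scale/shift of a
# normalisation layer; the weight starts at init_w and the bias at zero.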
class Upsampler(nn.Sequential):
def __init__(self, scale: int, planes: int, act_mode: str = 'relu',
use_affine: bool = True):
m = []
if (scale & (scale - 1)) == 0: # is power of 2
if use_affine:
for _ in range(int(math.log(scale, 2))):
m.append(conv3x3(planes, 4 * planes))
m.append(nn.PixelShuffle(2))
m.append(Affine2d(planes))
m.append(get_activation(act_mode))
else:
for _ in range(int(math.log(scale, 2))):
m.append(conv3x3(planes, 4 * planes, bias=True))
m.append(nn.PixelShuffle(2))
m.append(get_activation(act_mode))
elif scale == 3:
if use_affine:
m.append(conv3x3(planes, 9 * planes))
m.append(nn.PixelShuffle(3))
m.append(Affine2d(planes))
m.append(get_activation(act_mode))
else:
m.append(conv3x3(planes, 9 * planes, bias=True))
m.append(nn.PixelShuffle(3))
m.append(get_activation(act_mode))
else:
raise NotImplementedError
super(Upsampler, self).__init__(*m)
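# PixelShuffle-based upsampling: each stage expands channels with conv3x3
# (planes -> 4*planes for x2, or 9*planes for x3) and then rearranges them into space
# with PixelShuffle. For example, Upsampler(scale=4, planes=64) stacks two x2 stages
# and maps an (N, 64, H, W) input to (N, 64, 4H, 4W).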
|
#!/usr/bin/python
import subprocess
import os
import sys
import getopt
import traceback
import shutil
import re
def Usage(args):
print sys.argv[0] + ' [-hp] [-r revision]'
print ''
print ' -r\t: Specify rocket internal revision number'
print ' -p\t: Include python libraries'
print ' -s\t: Include full source code and build files'
print ' -h\t: This help screen'
print ''
sys.exit()
def CheckVSVars():
if 'VCINSTALLDIR' in os.environ:
return
if not 'VS90COMNTOOLS' in os.environ:
print "Unable to find VS9 install - check your VS90COMNTOOLS environment variable"
sys.exit()
path = os.environ['VS90COMNTOOLS']
subprocess.call('"' + path + 'vsvars32.bat" > NUL && ' + ' '.join(sys.argv))
sys.exit()
def ProcessOptions(args):
options = {'ROCKET_VERSION': 'custom', 'BUILD_PYTHON': False, 'FULL_SOURCE': False, 'ARCHIVE_NAME': 'libRocket-sdk'}
try:
optlist, args = getopt.getopt(args, 'r:phs')
except getopt.GetoptError, e:
print '\nError: ' + str(e) + '\n'
Usage(args)
for opt in optlist:
if opt[0] == '-h':
Usage(args)
if opt[0] == '-r':
options['ROCKET_VERSION'] = opt[1]
if opt[0] == '-p':
options['BUILD_PYTHON'] = True
if opt[0] == '-s':
options['FULL_SOURCE'] = True
options['ARCHIVE_NAME'] = 'libRocket-source'
return options
def Build(project, configs, defines = {}):
old_cl = ''
if 'CL' in os.environ:
old_cl = os.environ['CL']
else:
os.environ['CL'] = ''
for name, value in defines.iteritems():
os.environ['CL'] = os.environ['CL'] + ' /D' + name + '=' + value
for config in configs:
cmd = '"' + os.environ['VCINSTALLDIR'] + '\\vcpackages\\vcbuild.exe" /rebuild ' + project + '.vcproj "' + config + '|Win32"'
ret = subprocess.call(cmd)
if ret != 0:
print "Failed to build " + project
sys.exit()
os.environ['CL'] = old_cl
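# cl.exe picks up extra command-line options from the CL environment variable, so the
# /D defines appended above are applied to every file vcbuild compiles for these
# configurations; the previous value of CL is restored once all configs are built.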
def DelTree(path):
if not os.path.exists(path):
return
print 'Deleting ' + path + '...'
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
def CopyFiles(source_path, destination_path, file_list = [], exclude_list = [], preserve_paths = True):
working_directory = os.getcwd()
source_directory = os.path.abspath(os.path.join(working_directory, os.path.normpath(source_path)))
destination_directory = os.path.abspath(os.path.join(working_directory, os.path.normpath(destination_path)))
print "Copying " + source_directory + " to " + destination_directory + " ..."
if not os.path.exists(source_directory):
print "Warning: Source directory " + source_directory + " doesn't exist."
return False
for root, directories, files in os.walk(source_directory, False):
for file in files:
# Skip files not in the include list.
if len(file_list) > 0:
included = False
for include in file_list:
if re.search(include, file):
included = True
break
if not included:
continue
# Determine our subdirectory.
subdir = root.replace(source_directory, "")
if subdir[:1] == os.path.normcase('/'):
subdir = subdir[1:]
# Skip paths in the exclude list
excluded = False
for exclude in exclude_list:
if re.search(exclude, file):
excluded = True
break
if excluded:
continue
# Build up paths
source_file = os.path.join(root, file)
destination_subdir = destination_directory
if preserve_paths:
destination_subdir = os.path.join(destination_directory, subdir)
if not os.path.exists(destination_subdir):
os.makedirs(destination_subdir)
destination_file = os.path.join(destination_subdir, file)
# Copy files
try:
shutil.copy(source_file, destination_file)
except:
print "Failed copying " + source_file + " to " + destination_file
traceback.print_exc()
return True
def Archive(archive_name, path):
cwd = os.getcwd()
os.chdir(path + '/..')
file_name = archive_name + '.zip'
if os.path.exists(file_name):
os.unlink(file_name)
os.system('zip -r ' + file_name + ' ' + path[path.rfind('/')+1:])
os.chdir(cwd)
def main():
CheckVSVars()
options = ProcessOptions(sys.argv[1:])
Build('RocketCore', ['Debug', 'Release'], {'ROCKET_VERSION': '\\"' + options['ROCKET_VERSION'] + '\\"'})
Build('RocketControls', ['Debug', 'Release'])
Build('RocketDebugger', ['Debug', 'Release'])
if options['BUILD_PYTHON']:
Build('RocketCorePython', ['Debug', 'Release'])
Build('RocketControlsPython', ['Debug', 'Release'])
DelTree('../dist/libRocket')
CopyFiles('../Include', '../dist/libRocket/Include')
CopyFiles('../bin', '../dist/libRocket/bin', ['\.dll$', '^[^_].*\.lib$', '\.py$', '\.pyd$'])
CopyFiles('../Samples', '../dist/libRocket/Samples', ['\.h$', '\.cpp$', '\.vcproj$', '\.sln$', '\.vcproj\.user$', '\.rml$', '\.rcss$', '\.tga$', '\.py$', '\.otf$', '\.txt$'])
if options['FULL_SOURCE']:
CopyFiles('../Build', '../dist/libRocket/Build', ['\.vcproj$', '\.sln$', '\.vsprops$', '\.py$'])
CopyFiles('../Source', '../dist/libRocket/Source', ['\.cpp$', '\.h$', '\.inl$'])
shutil.copyfile('../changelog.txt', '../dist/libRocket/changelog.txt')
Archive(options['ARCHIVE_NAME'] + '-' + options['ROCKET_VERSION'], '../dist/libRocket')
if __name__ == '__main__':
main() |
<reponame>aghosh92/atomai
"""
jrvae.py
========
Module for analysis of system "building blocks" with rotationally-invariant
variational autoencoders for joint continuous and discrete representations
Created by <NAME> (email: <EMAIL>)
"""
from typing import Optional, Union, List
from copy import deepcopy as dc
import numpy as np
import torch
from ...losses_metrics import joint_rvae_loss
from ...utils import set_train_rng, to_onehot, transform_coordinates
from .vae import BaseVAE
class jrVAE(BaseVAE):
"""
Rotationally-invariant VAE for joint continuous and
discrete latent representations.
Args:
in_dim:
Input dimensions for image data passed as (height, width)
for grayscale data or (height, width, channels)
for multichannel data
latent_dim:
Number of latent dimensions associated with image content
discrete_dim:
List specifying dimensionalities of discrete (Gumbel-Softmax)
latent variables associated with image content
nb_classes:
Number of classes for class-conditional VAE.
(leave it at 0 to learn discrete latent representations)
translation:
account for xy shifts of image content (Default: True)
seed:
seed for torch and numpy (pseudo-)random numbers generators
**conv_encoder (bool):
use convolutional layers in encoder
**numlayers_encoder (int):
number of layers in encoder (Default: 2)
**numlayers_decoder (int):
number of layers in decoder (Default: 2)
**numhidden_encoder (int):
number of hidden units OR conv filters in encoder (Default: 128)
**numhidden_decoder (int):
number of hidden units in decoder (Default: 128)
**skip (bool):
uses generative skip model with residual paths between
latents and decoder layers (Default: False)
Example:
>>> input_dim = (28, 28) # input dimensions
>>> # Intitialize model
>>> jrvae = aoi.models.jrVAE(input_dim, latent_dim=2, discrete_dim=[10],
>>> numlayers_encoder=3, numhidden_encoder=512,
>>> numlayers_decoder=3, numhidden_decoder=512)
>>> # Train
>>> jrvae.fit(imstack_train, training_cycles=100,
>>> batch_size=100, rotation_prior=np.pi/4)
>>> jrvae.manifold2d(origin="upper", cmap="gnuplot2")
"""
def __init__(self,
in_dim: int = None,
latent_dim: int = 2,
discrete_dim: List[int] = [2],
nb_classes: int = 0,
translation: bool = True,
seed: int = 0,
**kwargs: Union[int, bool, str]
) -> None:
"""
Initializes joint rVAE model (jrVAE)
"""
coord = 3 if translation else 1 # xy translations and/or rotation
args = (in_dim, latent_dim, nb_classes, coord, discrete_dim)
super(jrVAE, self).__init__(*args, **kwargs)
set_train_rng(seed)
self.translation = translation
self.dx_prior = None
self.phi_prior = None
self.kdict_ = dc(kwargs)
self.kdict_["num_iter"] = 0
def elbo_fn(self,
x: torch.Tensor,
x_reconstr: torch.Tensor,
*args: torch.Tensor,
**kwargs: Union[List, int]
) -> torch.Tensor:
"""
Computes ELBO
"""
return joint_rvae_loss(self.loss, self.in_dim, x, x_reconstr, *args, **kwargs)
def forward_compute_elbo(self,
x: torch.Tensor,
y: Optional[torch.Tensor] = None,
mode: str = "train"
) -> torch.Tensor:
"""
Joint rVAE's forward pass with training/test loss computation
"""
tau = self.kdict_.get("temperature", .67)
x_coord_ = self.x_coord.expand(x.size(0), *self.x_coord.size())
x = x.to(self.device)
if mode == "eval":
with torch.no_grad():
latent_ = self.encoder_net(x)
else:
latent_ = self.encoder_net(x)
self.kdict_["num_iter"] += 1
z_mean, z_logsd = latent_[:2]
z_sd = torch.exp(z_logsd)
z_cont = self.reparameterize(z_mean, z_sd)
phi = z_cont[:, 0] # angle
if self.translation:
dx = z_cont[:, 1:3] # translation
dx = (dx * self.dx_prior).unsqueeze(1)
z_cont = z_cont[:, 3:] # image content
else:
dx = 0 # no translation
z_cont = z_cont[:, 1:] # image content
x_coord_ = transform_coordinates(x_coord_, phi, dx)
alphas = latent_[2:]
z_disc = [self.reparameterize_discrete(a, tau) for a in alphas]
z_disc = torch.cat(z_disc, 1)
z = torch.cat((z_cont, z_disc), dim=1)
if y is not None:
targets = to_onehot(y, self.nb_classes)
z = torch.cat((z, targets), -1)
if mode == "eval":
with torch.no_grad():
x_reconstr = self.decoder_net(x_coord_, z)
else:
x_reconstr = self.decoder_net(x_coord_, z)
return self.elbo_fn(x, x_reconstr, z_mean, z_logsd, alphas, **self.kdict_)
def fit(self,
X_train: Union[np.ndarray, torch.Tensor],
y_train: Optional[Union[np.ndarray, torch.Tensor]] = None,
X_test: Optional[Union[np.ndarray, torch.Tensor]] = None,
y_test: Optional[Union[np.ndarray, torch.Tensor]] = None,
loss: str = "mse",
**kwargs) -> None:
"""
Trains joint rVAE model
Args:
X_train:
3D or 4D stack of training images with dimensions
(n_images, height, width) for grayscale data or
(n_images, height, width, channels) for multi-channel data
y_train:
Vector with labels of dimension (n_images,), where n_images
is a number of training images
X_test:
3D or 4D stack of test images with the same dimensions
as for the X_train (Default: None)
y_test:
Vector with labels of dimension (n_images,), where n_images
is a number of test images
loss:
reconstruction loss function, "ce" or "mse" (Default: "mse")
**translation_prior (float):
translation prior
**rotation_prior (float):
rotational prior
**temperature (float):
Relaxation parameter for Gumbel-Softmax distribution
**cont_capacity (list):
List containing (max_capacity, num_iters, gamma) parameters
to control the capacity of the continuous latent channel.
Default values: [5.0, 25000, 30].
Based on https://arxiv.org/pdf/1804.03599.pdf & https://arxiv.org/abs/1804.00104
**disc_capacity (list):
List containing (max_capacity, num_iters, gamma) parameters
to control the capacity of the discrete latent channel(s).
Default values: [5.0, 25000, 30].
Based on https://arxiv.org/pdf/1804.03599.pdf & https://arxiv.org/abs/1804.00104
**filename (str):
file path for saving model after each training cycle ("epoch")
"""
self._check_inputs(X_train, y_train, X_test, y_test)
self.dx_prior = kwargs.get("translation_prior", 0.1)
self.kdict_["phi_prior"] = kwargs.get("rotation_prior", 0.1)
for k, v in kwargs.items():
if k in ["cont_capacity", "disc_capacity", "temperature"]:
self.kdict_[k] = v
self.compile_trainer(
(X_train, y_train), (X_test, y_test), **kwargs)
self.loss = loss # this part needs to be handled better
if self.loss == "ce":
self.sigmoid_out = True # Use sigmoid layer for "prediction" stage
self.metadict["sigmoid_out"] = True
self.recording = kwargs.get("recording", False)
for e in range(self.training_cycles):
self.current_epoch = e
elbo_epoch = self.train_epoch()
self.loss_history["train_loss"].append(elbo_epoch)
if self.test_iterator is not None:
elbo_epoch_test = self.evaluate_model()
self.loss_history["test_loss"].append(elbo_epoch_test)
self.print_statistics(e)
self.update_metadict()
self.save_model(self.filename)
def update_metadict(self):
self.metadict["num_epochs"] = self.current_epoch
self.metadict["num_iter"] = self.kdict_["num_iter"]
|
<filename>pygmt/tests/test_config.py
"""
Tests for gmt config.
"""
import pytest
from pygmt import Figure, config
from pygmt.helpers.testing import check_figures_equal
@pytest.mark.mpl_image_compare
def test_config():
"""
Test if config works globally and locally.
"""
fig = Figure()
# Change global settings of current figure
config(FONT_ANNOT_PRIMARY="blue")
fig.basemap(
region="0/10/0/10", projection="X10c/10c", frame=["af", '+t"Blue Annotation"']
)
with config(FONT_LABEL="red", FONT_ANNOT_PRIMARY="red"):
fig.basemap(
region="0/10/0/10",
projection="X10c/10c",
frame=['xaf+l"red label"', "yaf", '+t"red annotation"'],
X="15c",
)
fig.basemap(
region="0/10/0/10",
projection="X10c/10c",
frame=["af", '+t"Blue Annotation"'],
X="15c",
)
# Revert to default settings in current figure
config(FONT_ANNOT_PRIMARY="black")
return fig
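# Note on the pattern exercised above: calling config(...) on its own changes the GMT
# defaults for the rest of the current figure until they are explicitly reset, while
# using config(...) as a context manager restores the previous values on exit.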
@check_figures_equal()
def test_config_font_one():
"""
Test that setting `FONT` config changes all `FONT_*` settings except
`FONT_LOGO`.
Specifically, this test only checks that `FONT_ANNOT_PRIMARY`,
`FONT_ANNOT_SECONDARY`, `FONT_LABEL`, and `FONT_TITLE` are modified.
"""
fig_ref = Figure()
with config(
FONT_ANNOT_PRIMARY="8p,red",
FONT_ANNOT_SECONDARY="8p,red",
FONT_LABEL="8p,red",
FONT_TITLE="8p,red",
):
fig_ref.basemap(R="0/9/0/9", J="C3/3/9c", Tm="jTL+w4c+d4.5+l")
fig_ref.basemap(Tm="jBR+w5c+d-4.5+l")
fig_test = Figure()
with config(FONT="8p,red"):
fig_test.basemap(
region=[0, 9, 0, 9], projection="C3/3/9c", compass="jTL+w4c+d4.5+l"
)
fig_test.basemap(compass="jBR+w5c+d-4.5+l")
return fig_ref, fig_test
@check_figures_equal()
def test_config_font_annot():
"""
Test that setting `FONT_ANNOT` config changes both `FONT_ANNOT_PRIMARY` and
`FONT_ANNOT_SECONDARY`.
"""
fig_ref = Figure()
with config(FONT_ANNOT_PRIMARY="6p,red", FONT_ANNOT_SECONDARY="6p,red"):
fig_ref.basemap(R="0/9/0/9", J="C3/3/9c", Tm="jTL+w4c+d4.5")
fig_ref.basemap(compass="jBR+w5c+d-4.5")
fig_test = Figure()
with config(FONT_ANNOT="6p,red"):
fig_test.basemap(
region=[0, 9, 0, 9], projection="C3/3/9c", compass="jTL+w4c+d4.5"
)
fig_test.basemap(compass="jBR+w5c+d-4.5")
return fig_ref, fig_test
@pytest.mark.mpl_image_compare
def test_config_format_time_map():
"""
Test that setting `FORMAT_TIME_MAP` config changes both
`FORMAT_TIME_PRIMARY_MAP` and `FORMAT_TIME_SECONDARY_MAP`.
"""
fig = Figure()
with config(FORMAT_TIME_MAP="abbreviation"):
fig.basemap(
region=["2020-1-24T", "2020-1-27T", 0, 1],
projection="X6c/1c",
frame=["pa1K", "sa1K", "NWse"],
)
fig.basemap(frame=["pa1K", "sa1K", "nwSE"])
return fig
@pytest.mark.mpl_image_compare
def test_config_map_annot_offset():
"""
Test that setting `MAP_ANNOT_OFFSET` config changes both
`MAP_ANNOT_OFFSET_PRIMARY` and `MAP_ANNOT_OFFSET_SECONDARY`.
"""
fig = Figure()
with config(MAP_ANNOT_OFFSET="15p"):
fig.basemap(
region=["2020-1-24T", "2020-1-27T", 0, 1],
projection="X6c/1c",
frame=["pa1d", "sa1d", "NWse"],
)
fig.basemap(frame=["pa1d", "sa1d", "nwSE"])
return fig
@pytest.mark.mpl_image_compare
def test_config_map_grid_cross_size():
"""
Test that setting `MAP_GRID_CROSS_SIZE` config changes both
`MAP_GRID_CROSS_SIZE_PRIMARY` and `MAP_GRID_CROSS_SIZE_SECONDARY`.
"""
fig = Figure()
with config(MAP_GRID_CROSS_SIZE="3p"):
fig.basemap(
region=["2020-1-24T21:00", "2020-1-25T00:00", 0, 1],
projection="X6c/2c",
frame=["pa1Hg", "sa45mg45m", "NWse"],
verbose="e",
)
fig.basemap(frame=["pa1Hg", "sa45mg45m", "nwSE"], yshift=-3, verbose="e")
return fig
@pytest.mark.mpl_image_compare
def test_config_map_grid_pen():
"""
Test that setting `MAP_GRID_PEN` config changes both `MAP_GRID_PEN_PRIMARY`
and `MAP_GRID_PEN_SECONDARY`.
"""
fig = Figure()
with config(MAP_GRID_PEN="thick,red"):
fig.basemap(
region=["2020-1-24T21:00", "2020-1-25T00:00", 0, 1],
projection="X6c/2c",
frame=["pa1Hg", "sa45mg45m", "NWse"],
verbose="e",
)
fig.basemap(frame=["pa1Hg", "sa45mg45m", "nwSE"], yshift=-3, verbose="e")
return fig
@pytest.mark.mpl_image_compare
def test_config_map_tick_length():
"""
Test that setting `MAP_TICK_LENGTH` config changes both
`MAP_TICK_LENGTH_PRIMARY` and `MAP_TICK_LENGTH_SECONDARY`.
"""
fig = Figure()
with config(MAP_TICK_LENGTH="5p"):
fig.basemap(
region=["2020-1-24T21:00", "2020-1-25T00:00", 0, 1],
projection="X6c/2c",
frame=["pa1Hg", "sa45mg45m", "NWse"],
verbose="e",
)
fig.basemap(frame=["pa1Hg", "sa45mg45m", "nwSE"], yshift=-3, verbose="e")
return fig
@pytest.mark.mpl_image_compare
def test_config_map_tick_pen():
"""
Test that setting `MAP_TICK_PEN` config changes both `MAP_TICK_PEN_PRIMARY`
and `MAP_TICK_PEN_SECONDARY`.
"""
fig = Figure()
with config(MAP_TICK_PEN="thick,red"):
fig.basemap(
region=["2020-1-24T21:00", "2020-1-25T00:00", 0, 1],
projection="X6c/2c",
frame=["pa1Hg", "sa45mg45m", "NWse"],
verbose="e",
)
fig.basemap(frame=["pa1Hg", "sa45mg45m", "nwSE"], yshift=-3, verbose="e")
return fig
|
<filename>wavefront_api_client/__init__.py
# coding: utf-8
# flake8: noqa
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from wavefront_api_client.api.access_policy_api import AccessPolicyApi
from wavefront_api_client.api.account__user_and_service_account_api import AccountUserAndServiceAccountApi
from wavefront_api_client.api.alert_api import AlertApi
from wavefront_api_client.api.api_token_api import ApiTokenApi
from wavefront_api_client.api.cloud_integration_api import CloudIntegrationApi
from wavefront_api_client.api.dashboard_api import DashboardApi
from wavefront_api_client.api.derived_metric_api import DerivedMetricApi
from wavefront_api_client.api.direct_ingestion_api import DirectIngestionApi
from wavefront_api_client.api.event_api import EventApi
from wavefront_api_client.api.external_link_api import ExternalLinkApi
from wavefront_api_client.api.ingestion_spy_api import IngestionSpyApi
from wavefront_api_client.api.integration_api import IntegrationApi
from wavefront_api_client.api.maintenance_window_api import MaintenanceWindowApi
from wavefront_api_client.api.message_api import MessageApi
from wavefront_api_client.api.metric_api import MetricApi
from wavefront_api_client.api.metrics_policy_api import MetricsPolicyApi
from wavefront_api_client.api.monitored_application_api import MonitoredApplicationApi
from wavefront_api_client.api.monitored_service_api import MonitoredServiceApi
from wavefront_api_client.api.notificant_api import NotificantApi
from wavefront_api_client.api.proxy_api import ProxyApi
from wavefront_api_client.api.query_api import QueryApi
from wavefront_api_client.api.role_api import RoleApi
from wavefront_api_client.api.saved_search_api import SavedSearchApi
from wavefront_api_client.api.search_api import SearchApi
from wavefront_api_client.api.source_api import SourceApi
from wavefront_api_client.api.span_sampling_policy_api import SpanSamplingPolicyApi
from wavefront_api_client.api.usage_api import UsageApi
from wavefront_api_client.api.user_api import UserApi
from wavefront_api_client.api.user_group_api import UserGroupApi
from wavefront_api_client.api.webhook_api import WebhookApi
# import ApiClient
from wavefront_api_client.api_client import ApiClient
from wavefront_api_client.configuration import Configuration
# import models into sdk package
from wavefront_api_client.models.aws_base_credentials import AWSBaseCredentials
from wavefront_api_client.models.access_control_element import AccessControlElement
from wavefront_api_client.models.access_control_list_read_dto import AccessControlListReadDTO
from wavefront_api_client.models.access_control_list_simple import AccessControlListSimple
from wavefront_api_client.models.access_control_list_write_dto import AccessControlListWriteDTO
from wavefront_api_client.models.access_policy import AccessPolicy
from wavefront_api_client.models.access_policy_rule_dto import AccessPolicyRuleDTO
from wavefront_api_client.models.account import Account
from wavefront_api_client.models.alert import Alert
from wavefront_api_client.models.alert_dashboard import AlertDashboard
from wavefront_api_client.models.alert_min import AlertMin
from wavefront_api_client.models.alert_route import AlertRoute
from wavefront_api_client.models.alert_source import AlertSource
from wavefront_api_client.models.annotation import Annotation
from wavefront_api_client.models.anomaly import Anomaly
from wavefront_api_client.models.app_dynamics_configuration import AppDynamicsConfiguration
from wavefront_api_client.models.azure_activity_log_configuration import AzureActivityLogConfiguration
from wavefront_api_client.models.azure_base_credentials import AzureBaseCredentials
from wavefront_api_client.models.azure_configuration import AzureConfiguration
from wavefront_api_client.models.chart import Chart
from wavefront_api_client.models.chart_settings import ChartSettings
from wavefront_api_client.models.chart_source_query import ChartSourceQuery
from wavefront_api_client.models.class_loader import ClassLoader
from wavefront_api_client.models.cloud_integration import CloudIntegration
from wavefront_api_client.models.cloud_trail_configuration import CloudTrailConfiguration
from wavefront_api_client.models.cloud_watch_configuration import CloudWatchConfiguration
from wavefront_api_client.models.conversion import Conversion
from wavefront_api_client.models.conversion_object import ConversionObject
from wavefront_api_client.models.customer_facing_user_object import CustomerFacingUserObject
from wavefront_api_client.models.dashboard import Dashboard
from wavefront_api_client.models.dashboard_min import DashboardMin
from wavefront_api_client.models.dashboard_parameter_value import DashboardParameterValue
from wavefront_api_client.models.dashboard_section import DashboardSection
from wavefront_api_client.models.dashboard_section_row import DashboardSectionRow
from wavefront_api_client.models.derived_metric_definition import DerivedMetricDefinition
from wavefront_api_client.models.ec2_configuration import EC2Configuration
from wavefront_api_client.models.event import Event
from wavefront_api_client.models.event_search_request import EventSearchRequest
from wavefront_api_client.models.event_time_range import EventTimeRange
from wavefront_api_client.models.external_link import ExternalLink
from wavefront_api_client.models.facet_response import FacetResponse
from wavefront_api_client.models.facet_search_request_container import FacetSearchRequestContainer
from wavefront_api_client.models.facets_response_container import FacetsResponseContainer
from wavefront_api_client.models.facets_search_request_container import FacetsSearchRequestContainer
from wavefront_api_client.models.fast_reader_builder import FastReaderBuilder
from wavefront_api_client.models.field import Field
from wavefront_api_client.models.gcp_billing_configuration import GCPBillingConfiguration
from wavefront_api_client.models.gcp_configuration import GCPConfiguration
from wavefront_api_client.models.history_entry import HistoryEntry
from wavefront_api_client.models.history_response import HistoryResponse
from wavefront_api_client.models.ingestion_policy import IngestionPolicy
from wavefront_api_client.models.ingestion_policy_mapping import IngestionPolicyMapping
from wavefront_api_client.models.install_alerts import InstallAlerts
from wavefront_api_client.models.integration import Integration
from wavefront_api_client.models.integration_alert import IntegrationAlert
from wavefront_api_client.models.integration_alias import IntegrationAlias
from wavefront_api_client.models.integration_dashboard import IntegrationDashboard
from wavefront_api_client.models.integration_manifest_group import IntegrationManifestGroup
from wavefront_api_client.models.integration_metrics import IntegrationMetrics
from wavefront_api_client.models.integration_status import IntegrationStatus
from wavefront_api_client.models.json_node import JsonNode
from wavefront_api_client.models.kubernetes_component import KubernetesComponent
from wavefront_api_client.models.kubernetes_component_status import KubernetesComponentStatus
from wavefront_api_client.models.logical_type import LogicalType
from wavefront_api_client.models.maintenance_window import MaintenanceWindow
from wavefront_api_client.models.message import Message
from wavefront_api_client.models.metric_details import MetricDetails
from wavefront_api_client.models.metric_details_response import MetricDetailsResponse
from wavefront_api_client.models.metric_status import MetricStatus
from wavefront_api_client.models.metrics_policy_read_model import MetricsPolicyReadModel
from wavefront_api_client.models.metrics_policy_write_model import MetricsPolicyWriteModel
from wavefront_api_client.models.module import Module
from wavefront_api_client.models.module_descriptor import ModuleDescriptor
from wavefront_api_client.models.module_layer import ModuleLayer
from wavefront_api_client.models.monitored_application_dto import MonitoredApplicationDTO
from wavefront_api_client.models.monitored_cluster import MonitoredCluster
from wavefront_api_client.models.monitored_service_dto import MonitoredServiceDTO
from wavefront_api_client.models.new_relic_configuration import NewRelicConfiguration
from wavefront_api_client.models.new_relic_metric_filters import NewRelicMetricFilters
from wavefront_api_client.models.notificant import Notificant
from wavefront_api_client.models.notification_messages import NotificationMessages
from wavefront_api_client.models.package import Package
from wavefront_api_client.models.paged import Paged
from wavefront_api_client.models.paged_account import PagedAccount
from wavefront_api_client.models.paged_alert import PagedAlert
from wavefront_api_client.models.paged_alert_with_stats import PagedAlertWithStats
from wavefront_api_client.models.paged_anomaly import PagedAnomaly
from wavefront_api_client.models.paged_cloud_integration import PagedCloudIntegration
from wavefront_api_client.models.paged_customer_facing_user_object import PagedCustomerFacingUserObject
from wavefront_api_client.models.paged_dashboard import PagedDashboard
from wavefront_api_client.models.paged_derived_metric_definition import PagedDerivedMetricDefinition
from wavefront_api_client.models.paged_derived_metric_definition_with_stats import PagedDerivedMetricDefinitionWithStats
from wavefront_api_client.models.paged_event import PagedEvent
from wavefront_api_client.models.paged_external_link import PagedExternalLink
from wavefront_api_client.models.paged_ingestion_policy import PagedIngestionPolicy
from wavefront_api_client.models.paged_integration import PagedIntegration
from wavefront_api_client.models.paged_maintenance_window import PagedMaintenanceWindow
from wavefront_api_client.models.paged_message import PagedMessage
from wavefront_api_client.models.paged_monitored_application_dto import PagedMonitoredApplicationDTO
from wavefront_api_client.models.paged_monitored_cluster import PagedMonitoredCluster
from wavefront_api_client.models.paged_monitored_service_dto import PagedMonitoredServiceDTO
from wavefront_api_client.models.paged_notificant import PagedNotificant
from wavefront_api_client.models.paged_proxy import PagedProxy
from wavefront_api_client.models.paged_related_event import PagedRelatedEvent
from wavefront_api_client.models.paged_report_event_anomaly_dto import PagedReportEventAnomalyDTO
from wavefront_api_client.models.paged_role_dto import PagedRoleDTO
from wavefront_api_client.models.paged_saved_search import PagedSavedSearch
from wavefront_api_client.models.paged_service_account import PagedServiceAccount
from wavefront_api_client.models.paged_source import PagedSource
from wavefront_api_client.models.paged_span_sampling_policy import PagedSpanSamplingPolicy
from wavefront_api_client.models.paged_user_group_model import PagedUserGroupModel
from wavefront_api_client.models.point import Point
from wavefront_api_client.models.policy_rule_read_model import PolicyRuleReadModel
from wavefront_api_client.models.policy_rule_write_model import PolicyRuleWriteModel
from wavefront_api_client.models.proxy import Proxy
from wavefront_api_client.models.query_event import QueryEvent
from wavefront_api_client.models.query_result import QueryResult
from wavefront_api_client.models.query_type_dto import QueryTypeDTO
from wavefront_api_client.models.raw_timeseries import RawTimeseries
from wavefront_api_client.models.related_anomaly import RelatedAnomaly
from wavefront_api_client.models.related_data import RelatedData
from wavefront_api_client.models.related_event import RelatedEvent
from wavefront_api_client.models.related_event_time_range import RelatedEventTimeRange
from wavefront_api_client.models.report_event_anomaly_dto import ReportEventAnomalyDTO
from wavefront_api_client.models.response_container import ResponseContainer
from wavefront_api_client.models.response_container_access_policy import ResponseContainerAccessPolicy
from wavefront_api_client.models.response_container_access_policy_action import ResponseContainerAccessPolicyAction
from wavefront_api_client.models.response_container_account import ResponseContainerAccount
from wavefront_api_client.models.response_container_alert import ResponseContainerAlert
from wavefront_api_client.models.response_container_cloud_integration import ResponseContainerCloudIntegration
from wavefront_api_client.models.response_container_dashboard import ResponseContainerDashboard
from wavefront_api_client.models.response_container_derived_metric_definition import ResponseContainerDerivedMetricDefinition
from wavefront_api_client.models.response_container_event import ResponseContainerEvent
from wavefront_api_client.models.response_container_external_link import ResponseContainerExternalLink
from wavefront_api_client.models.response_container_facet_response import ResponseContainerFacetResponse
from wavefront_api_client.models.response_container_facets_response_container import ResponseContainerFacetsResponseContainer
from wavefront_api_client.models.response_container_history_response import ResponseContainerHistoryResponse
from wavefront_api_client.models.response_container_ingestion_policy import ResponseContainerIngestionPolicy
from wavefront_api_client.models.response_container_integration import ResponseContainerIntegration
from wavefront_api_client.models.response_container_integration_status import ResponseContainerIntegrationStatus
from wavefront_api_client.models.response_container_list_access_control_list_read_dto import ResponseContainerListAccessControlListReadDTO
from wavefront_api_client.models.response_container_list_integration import ResponseContainerListIntegration
from wavefront_api_client.models.response_container_list_integration_manifest_group import ResponseContainerListIntegrationManifestGroup
from wavefront_api_client.models.response_container_list_notification_messages import ResponseContainerListNotificationMessages
from wavefront_api_client.models.response_container_list_service_account import ResponseContainerListServiceAccount
from wavefront_api_client.models.response_container_list_string import ResponseContainerListString
from wavefront_api_client.models.response_container_list_user_api_token import ResponseContainerListUserApiToken
from wavefront_api_client.models.response_container_maintenance_window import ResponseContainerMaintenanceWindow
from wavefront_api_client.models.response_container_map_string_integer import ResponseContainerMapStringInteger
from wavefront_api_client.models.response_container_map_string_integration_status import ResponseContainerMapStringIntegrationStatus
from wavefront_api_client.models.response_container_message import ResponseContainerMessage
from wavefront_api_client.models.response_container_metrics_policy_read_model import ResponseContainerMetricsPolicyReadModel
from wavefront_api_client.models.response_container_monitored_application_dto import ResponseContainerMonitoredApplicationDTO
from wavefront_api_client.models.response_container_monitored_cluster import ResponseContainerMonitoredCluster
from wavefront_api_client.models.response_container_monitored_service_dto import ResponseContainerMonitoredServiceDTO
from wavefront_api_client.models.response_container_notificant import ResponseContainerNotificant
from wavefront_api_client.models.response_container_paged_account import ResponseContainerPagedAccount
from wavefront_api_client.models.response_container_paged_alert import ResponseContainerPagedAlert
from wavefront_api_client.models.response_container_paged_alert_with_stats import ResponseContainerPagedAlertWithStats
from wavefront_api_client.models.response_container_paged_anomaly import ResponseContainerPagedAnomaly
from wavefront_api_client.models.response_container_paged_cloud_integration import ResponseContainerPagedCloudIntegration
from wavefront_api_client.models.response_container_paged_customer_facing_user_object import ResponseContainerPagedCustomerFacingUserObject
from wavefront_api_client.models.response_container_paged_dashboard import ResponseContainerPagedDashboard
from wavefront_api_client.models.response_container_paged_derived_metric_definition import ResponseContainerPagedDerivedMetricDefinition
from wavefront_api_client.models.response_container_paged_derived_metric_definition_with_stats import ResponseContainerPagedDerivedMetricDefinitionWithStats
from wavefront_api_client.models.response_container_paged_event import ResponseContainerPagedEvent
from wavefront_api_client.models.response_container_paged_external_link import ResponseContainerPagedExternalLink
from wavefront_api_client.models.response_container_paged_ingestion_policy import ResponseContainerPagedIngestionPolicy
from wavefront_api_client.models.response_container_paged_integration import ResponseContainerPagedIntegration
from wavefront_api_client.models.response_container_paged_maintenance_window import ResponseContainerPagedMaintenanceWindow
from wavefront_api_client.models.response_container_paged_message import ResponseContainerPagedMessage
from wavefront_api_client.models.response_container_paged_monitored_application_dto import ResponseContainerPagedMonitoredApplicationDTO
from wavefront_api_client.models.response_container_paged_monitored_cluster import ResponseContainerPagedMonitoredCluster
from wavefront_api_client.models.response_container_paged_monitored_service_dto import ResponseContainerPagedMonitoredServiceDTO
from wavefront_api_client.models.response_container_paged_notificant import ResponseContainerPagedNotificant
from wavefront_api_client.models.response_container_paged_proxy import ResponseContainerPagedProxy
from wavefront_api_client.models.response_container_paged_related_event import ResponseContainerPagedRelatedEvent
from wavefront_api_client.models.response_container_paged_report_event_anomaly_dto import ResponseContainerPagedReportEventAnomalyDTO
from wavefront_api_client.models.response_container_paged_role_dto import ResponseContainerPagedRoleDTO
from wavefront_api_client.models.response_container_paged_saved_search import ResponseContainerPagedSavedSearch
from wavefront_api_client.models.response_container_paged_service_account import ResponseContainerPagedServiceAccount
from wavefront_api_client.models.response_container_paged_source import ResponseContainerPagedSource
from wavefront_api_client.models.response_container_paged_span_sampling_policy import ResponseContainerPagedSpanSamplingPolicy
from wavefront_api_client.models.response_container_paged_user_group_model import ResponseContainerPagedUserGroupModel
from wavefront_api_client.models.response_container_proxy import ResponseContainerProxy
from wavefront_api_client.models.response_container_query_type_dto import ResponseContainerQueryTypeDTO
from wavefront_api_client.models.response_container_role_dto import ResponseContainerRoleDTO
from wavefront_api_client.models.response_container_saved_search import ResponseContainerSavedSearch
from wavefront_api_client.models.response_container_service_account import ResponseContainerServiceAccount
from wavefront_api_client.models.response_container_set_business_function import ResponseContainerSetBusinessFunction
from wavefront_api_client.models.response_container_set_source_label_pair import ResponseContainerSetSourceLabelPair
from wavefront_api_client.models.response_container_source import ResponseContainerSource
from wavefront_api_client.models.response_container_span_sampling_policy import ResponseContainerSpanSamplingPolicy
from wavefront_api_client.models.response_container_string import ResponseContainerString
from wavefront_api_client.models.response_container_tags_response import ResponseContainerTagsResponse
from wavefront_api_client.models.response_container_user_api_token import ResponseContainerUserApiToken
from wavefront_api_client.models.response_container_user_group_model import ResponseContainerUserGroupModel
from wavefront_api_client.models.response_container_validated_users_dto import ResponseContainerValidatedUsersDTO
from wavefront_api_client.models.response_container_void import ResponseContainerVoid
from wavefront_api_client.models.response_status import ResponseStatus
from wavefront_api_client.models.role_dto import RoleDTO
from wavefront_api_client.models.saved_search import SavedSearch
from wavefront_api_client.models.schema import Schema
from wavefront_api_client.models.search_query import SearchQuery
from wavefront_api_client.models.service_account import ServiceAccount
from wavefront_api_client.models.service_account_write import ServiceAccountWrite
from wavefront_api_client.models.sortable_search_request import SortableSearchRequest
from wavefront_api_client.models.sorting import Sorting
from wavefront_api_client.models.source import Source
from wavefront_api_client.models.source_label_pair import SourceLabelPair
from wavefront_api_client.models.source_search_request_container import SourceSearchRequestContainer
from wavefront_api_client.models.span import Span
from wavefront_api_client.models.span_sampling_policy import SpanSamplingPolicy
from wavefront_api_client.models.specific_data import SpecificData
from wavefront_api_client.models.stats_model_internal_use import StatsModelInternalUse
from wavefront_api_client.models.stripe import Stripe
from wavefront_api_client.models.tags_response import TagsResponse
from wavefront_api_client.models.target_info import TargetInfo
from wavefront_api_client.models.tesla_configuration import TeslaConfiguration
from wavefront_api_client.models.timeseries import Timeseries
from wavefront_api_client.models.trace import Trace
from wavefront_api_client.models.triage_dashboard import TriageDashboard
from wavefront_api_client.models.tuple import Tuple
from wavefront_api_client.models.tuple_result import TupleResult
from wavefront_api_client.models.tuple_value_result import TupleValueResult
from wavefront_api_client.models.user_api_token import UserApiToken
from wavefront_api_client.models.user_dto import UserDTO
from wavefront_api_client.models.user_group import UserGroup
from wavefront_api_client.models.user_group_model import UserGroupModel
from wavefront_api_client.models.user_group_properties_dto import UserGroupPropertiesDTO
from wavefront_api_client.models.user_group_write import UserGroupWrite
from wavefront_api_client.models.user_model import UserModel
from wavefront_api_client.models.user_request_dto import UserRequestDTO
from wavefront_api_client.models.user_to_create import UserToCreate
from wavefront_api_client.models.validated_users_dto import ValidatedUsersDTO
from wavefront_api_client.models.void import Void
from wavefront_api_client.models.vrops_configuration import VropsConfiguration
from wavefront_api_client.models.wf_tags import WFTags
|
<reponame>mikeengland/fireant
import itertools
import pandas as pd
import numpy as np
from typing import Dict, List, Optional, Tuple
from datetime import timedelta
from fireant import (
formats,
utils,
)
from fireant.dataset.fields import DataType, Field
from fireant.dataset.totals import TOTALS_MARKERS
from fireant.dataset.references import Reference
from fireant.reference_helpers import (
reference_alias,
reference_label,
reference_prefix,
reference_suffix,
)
from fireant.utils import alias_selector
from .base import HideField, TransformableWidget
from .chart_base import (
ChartWidget,
ContinuousAxisSeries,
)
DEFAULT_COLORS = (
"#DDDF0D",
"#55BF3B",
"#DF5353",
"#7798BF",
"#AAEEEE",
"#FF0066",
"#EEAAEE",
"#DF5353",
"#7798BF",
"#AAEEEE",
)
DASH_STYLES = (
"Solid",
"Dash",
"Dot",
"DashDot",
"LongDash",
"LongDashDot",
"ShortDash",
"ShortDashDot",
"LongDashDotDot",
"ShortDashDotDot",
"ShortDot",
)
MARKER_SYMBOLS = ("circle", "square", "diamond", "triangle", "triangle-down")
SERIES_NEEDING_MARKER = (ChartWidget.LineSeries, ChartWidget.AreaSeries)
TS_UPPER_BOUND = pd.Timestamp.max - timedelta(seconds=1)
ALWAYS_SHARED_TOOLTIP_CHART_TYPES = {ChartWidget.PieSeries.type}
def has_only_line_series(axis):
return all([isinstance(series_, ChartWidget.LineSeries) for series_ in axis])
class HighCharts(ChartWidget, TransformableWidget):
# Pagination should be applied to groups of the 0th index level (the x-axis) in order to paginate series
group_pagination = True
def __init__(
self,
title: Optional[str] = None,
colors: Optional[List[str]] = None,
hide: Optional[List[HideField]] = None,
x_axis_visible: bool = True,
tooltip_visible: bool = True,
split_dimension: Optional[Field] = None,
):
super(HighCharts, self).__init__(hide=hide)
self.title = title
self.colors = colors or DEFAULT_COLORS
self.x_axis_visible = x_axis_visible
self.tooltip_visible = tooltip_visible
self.split_dimension = split_dimension or None
def __repr__(self):
return ".".join(["HighCharts()"] + [repr(axis) for axis in self.items])
def transform(
self,
data_frame: pd.DataFrame,
dimensions: List[Field],
references: List[Reference],
annotation_frame: Optional[pd.DataFrame] = None,
):
"""
- Main entry point -
Transforms a data frame into HighCharts JSON format.
See https://api.highcharts.com/highcharts/
:param data_frame:
The data frame containing the data. Index must match the dimensions parameter.
:param dimensions:
A list of dimensions that are being rendered.
:param references:
A list of references that are being rendered.
:param annotation_frame:
A data frame containing annotation data.
:return:
A dict or a list of dicts meant to be dumped as JSON.
"""
result_df = data_frame.copy()
hide_aliases = self.hide_aliases(dimensions)
self.hide_data_frame_indexes(result_df, hide_aliases)
dimension_map = {alias_selector(dimension.alias): dimension for dimension in dimensions}
# NaN/None values in the index can break the split dimension feature,
# because the xs method cannot select NaN index labels, so rename them first.
result_df.rename(index={np.nan: formats.BLANK_VALUE}, inplace=True)
render_group = []
split_dimension = self.split_dimension
if split_dimension and split_dimension.data_type != DataType.date:
split_dimension_alias = alias_selector(split_dimension.alias)
# Categories method cannot be reused here, given the totals label wouldn't be correctly
# mapped to the totals value in the split dimension column.
values, _ = self._values_and_dimension(result_df, dimension_map, split_dimension_alias)
for value in values:
render_group.append(
[
result_df.xs(value or '', level=split_dimension_alias, drop_level=False),
formats.display_value(value, split_dimension) or value,
]
)
if not render_group:
render_group = [(result_df, None)]
num_charts = len(render_group)
charts = [
self._render_individual_chart(
chart_df,
dimensions,
references,
annotation_frame=annotation_frame,
title_suffix=title_suffix,
num_charts=num_charts,
)
for chart_df, title_suffix in render_group
]
return charts[0] if num_charts == 1 else charts
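# When a (non-date) split dimension is configured, transform() builds one chart per value
# of that dimension and returns a list of HighCharts configs; otherwise a single config
# dict is returned, which is what the conditional return above encodes.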
def _render_individual_chart(
self,
data_frame: pd.DataFrame,
dimensions: List[Field],
references: List[Reference],
annotation_frame: Optional[pd.DataFrame] = None,
title_suffix: str = "",
num_charts: int = 1,
):
result_df = data_frame
dimension_map = {alias_selector(dimension.alias): dimension for dimension in dimensions}
colors = itertools.cycle(self.colors)
is_timeseries = dimensions and dimensions[0].data_type == DataType.date
# Timestamp.max is used as a marker for rolled up dimensions (totals). Filter out the totals value for the
# dimension used for the x-axis
if is_timeseries and len(data_frame) > 0:
result_df = self._remove_date_totals(result_df)
# Group the results by index levels after the 0th, one for each series
# This will result in a series for every combination of dimension values and each series will contain a data set
# across the 0th dimension (used for the x-axis)
series_data_frames = self._group_by_series(result_df)
total_num_series = sum([len(axis) for axis in self.items])
y_axes, series = [], []
for axis_idx, axis in enumerate(self.items):
# Tee the colors iterator so we can peek at the next color. The next color in the iterator becomes the
# axis color. The first series on each axis should match the axis color, and will progress the colors
# iterator. The next axis color should be the next color in the iterator after the last color used by a
# series on the current axis in order to get a better variety of color.
colors, tee_colors = itertools.tee(colors)
axis_color = next(tee_colors)
# prepend axes, append series, this keeps everything ordered left-to-right
y_axes[0:0] = self._render_y_axis(axis_idx, axis_color if 1 < total_num_series else None, references)
series += self._render_series(
axis,
axis_idx,
axis_color,
colors,
result_df,
series_data_frames,
dimensions,
references,
is_timeseries,
)
categories = self._categories(result_df, dimension_map)
x_axis = self._render_x_axis(dimensions, categories)
annotations = []
if has_only_line_series(axis) and annotation_frame is not None:
annotations = self._render_annotation(annotation_frame, x_axis)
extra = {}
if num_charts > 1:
extra["title"] = {"text": f"{self.title} ({title_suffix})" if self.title else title_suffix}
extra["chart"] = {
# Height of 240px is the smallest we can have, while still fully displaying the chart menu.
"height": 240
}
return {
"title": {"text": self.title},
"xAxis": x_axis,
"yAxis": y_axes,
"colors": self.colors,
"series": series,
"tooltip": {
"useHTML": True,
"enabled": self.tooltip_visible,
                # When only a single datapoint per series is available, shared tooltips should be avoided,
                # since they look clunky and often cover most of the chart.
"shared": all(
[len(item['data']) > 1 or item['type'] in ALWAYS_SHARED_TOOLTIP_CHART_TYPES for item in series]
),
},
"legend": {"useHTML": True},
"annotations": annotations,
**extra,
}
def _values_and_dimension(
self, data_frame: pd.DataFrame, dimension_map: Dict[str, Field], dimension_alias: Optional[str] = None
):
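        """Return the values of the requested index level (or the first level) and its Field, or ([], None) if the level has no name."""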
is_mi = isinstance(data_frame.index, pd.MultiIndex)
levels = data_frame.index.levels if is_mi else [data_frame.index]
first_level = None
if dimension_alias:
level_index = 0
for i, level in enumerate(levels):
if level.name == dimension_alias:
level_index = i
break
first_level = levels[level_index]
else:
first_level = levels[0]
if first_level is not None and first_level.name is not None:
dimension_alias = first_level.name
dimension = dimension_map[dimension_alias]
return [value for value in first_level], dimension
return [], None
def _categories(
self, data_frame: pd.DataFrame, dimension_map: Dict[str, Field], dimension_alias: Optional[str] = None
) -> List[str]:
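        """Return the display-formatted values of a dimension level, used as the chart's category labels."""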
values, dimension = self._values_and_dimension(data_frame, dimension_map, dimension_alias)
return [formats.display_value(value, dimension) or value for value in values]
def _render_x_axis(self, dimensions: List[Field], categories: List[str]):
"""
Renders the xAxis configuration.
https://api.highcharts.com/highcharts/xAxis
:param dimensions:
:param categories:
:return:
"""
is_timeseries = dimensions and dimensions[0].data_type == DataType.date
if is_timeseries:
return {"type": "datetime", "visible": self.x_axis_visible}
return {
"type": "category",
"categories": categories if categories else ["All"],
"visible": self.x_axis_visible,
}
def _render_y_axis(self, axis_idx: int, color: str, references: List[Reference]) -> List[dict]:
"""
Renders the yAxis configuration.
https://api.highcharts.com/highcharts/yAxis
:param axis_idx:
:param color:
:param references:
:return:
"""
axis = self.items[axis_idx]
y_axes = [
{
"id": str(axis_idx),
"title": {"text": None},
"labels": {"style": {"color": color}},
"visible": axis.y_axis_visible,
}
]
y_axes += [
{
"id": "{}_{}".format(axis_idx, reference.alias),
"title": {"text": reference.label},
"opposite": True,
"labels": {"style": {"color": color}},
"visible": axis.y_axis_visible,
}
for reference in references
if reference.delta
]
return y_axes
def _render_series(
self,
axis,
axis_idx: int,
axis_color: str,
colors: List[str],
data_frame: pd.DataFrame,
series_data_frames,
dimensions: List[Field],
references: List[Reference],
is_timeseries: bool = False,
) -> List[dict]:
"""
Renders the series configuration.
https://api.highcharts.com/highcharts/series
:param axis:
:param axis_idx:
:param axis_color:
:param colors:
:param series_data_frames:
:param dimensions:
:param references:
:param is_timeseries:
:return:
"""
hc_series = []
for series in axis:
            # Pie charts do not break data out into groups; render all data in one pie chart series
if isinstance(series, self.PieSeries):
# Pie charts do not group the data up by series so render them separately and then go to the next series
for reference in [None] + references:
hc_series.append(self._render_pie_series(series.metric, reference, data_frame, dimensions))
continue
# For other series types, create a highcharts series for each group (combination of dimension values)
symbols = itertools.cycle(MARKER_SYMBOLS)
for (dimension_values, group_df), symbol in zip(series_data_frames, symbols):
dimension_values = utils.wrap_list(dimension_values)
dimension_label = self._format_dimension_values(dimensions[1:], dimension_values)
hc_series += self._render_highcharts_series(
series,
group_df,
references,
dimension_label,
is_timeseries,
symbol,
axis_idx,
axis_color,
next(colors),
)
return hc_series
def _render_highcharts_series(
self,
series,
series_df: pd.DataFrame,
references: List[Reference],
dimension_label: str,
is_timeseries: bool,
symbol: str,
axis_idx: int,
axis_color: str,
series_color: str,
):
"""
Note on colors:
- With a single axis, use different colors for each series
- With multiple axes, use the same color for the entire axis and only change the dash style
:param series:
:param series_df:
:param references:
:param dimension_label:
:param is_timeseries:
:param symbol:
:param axis_idx:
:param axis_color:
:param series_color:
:return:
"""
if is_timeseries:
series_df = series_df.sort_index(level=0)
results = []
for reference, dash_style in zip([None] + references, itertools.cycle(DASH_STYLES)):
field_alias = utils.alias_selector(reference_alias(series.metric, reference))
metric_label = reference_label(series.metric, reference)
if field_alias not in series_df:
continue
hc_series = {
"type": series.type,
"name": "{} ({})".format(metric_label, dimension_label) if dimension_label else metric_label,
"data": (
self._render_timeseries_data(series_df, field_alias, series.metric)
if is_timeseries
else self._render_category_data(series_df, field_alias, series.metric)
),
"tooltip": self._render_tooltip(series.metric, reference),
"yAxis": (
"{}_{}".format(axis_idx, reference.alias)
if reference is not None and reference.delta
else str(axis_idx)
),
"marker": (
{"symbol": symbol, "fillColor": axis_color or series_color}
if isinstance(series, SERIES_NEEDING_MARKER)
else {}
),
"stacking": series.stacking,
}
if isinstance(series, ContinuousAxisSeries):
# Set each series in a continuous series to a specific color
hc_series["color"] = series_color
hc_series["dashStyle"] = dash_style
results.append(hc_series)
return results
@staticmethod
def _render_category_data(group_df: pd.DataFrame, field_alias: str, metric: Field):
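        """Convert a metric column into Highcharts point dicts ({"x": category index, "y": value}), skipping NaN index labels."""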
categories = (
list(group_df.index.levels[0]) if isinstance(group_df.index, pd.MultiIndex) else list(group_df.index)
)
series = []
for labels, y in group_df[field_alias].iteritems():
label = labels[0] if isinstance(labels, tuple) else labels
if pd.isnull(label):
# ignore nans in index
continue
series.append({"x": categories.index(label), "y": formats.raw_value(y, metric)})
return series
@staticmethod
def _render_timeseries_data(group_df: pd.DataFrame, metric_alias: str, metric: Field) -> List[Tuple[int, int]]:
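        """Convert a metric column into (epoch milliseconds, value) pairs, skipping totals and NaN dates."""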
series = []
for dimension_values, y in group_df[metric_alias].iteritems():
first_dimension_value = utils.wrap_list(dimension_values)[0]
# Ignore empty result sets where the only row is totals
if first_dimension_value in TOTALS_MARKERS:
continue
if pd.isnull(first_dimension_value):
                # Ignore NaN dimension values on the x-axis.
continue
series.append(
(
formats.date_as_millis(first_dimension_value),
formats.raw_value(y, metric),
)
)
return series
def _render_tooltip(self, metric: Field, reference: Reference) -> dict:
return {
"valuePrefix": reference_prefix(metric, reference),
"valueSuffix": reference_suffix(metric, reference),
"valueDecimals": metric.precision,
}
def _render_pie_series(
self, metric: Field, reference: Reference, data_frame: pd.DataFrame, dimension_fields: List[Field]
) -> dict:
metric_alias = utils.alias_selector(metric.alias)
if self.split_dimension:
dimension_fields = [dimension for dimension in dimension_fields if dimension != self.split_dimension]
data_frame = data_frame.reset_index(alias_selector(self.split_dimension.alias), drop=True)
data = []
for dimension_values, y in data_frame[metric_alias].iteritems():
dimension_values = utils.wrap_list(dimension_values)
name = self._format_dimension_values(dimension_fields, dimension_values)
data.append({"name": name or metric.label, "y": formats.raw_value(y, metric)})
return {
"name": reference_label(metric, reference),
"type": "pie",
"data": data,
"tooltip": {
"pointFormat": '<span style="color:{point.color}">\u25CF</span> {series.name}: '
"<b>{point.y} ({point.percentage:.1f}%)</b><br/>",
"valueDecimals": metric.precision,
"valuePrefix": reference_prefix(metric, reference),
"valueSuffix": reference_suffix(metric, reference),
},
}
def _render_annotation(self, annotation_df: pd.DataFrame, x_axis):
"""
Group data in the annotation data frame and calculate their positions on the x-axis of the main chart.
:param annotation_df:
A data frame containing annotation data.
:param x_axis:
The x-axis of the chart.
:return:
A list of annotations with label based on the annotation data frame.
"""
if annotation_df.empty:
return []
annotation_alias = annotation_df.columns[0]
annotation_df[annotation_alias] = annotation_df[annotation_alias].astype(str)
# Group the annotation frame by concatenating the strings in the annotation column for each index value
grouped_annotation_df = self._group_annotation_df(annotation_df, annotation_alias).to_frame()
# Calculate the annotation label positions for either timeseries or categories
annotation_label_positions = self._get_annotation_positions(grouped_annotation_df, annotation_alias, x_axis)
annotation = {"labels": []}
for annotation_position in annotation_label_positions:
label = {
"point": {"x": annotation_position["position"], "xAxis": 0},
"text": annotation_position["label"],
}
annotation["labels"].append(label)
return [annotation]
def _get_annotation_positions(self, df: pd.DataFrame, dimension_alias: str, axis):
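        """Dispatch to timeseries or category positioning depending on the x-axis type."""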
if axis["type"] == "datetime":
return self._get_timeseries_positions(df, dimension_alias)
return self._get_category_positions(df, dimension_alias, axis)
@staticmethod
def _group_annotation_df(annotation_df: pd.DataFrame, annotation_field_name: str) -> pd.DataFrame:
def concatenate_row_values(df):
df.fillna("None", inplace=True)
return ", ".join(df)
return annotation_df.groupby(annotation_df.index.names)[annotation_field_name].apply(
lambda x: concatenate_row_values(x)
)
@staticmethod
def _get_timeseries_positions(df: pd.DataFrame, dimension_alias: str):
timeseries_positions = []
for dimensions, dimension_value in df[dimension_alias].iteritems():
datetime = utils.wrap_list(dimensions)[0]
timeseries_positions.append({"position": formats.date_as_millis(datetime), "label": dimension_value})
return timeseries_positions
@staticmethod
def _get_category_positions(df: pd.DataFrame, dimension_alias: str, axis) -> List[dict]:
dimension_positions = []
category_positions = {category: index for index, category in enumerate(axis["categories"])}
for dimensions, dimension_value in df[dimension_alias].iteritems():
category_label = utils.wrap_list(dimensions)[0]
dimension_positions.append(
{
"position": category_positions[category_label],
"label": dimension_value,
}
)
return dimension_positions
@staticmethod
def _remove_date_totals(data_frame: pd.DataFrame) -> pd.DataFrame:
"""
This function filters the totals value for the date/time dimension from the result set. There is no way to
represent this value on a chart so it is just removed.
:param data_frame:
:return:
"""
if isinstance(data_frame.index, pd.MultiIndex):
first_index = data_frame.index.get_level_values(0)
if isinstance(first_index, pd.DatetimeIndex):
index_slice = first_index < TS_UPPER_BOUND
return data_frame.loc[index_slice, :]
elif isinstance(data_frame.index, pd.DatetimeIndex):
return data_frame[data_frame.index < TS_UPPER_BOUND]
return data_frame
@staticmethod
def _group_by_series(data_frame: pd.DataFrame) -> pd.DataFrame:
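        """Group the data frame by every index level after the first; each group becomes one chart series."""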
if len(data_frame) == 0 or not isinstance(data_frame.index, pd.MultiIndex):
return [([], data_frame)]
levels = data_frame.index.names[1:]
return data_frame.groupby(level=levels, sort=False)
@staticmethod
def _format_dimension_values(dimensions: List[Field], dimension_values: list) -> str:
return ", ".join(
str.strip(formats.display_value(value, dimension) or str(value))
for value, dimension in zip(dimension_values, dimensions)
)
|
<filename>stanCode-projects/Object oriented-Breakout game/breakout_ex.py
"""
stanCode Breakout Project
Adapted from <NAME>'s Breakout by
<NAME>, <NAME>, <NAME>,
and <NAME>.
Name: <NAME>
"""
from campy.gui.events.timer import pause
from breakoutgraphics_ex import BreakoutGraphics
# ex import
import random
FRAME_RATE = 1000 / 120 # 120 frames per second
NUM_LIVES = 2 # Number of attempts
g = BreakoutGraphics(brick_cols=3)
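# g is a single shared BreakoutGraphics instance; the functions below read and mutate its state directly.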
def main():
g.lives = NUM_LIVES # save lives to g.lives
g.lives_board.text = f'Lives: {g.lives}' # update lives board
while True: # main while loop
pause(FRAME_RATE)
if g.lives == 0: # lost game condition
print(f'game over! left brick:{g.brick_num}')
game_over()
break
if g.brick_num == 0: # Win game condition
g.window.remove(g.bonus_ball)
game_win()
break
# run the ball and count lives
running_ball()
def running_ball():
    not_brick = [g.paddle, g.ball, g.score_board, g.lives_board, g.bonus_ball] # objects that are not bricks
    no_rebound = [None, g.score_board, g.lives_board, g.bonus_ball] # objects that do not cause a rebound
g.bonus_time = 0 # to check bonus state
while g.is_running: # while loop of running the ball
pause(FRAME_RATE) # frame rate
if g.bonus_time > 0 and g.bonus_color == 'green': # while bonus, do something else
g.ball.move(g.vx*0.5, g.vy*0.5) # green bonus, slower ball
elif g.bonus_time > 0 and g.bonus_color == 'red':
g.ball.move(g.vx*2, g.vy*2) # red bonus, faster ball
else:
g.ball.move(g.vx, g.vy) # normal, ball move per frame rate
        g.bonus_ball.move(0, 2) # bonus ball falls down
# lost lives
if g.ball.y > g.window.height: # when ball falls out of window, lives-1
g.lives -= 1
            g.lives_board.text = f'Lives:{g.lives}' # update lives board
g.window.remove(g.bonus_ball)
g.is_running = False # update game state
break
# win game
if g.brick_num == 0: # win game condition
g.is_running = False # update game state
break
# 3 wall rebound
if not 0 <= g.ball.x <= g.window.width - g.ball.width:
g.vx *= -1
if not 0 <= g.ball.y:
g.vy *= -1
# def up, down, left ,right 4 point of ball, and get what touched or not
ball_u = g.window.get_object_at(g.ball.x + g.ball.width / 2, g.ball.y - 1)
ball_d = g.window.get_object_at(
g.ball.x + g.ball.width / 2, g.ball.y + g.ball.height + 1)
ball_l = g.window.get_object_at(g.ball.x - 1, g.ball.y + g.ball.height / 2)
ball_r = g.window.get_object_at(
g.ball.x + g.ball.width + 1, g.ball.y + g.ball.height / 2)
# check what point touched then rebound, if it's brick, remove it
if ball_d not in no_rebound and g.vy > 0: # when ball move down and touch something
g.vy *= -1 # y axis rebound
if ball_d not in not_brick:
eliminate(ball_d) # eliminate point and count brick num & score
elif ball_l not in no_rebound and g.vx < 0:
g.vx *= -1
if ball_l not in not_brick:
eliminate(ball_l)
elif ball_r not in no_rebound and g.vx > 0:
g.vx *= -1
if ball_r not in not_brick:
eliminate(ball_r)
elif ball_u not in no_rebound and g.vy < 0:
g.vy *= -1
if ball_u not in not_brick:
eliminate(ball_u)
# bonus ball
bonus_ball_d = g.window.get_object_at( # get what bonus ball's down point touched
g.bonus_ball.x+g.bonus_ball.width/2, g.bonus_ball.y+g.bonus_ball.height+1)
if bonus_ball_d is g.paddle: # when paddle get bonus ball
if g.bonus_color == 'black': # black bonus, lives +1
g.lives += 1
g.lives_board.text = f'Lives:{g.lives}'
elif g.bonus_color == 'red': # red bonus, more score but move faster
g.bonus_score = 2
elif g.bonus_color == 'yellow': # yellow bonus, add score
g.score += 50
g.score_board.text = f'Score:{g.score}'
g.ball.fill_color = g.bonus_ball.fill_color
g.bonus_time = 3*1000/FRAME_RATE
g.bonus_ball.y += 100 # fall out of window
g.bonus_time -= 1 # subtract bonus time
if g.bonus_time <= 0: # when no bonus time, reset bonus state
g.ball.fill_color = 'black'
g.bonus_score = 1
def eliminate(point):
"""
:param point: GObject, which need to eliminate, must check before function.
"""
g.window.remove(point) # eliminate
    if random.random() < 0.3 and g.bonus_ball.y > g.window.height: # bonus trigger conditions
color = random.choice(['black', 'red', 'green', 'yellow']) # random bonus color
g.bonus_color, g.bonus_ball.fill_color = color, color
g.window.add(g.bonus_ball, point.x+g.ball.width/2, point.y)
g.brick_num -= 1 # count left brick
g.score += 10 * g.bonus_score # Scoring
g.score_board.text = f'Score:{g.score}' # update score board
def menu():
pass
def game_over(): # game over animation
for y in range(g.brick_rows): # remove left brick by row
for x in range(g.brick_cols):
down_brick = g.window.get_object_at(
x * (g.brick.width + g.brick_spacing), # find brick by row
g.brick.y - y * (g.brick.height + g.brick_spacing))
g.window.remove(down_brick)
pause(100)
g.window.add(g.game_over, # add game over text
x=(g.window.width - g.game_over.width) / 2, y=g.window.height / 3)
pause(500)
    g.score_board.font = g.game_over.font # scale score board, update and show
g.window.add(g.score_board,
x=(g.window.width - g.score_board.width) / 2, y=g.window.height / 2)
pause(1000)
g.score = 0
def game_win(): # game win animation
g.window.add(g.game_win,
x=(g.window.width - g.game_win.width) / 2, y=g.window.height / 3)
pause(500)
    g.score_board.font = g.game_win.font # scale score board, update and show
g.window.add(g.score_board,
x=(g.window.width - g.score_board.width) / 2, y=g.window.height / 2)
pause(500)
g.score_board.text = f'Score:{g.score+(100*g.lives)}' # add lives scores
pause(1000)
g.score = 0
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from __future__ import print_function, unicode_literals
import os
import sys
import tempfile
import glob
import re
import time
import fileinput
import requests
import hashlib
from datetime import datetime
from subprocess import check_call, check_output, CalledProcessError
from uritemplate import URITemplate, expand
script_env = {}
def add_script_env(name):
script_env[name] = os.environ.get(name)
add_script_env('REPO_NAME')
add_script_env('GITHUB_USER')
add_script_env('GITHUB_USER_TOKEN')
add_script_env('PYPI_REPO')
# although not used directly here, twine env vars are needed for releasing
add_script_env('TWINE_USERNAME')
add_script_env('TWINE_PASSWORD')
# the new version of the CLI
add_script_env('CLI_VERSION')
add_script_env('AZURE_STORAGE_CONNECTION_STRING')
assert (all(script_env[n] is not None for n in script_env)), "Not all required environment variables have been set. {}".format(script_env)
GITHUB_API_AUTH = (script_env.get('GITHUB_USER'), script_env.get('GITHUB_USER_TOKEN'))
GITHUB_API_HEADERS = {'Accept': 'application/vnd.github.v3+json', 'user-agent': 'azure-cli-pypi-github-releaser/v1'}
SOURCE_ARCHIVE_NAME = 'source.tar.gz'
GITHUB_RELEASE_BODY_TMPL = """
The module has been published to PyPI.
View HISTORY.rst of the module for a changelog.
{}
Full release notes at https://docs.microsoft.com/en-us/cli/azure/release-notes-azure-cli
"""
COMMAND_MODULE_PREFIX = 'azure-cli-'
MODULES_TO_ALWAYS_RELEASE = ['azure-cli']
MODULES_TO_SKIP = ['azure-cli-testsdk']
def give_chance_to_cancel(msg_prefix=''):
cancel_time_secs = 10
msg_tmpl = '{}: Starting in {} seconds.'
for i in range(cancel_time_secs, 0, -1):
print_status(msg_tmpl.format(msg_prefix, i))
time.sleep(1)
def print_env_vars():
for n in script_env:
print('{} = {}'.format(n, script_env[n]))
def print_status(msg=''):
print('-- '+msg)
def print_heading(heading):
print('{0}\n{1}\n{0}'.format('=' * len(heading), heading))
def _get_core_modules_paths(repo_working_dir):
for path in glob.glob(repo_working_dir + '/src/*/setup.py'):
yield os.path.basename(os.path.dirname(path)), os.path.dirname(path)
def _get_command_modules_paths(repo_working_dir, include_prefix=False):
for path in glob.glob(repo_working_dir + '/src/command_modules/{}*/setup.py'.format(
COMMAND_MODULE_PREFIX)):
folder = os.path.dirname(path)
name = os.path.basename(folder)
if not include_prefix:
name = name[len(COMMAND_MODULE_PREFIX):]
yield name, folder
def _get_all_module_paths(repo_working_dir):
return list(_get_core_modules_paths(repo_working_dir)) + list(_get_command_modules_paths(repo_working_dir, include_prefix=True))
def _get_current_module_version(mod_path):
mod_version = None
with open(os.path.join(mod_path, 'setup.py'), 'r') as fh:
version_re = re.compile('VERSION = *')
lines = fh.readlines()
for _, line in enumerate(lines):
if version_re.match(line):
mod_version = line.split('=')[1].strip(' "\'').split('+')[0]
return mod_version
def clone_repo(repo_working_dir):
check_call(['git', 'clone', 'https://github.com/{}'.format(script_env.get('REPO_NAME')), repo_working_dir])
check_call(['git', 'checkout', 'master'], cwd=repo_working_dir)
def should_release_module(mod_name, mod_path, repo_working_dir):
if mod_name in MODULES_TO_ALWAYS_RELEASE:
print_status('We always release {}.'.format(mod_name))
return True
if mod_name in MODULES_TO_SKIP:
print_status('Skipping module {} as in modules to skip list.'.format(mod_name))
return False
# Determine if should release based on the current version
cur_mod_version = _get_current_module_version(mod_path)
r_start = '{}-{}'.format(mod_name, cur_mod_version)
revision_range = "{}..{}".format(r_start, 'HEAD')
try:
module_changes = check_output(["git", "log", "--pretty=format:* %s", revision_range, "--", mod_path, ":(exclude)*/tests/*"],
cwd=repo_working_dir)
except CalledProcessError:
# Maybe the revision_range is invalid if this is a new module.
return True
if module_changes:
print_status('Begin changes in {}'.format(mod_name))
print(str(module_changes, 'utf-8'))
print_status('End changes in {}'.format(mod_name))
return True
print_status('Skipping module {} as there are no changes.'.format(mod_name))
return False
def modify_setuppy_version(mod_name, mod_path):
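    """Bump the patch version in the module's setup.py (written back with a '+dev' suffix) and return (old_version, new_version)."""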
setuppy_path = os.path.join(mod_path, 'setup.py')
with open(setuppy_path, 'r') as fh:
version_re = re.compile('VERSION = *')
lines = fh.readlines()
for index, line in enumerate(lines):
if version_re.match(line):
old_version = line.split('=')[1].strip(' "\'').split('+')[0]
major, minor, rev = old_version.split('.')
rev = int(rev) + 1
version = '{}.{}.{}'.format(major, minor, rev)
lines[index] = 'VERSION = "{}+dev"\n'.format(version)
update_setup = lines
break
else:
            raise ValueError('Version not found in setup file {}.'.format(setuppy_path))
if update_setup:
with open(setuppy_path, 'w') as fh:
fh.writelines(update_setup)
else:
raise ValueError('No updated content for setup.py in {}.'.format(mod_name))
return old_version, version
def modify_initpy_version(mod_name, mod_path, old_version, new_version):
if mod_name == 'azure-cli':
path_to_init = os.path.join(mod_path, 'azure', 'cli', '__init__.py')
elif mod_name == 'azure-cli-core':
path_to_init = os.path.join(mod_path, 'azure', 'cli', 'core', '__init__.py')
for _, line in enumerate(fileinput.input(path_to_init, inplace=1)):
if line.startswith('__version__'):
sys.stdout.write(line.replace(old_version, new_version))
else:
sys.stdout.write(line)
def modify_historyrst(mod_name, mod_path, old_version, new_version):
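    """Turn the 'unreleased' section of HISTORY.rst into a dated release heading, or insert a new entry at the top of 'Release History' if there is none."""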
historyrst_path = os.path.join(mod_path, 'HISTORY.rst')
new_history_lines = []
just_seen_unreleased = False
contains_unreleased = False
with open(historyrst_path, 'r') as fq:
lines = fq.readlines()
for _, line in enumerate(lines):
if 'unreleased' in line.lower() and not line.startswith('* '):
contains_unreleased = True
if contains_unreleased:
for _, line in enumerate(lines):
if just_seen_unreleased:
# skip the line as it's just a heading for the old unreleased section
just_seen_unreleased = False
continue
if 'unreleased' in line.lower() and not line.startswith('* '):
new_heading = '{} ({})'.format(new_version, datetime.utcnow().strftime('%Y-%m-%d'))
line = '{}\n{}\n'.format(new_heading, '+' * len(new_heading))
just_seen_unreleased = True
new_history_lines.append(line)
else:
for index, line in enumerate(lines):
if line.startswith('Release History'):
begin = index + 2
if old_version in line:
end = index
break
new_heading = '{} ({})'.format(new_version, datetime.utcnow().strftime('%Y-%m-%d'))
line = '{}\n{}\n'.format(new_heading, '+' * len(new_heading))
release_notes = [line]
if mod_name in MODULES_TO_ALWAYS_RELEASE:
release_notes.append('* no changes\n\n')
else:
release_notes.append('* minor fixes\n\n')
new_history_lines = lines[:begin] + release_notes + lines[end:]
with open(historyrst_path, 'w') as fq:
fq.writelines(new_history_lines)
def release_module(mod_name, mod_path, repo_working_dir):
# Change version in setup.py
old_version, new_version = modify_setuppy_version(mod_name, mod_path)
# Need to modify __init__.py for these modules as well
if mod_name in ['azure-cli', 'azure-cli-core']:
modify_initpy_version(mod_name, mod_path, old_version, new_version)
# Modify HISTORY.rst
modify_historyrst(mod_name, mod_path, old_version, new_version)
# Create commit with appropriate message.
commit_message = 'Release {} {}'.format(mod_name, new_version)
check_call(['git', 'commit', '-am', commit_message], cwd=repo_working_dir)
commitish = check_output(['git', 'rev-parse', 'HEAD'], cwd=repo_working_dir)
commitish = str(commitish, 'utf-8')
commitish = commitish.strip()
return mod_name, commitish, new_version
def install_cli_into_venv():
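    """Install azure-cli into a fresh virtualenv and return the installed azure-cli component (name, version) pairs from pip freeze."""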
venv_dir = tempfile.mkdtemp()
check_call(['virtualenv', venv_dir])
path_to_pip = os.path.join(venv_dir, 'bin', 'pip')
extra_index_url = 'https://testpypi.python.org/simple' if script_env.get('PYPI_REPO') == 'https://test.pypi.org/legacy/' else None
args = [path_to_pip, 'install', 'azure-cli']
if extra_index_url:
args.extend(['--extra-index-url', extra_index_url])
check_call(args)
deps = check_output([path_to_pip, 'freeze'])
deps = str(deps, 'utf-8')
deps = deps.split('\n')
cli_components = []
for dep in deps:
if dep.startswith('azure-cli'):
cli_components.append(dep.split('=='))
return cli_components
def run_push_to_git():
repo_working_dir = tempfile.mkdtemp()
clone_repo(repo_working_dir)
configure_git(repo_working_dir)
commitish_list = []
for mod_name, mod_path in _get_all_module_paths(repo_working_dir):
print_heading(mod_name.upper())
if should_release_module(mod_name, mod_path, repo_working_dir):
mod_name, commitish, new_version = release_module(mod_name, mod_path, repo_working_dir)
commitish_list.append((mod_name, commitish, new_version))
else:
print_status('Skipped {}'.format(mod_name))
# Push all commits to master.
check_call(['git', 'push', '-f', 'origin', 'master'], cwd=repo_working_dir)
return commitish_list
def set_up_cli_repo_dir():
working_dir = tempfile.mkdtemp()
check_call(['git', 'clone', 'https://github.com/{}'.format(script_env.get('REPO_NAME')), working_dir])
check_call(['pip', 'install', '-e', 'tools'], cwd=working_dir)
return working_dir
def publish_to_pypi(working_dir, commitish_list):
# Publish all in commitish list to PyPI
assets_dir_map = {}
for mod_name, commitish, _ in commitish_list:
assets_dir = tempfile.mkdtemp()
check_call(['git', 'checkout', commitish], cwd=working_dir)
check_call(['python', '-m', 'tools.automation.release.run', '-c', mod_name,
'-r', script_env.get('PYPI_REPO'), '--dest', assets_dir], cwd=working_dir)
assets_dir_map[mod_name] = assets_dir
# reset back
check_call(['git', 'checkout', 'master'], cwd=working_dir)
return assets_dir_map
def upload_asset(upload_uri_tmpl, filepath, label):
filename = os.path.basename(filepath)
upload_url = URITemplate(upload_uri_tmpl).expand(name=filename, label=label)
headers = GITHUB_API_HEADERS
headers['Content-Type'] = 'application/octet-stream'
with open(filepath, 'rb') as payload:
requests.post(upload_url, data=payload, auth=GITHUB_API_AUTH, headers=headers)
def upload_assets_for_github_release(upload_uri_tmpl, component_name, component_version, assets_dir):
for filename in os.listdir(assets_dir):
fullpath = os.path.join(assets_dir, filename)
if filename == SOURCE_ARCHIVE_NAME:
upload_asset(upload_uri_tmpl, fullpath, '{} {} source code (.tar.gz)'.format(component_name, component_version))
elif filename.endswith('.tar.gz'):
upload_asset(upload_uri_tmpl, fullpath, '{} {} Source Distribution (.tar.gz)'.format(component_name, component_version))
elif filename.endswith('.whl'):
upload_asset(upload_uri_tmpl, fullpath, '{} {} Python Wheel (.whl)'.format(component_name, component_version))
def run_create_github_release(commitish_list, assets_dir_map):
# Create Github release (inc. the artifacts .whl etc.).
print_heading('Creating GitHub releases')
for mod_name, commitish, mod_version in commitish_list:
print_status('Publishing GitHub release for {} {}'.format(mod_name, mod_version))
tag_name = '{}-{}'.format(mod_name, mod_version)
release_name = "{} {}".format(mod_name, mod_version)
if script_env.get('PYPI_REPO') == 'https://upload.pypi.org/legacy/':
released_pypi_url = 'https://pypi.org/project/{}/{}'.format(mod_name, mod_version)
elif script_env.get('PYPI_REPO') == 'https://test.pypi.org/legacy/':
released_pypi_url = 'https://test.pypi.org/project/{}/{}'.format(mod_name, mod_version)
else:
released_pypi_url = ''
payload = {'tag_name': tag_name, "target_commitish": commitish, "name": release_name, "body": GITHUB_RELEASE_BODY_TMPL.format(released_pypi_url), "prerelease": False}
r = requests.post('https://api.github.com/repos/{}/releases'.format(script_env.get('REPO_NAME')), json=payload, auth=GITHUB_API_AUTH, headers=GITHUB_API_HEADERS)
if r.status_code == 201:
upload_url = r.json()['upload_url']
upload_assets_for_github_release(upload_url, mod_name, mod_version, assets_dir_map[mod_name])
print_status('Published GitHub release for {} {}'.format(mod_name, mod_version))
else:
print_status('ERROR: Failed to create GitHub release for {} {}'.format(mod_name, mod_version))
def run_create_packaged_release(working_dir):
# After releasing, create a new venv, and pip install and verify then create
# list of components for the package release step.
print_status('Start installing CLI into venv')
components_list = install_cli_into_venv()
print_status('Finished installing CLI into venv')
archive_dir = tempfile.mkdtemp()
# create the packaged releases automatically
args = ['python', '-m', 'tools.automation.release.packaged', '--version', script_env.get('CLI_VERSION'), '--dest', archive_dir, '--components']
for name, version in components_list:
# The tag for this module is slightly different so make that change.
if name == 'azure-cli-command-modules-nspkg':
name = 'azure-cli-command_modules-nspkg'
args.append('{}={}'.format(name, version))
print_status(' '.join(args))
check_call(args, cwd=working_dir)
print_status('Created packaged release in dir {}'.format(archive_dir))
# Get the sha256sum
archive_file_name = os.listdir(archive_dir)[0]
archive_file_path = os.path.join(archive_dir, archive_file_name)
sha256 = hashlib.sha256()
with open(archive_file_path, 'rb') as f:
sha256.update(f.read())
computed_hash = sha256.hexdigest()
print_status('SHA256 of {} is {}'.format(archive_file_path, computed_hash))
# Upload release archive to Azure Storage
check_call(['az', 'storage', 'blob', 'upload', '--file', archive_file_path, '--name', archive_file_name, '--container-name', 'releases', '--connection-string', script_env.get('AZURE_STORAGE_CONNECTION_STRING')])
archive_url = check_output(['az', 'storage', 'blob', 'url', '--name', archive_file_name, '--container-name', 'releases', '--connection-string', script_env.get('AZURE_STORAGE_CONNECTION_STRING'), '--output', 'tsv'])
archive_url = str(archive_url, 'utf-8')
archive_url = archive_url.strip()
print_status('Archive URL is {}'.format(archive_url))
def configure_git(repo_working_dir):
    check_call(['git', 'config', 'user.email', '<EMAIL>'.format(script_env.get('GITHUB_USER'))], cwd=repo_working_dir)
check_call(['git', 'config', 'user.name', script_env.get('GITHUB_USER')], cwd=repo_working_dir)
check_call(['git', 'remote', 'set-url', 'origin', 'https://{}:{}@github.com/{}'.format(script_env.get('GITHUB_USER'), script_env.get('GITHUB_USER_TOKEN'), script_env.get('REPO_NAME'))], cwd=repo_working_dir)
if __name__ == "__main__":
print_env_vars()
give_chance_to_cancel('Create Git release commits')
release_commitish_list = run_push_to_git()
cli_repo_dir = set_up_cli_repo_dir()
give_chance_to_cancel('Publish to PyPI')
release_assets_dir_map = publish_to_pypi(cli_repo_dir, release_commitish_list)
give_chance_to_cancel('Create GitHub releases and tags')
run_create_github_release(release_commitish_list, release_assets_dir_map)
give_chance_to_cancel('Create Packaged Release archive')
# We need to clone the repo again as we've now pushed the git tags and we need them to create the packaged release.
# (we could do 'git pull' but this is easier and uses a clean directory just to be safe)
cli_repo_dir = set_up_cli_repo_dir()
run_create_packaged_release(cli_repo_dir)
print_status('Done.')
|
<reponame>Nailim/shuttler
#import sys
#import os
from datetime import datetime
import commands # use GeoidEval through the command line
import argparse
from geographiclib.geodesic import Geodesic
#import gpsRangeRing
global inputParser # just a reminder, it's used as a global variable
global inputArgs # just a reminder, it's used as a global variable
def parseInput() :
global inputParser
global inputArgs
inputParser = argparse.ArgumentParser(description='Parse GPS telemetry from Anemoi autopilot')
inputParser.add_argument('sessionFolder', nargs=1)
inputArgs = inputParser.parse_args()
def processInput() :
print inputArgs.sessionFolder
#rr = gpsRangeRing.rangeRing()
sourceFile_info = open(inputArgs.sessionFolder[0]+"/mission/info.txt", 'r')
sourceFile_gps = open(inputArgs.sessionFolder[0]+"/logs/gps.csv", 'r')
outputFile = open(inputArgs.sessionFolder[0]+"_processed/logs/gpsTour.kml", 'w')
tab_count = 0
speed_min = 0
speed_max = 0
speed_slice = 0
altitude_min = 0
altitude_max = 0
altitude_slice = 0
# get info data
sourceFile_info.seek(0)
for line in sourceFile_info :
if (line.strip().split(" ",-1)[0] == "speed_min:") :
speed_min = float(line.strip().split(" ",-1)[1])
if (line.strip().split(" ",-1)[0] == "speed_max:") :
speed_max = float(line.strip().split(" ",-1)[1])
if (line.strip().split(" ",-1)[0] == "altitude_min:") :
altitude_min = float(line.strip().split(" ",-1)[1])
if (line.strip().split(" ",-1)[0] == "altitude_max:") :
altitude_max = float(line.strip().split(" ",-1)[1])
speed_slice = (((speed_max - speed_min) * 1000) / 3600) / 1024
altitude_slice = (altitude_max - altitude_min) / 1024
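    # speed_slice and altitude_slice are 1/1024 of their value ranges (speed converted from km/h to m/s),
    # matching the 1/1024 color steps in getSpeedLineColor / getAltitudeLineColor below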
# document - header
outputFile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"+"\n")
outputFile.write("<kml xmlns=\"http://www.opengis.net/kml/2.2\" xmlns:gx=\"http://www.google.com/kml/ext/2.2\">"+"\n")
# document - body
tab_count += 1
outputFile.write(tab(tab_count)+"<Document>"+"\n")
# document - metadata
tab_count += 1
outputFile.write(tab(tab_count)+"<name>Mission: !!! - GPS log</name>"+"\n")
outputFile.write(tab(tab_count)+"<visibility>1</visibility>"+"\n")
outputFile.write(tab(tab_count)+"<open>1</open>"+"\n")
outputFile.write(tab(tab_count)+"<description>!!! TODO</description>"+"\n")
# document - metadata - style altitude line
outputFile.write(tab(tab_count)+"<Style id=\"lineTour\">"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<LineStyle>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<width>1</width>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</LineStyle>"+"\n")
outputFile.write(tab(tab_count)+"<LabelStyle>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<scale>0</scale>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</LabelStyle>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</Style>"+"\n")
# document - track raw
outputFile.write(tab(tab_count)+"<Placemark>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<name>data: raw</name>"+"\n")
outputFile.write(tab(tab_count)+"<visibility>0</visibility>"+"\n")
outputFile.write(tab(tab_count)+"<styleUrl>#lineTour</styleUrl>"+"\n")
# document - track - icon
outputFile.write(tab(tab_count)+"<Style>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<Icon>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<href>http://maps.google.com/mapfiles/kml/shapes/airports.png</href>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</Icon>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</Style>"+"\n")
# document - track - track
outputFile.write(tab(tab_count)+"<gx:Track>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<altitudeMode>absolute</altitudeMode>"+"\n")
# document - track - track - when
sourceFile_gps.seek(0)
for line in sourceFile_gps :
# for google earth: <when></when>
tempString = tab(tab_count) + "<when>" + datetime.fromtimestamp(float(line.split(',',-1)[0][:10])).strftime('%Y-%m-%dT%H:%M:%SZ') + "</when>" + "\n"
outputFile.write(tempString)
    # document - track - track - coord
sourceFile_gps.seek(0)
for line in sourceFile_gps :
geoid_offset = commands.getoutput("echo " + line.split(',',-1)[1] + " " + line.split(',',-1)[2] + " | GeoidEval -n egm84-15")
# for google earth: <gx:coord></gx:coord>
tempString = tab(tab_count)+"<gx:coord>"+line.split(',',-1)[2]+" "+line.split(',',-1)[1]+" "+str(float(line.split(',',-1)[3]) - float(geoid_offset))+"</gx:coord>"+"\n"
outputFile.write(tempString)
    # document - track - track - angles
sourceFile_gps.seek(0)
for line in sourceFile_gps :
# for google earth: <gx:angles></gx:angles>
tempString = tab(tab_count)+"<gx:angles>"+line.split(',',-1)[5]+" "+"0"+" "+"0"+"</gx:angles>"+"\n"
outputFile.write(tempString)
    # ! document - track - track
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:Track>"+"\n")
# ! document - track
tab_count -= 1
outputFile.write(tab(tab_count)+"</Placemark>"+"\n")
# document - track evaluated
outputFile.write(tab(tab_count)+"<Placemark>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<name>data: evaluated</name>"+"\n")
outputFile.write(tab(tab_count)+"<visibility>0</visibility>"+"\n")
outputFile.write(tab(tab_count)+"<styleUrl>#lineTour</styleUrl>"+"\n")
# document - track - icon
outputFile.write(tab(tab_count)+"<Style>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<Icon>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<href>http://maps.google.com/mapfiles/kml/shapes/airports.png</href>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</Icon>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</Style>"+"\n")
# document - track - track
outputFile.write(tab(tab_count)+"<gx:Track>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<altitudeMode>absolute</altitudeMode>"+"\n")
# document - track - track - when
sourceFile_gps.seek(0)
for line in sourceFile_gps :
# for google earth: <when></when>
tempString = tab(tab_count) + "<when>" + datetime.fromtimestamp(float(line.split(',',-1)[0][:10])).strftime('%Y-%m-%dT%H:%M:%SZ') + "</when>" + "\n"
outputFile.write(tempString)
    # document - track - track - coord
sourceFile_gps.seek(0)
for line in sourceFile_gps :
geoid_offset = commands.getoutput("echo " + line.split(',',-1)[1] + " " + line.split(',',-1)[2] + " | GeoidEval -n egm84-15")
# for google earth: <gx:coord></gx:coord>
tempString = tab(tab_count)+"<gx:coord>"+line.split(',',-1)[2]+" "+line.split(',',-1)[1]+" "+str(float(line.split(',',-1)[3]) - float(geoid_offset))+"</gx:coord>"+"\n"
outputFile.write(tempString)
    # document - track - track - angles
sourceFile_gps.seek(0)
for line in sourceFile_gps :
# for google earth: <gx:angles></gx:angles>
tempString = tab(tab_count)+"<gx:angles>"+line.split(',',-1)[5]+" "+"0"+" "+"0"+"</gx:angles>"+"\n"
outputFile.write(tempString)
    # ! document - track - track
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:Track>"+"\n")
# ! document - track
tab_count -= 1
outputFile.write(tab(tab_count)+"</Placemark>"+"\n")
# document - track used
outputFile.write(tab(tab_count)+"<Placemark>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<name>data: used</name>"+"\n")
outputFile.write(tab(tab_count)+"<visibility>1</visibility>"+"\n")
outputFile.write(tab(tab_count)+"<styleUrl>#lineTour</styleUrl>"+"\n")
# document - track - icon
outputFile.write(tab(tab_count)+"<Style>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<Icon>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<href>http://maps.google.com/mapfiles/kml/shapes/airports.png</href>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</Icon>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</Style>"+"\n")
# document - track - track
outputFile.write(tab(tab_count)+"<gx:Track>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<altitudeMode>absolute</altitudeMode>"+"\n")
# document - track - track - when
sourceFile_gps.seek(0)
for line in sourceFile_gps :
# for google earth: <when></when>
tempString = tab(tab_count) + "<when>" + datetime.fromtimestamp(float(line.split(',',-1)[0][:10])).strftime('%Y-%m-%dT%H:%M:%SZ') + "</when>" + "\n"
outputFile.write(tempString)
    # document - track - track - coord
sourceFile_gps.seek(0)
for line in sourceFile_gps :
geoid_offset = commands.getoutput("echo " + line.split(',',-1)[1] + " " + line.split(',',-1)[2] + " | GeoidEval -n egm84-15")
# for google earth: <gx:coord></gx:coord>
tempString = tab(tab_count)+"<gx:coord>"+line.split(',',-1)[2]+" "+line.split(',',-1)[1]+" "+str(float(line.split(',',-1)[3]) - float(line.split(',',-1)[4]))+"</gx:coord>"+"\n"
outputFile.write(tempString)
    # document - track - track - angles
sourceFile_gps.seek(0)
for line in sourceFile_gps :
# for google earth: <gx:angles></gx:angles>
tempString = tab(tab_count)+"<gx:angles>"+line.split(',',-1)[5]+" "+"0"+" "+"0"+"</gx:angles>"+"\n"
outputFile.write(tempString)
    # ! document - track - track
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:Track>"+"\n")
# ! document - track
tab_count -= 1
outputFile.write(tab(tab_count)+"</Placemark>"+"\n")
# document - tour - top
outputFile.write(tab(tab_count)+"<gx:Tour>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<name>tour: top</name>"+"\n")
outputFile.write(tab(tab_count)+"<visibility>0</visibility>"+"\n")
outputFile.write(tab(tab_count)+"<gx:Playlist>"+"\n")
tab_count += 1
# magic
durationCounter = 0.0
timeStamp = 0
timeStampFirst = 0
sourceFile_gps.seek(0)
for line in sourceFile_gps :
timeStamp = line.split(',',-1)[0]
timeStampFirst = line.split(',',-1)[0]
break
sourceFile_gps.seek(0)
for line in sourceFile_gps :
durationCounter = (float(line.split(',',-1)[0]) - float(timeStamp)) / 1000
timeStamp = line.split(',',-1)[0]
outputFile.write(tab(tab_count)+"<gx:FlyTo>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<gx:duration>"+str(durationCounter)+"</gx:duration>"+"\n")
outputFile.write(tab(tab_count)+"<gx:flyToMode>smooth</gx:flyToMode>"+"\n")
outputFile.write(tab(tab_count)+"<Camera>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<gx:TimeSpan>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<begin>"+datetime.fromtimestamp(float(timeStampFirst[:10])).strftime('%Y-%m-%dT%H:%M:%SZ')+"</begin>"+"\n")
outputFile.write(tab(tab_count)+"<end>" + datetime.fromtimestamp(float(line.split(',',-1)[0][:10])).strftime('%Y-%m-%dT%H:%M:%SZ') + "</end>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:TimeSpan>"+"\n")
outputFile.write(tab(tab_count)+"<altitudeMode>absolute</altitudeMode>"+"\n")
outputFile.write(tab(tab_count)+"<longitude>"+line.split(',',-1)[2]+"</longitude>"+"\n")
outputFile.write(tab(tab_count)+"<latitude>"+line.split(',',-1)[1]+"</latitude>"+"\n")
outputFile.write(tab(tab_count)+"<altitude>"+str(float(line.split(',',-1)[3])+1000.0)+"</altitude>"+"\n")
#outputFile.write(tab(tab_count)+"<heading>"+line.split(',',-1)[5]+"</heading>"+"\n")
outputFile.write(tab(tab_count)+"<heading>"+str(0)+"</heading>"+"\n")
outputFile.write(tab(tab_count)+"<tilt>"+str(0.0)+"</tilt>"+"\n")
outputFile.write(tab(tab_count)+"<roll>"+str(0.0)+"</roll>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</Camera>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:FlyTo>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:Playlist>"+"\n")
# ! document - tour
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:Tour>"+"\n")
# document - tour - ground
outputFile.write(tab(tab_count)+"<gx:Tour>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<name>tour: ground</name>"+"\n")
outputFile.write(tab(tab_count)+"<visibility>0</visibility>"+"\n")
outputFile.write(tab(tab_count)+"<gx:Playlist>"+"\n")
tab_count += 1
# magic
durationCounter = 0.0
timeStamp = 0
timeStampFirst = 0
    cfb = []
sourceFile_gps.seek(0)
for line in sourceFile_gps :
timeStamp = line.split(',',-1)[0]
timeStampFirst = line.split(',',-1)[0]
#firstLatitude = line.split(',',-1)[1]
#firstLongitude = line.split(',',-1)[2]
heading = float(line.split(',',-1)[5]) + 180.0
if heading > 360.0 :
heading = heading - 360
cfb = getCoordinateFromBearing(float(line.split(',',-1)[1]), float(line.split(',',-1)[2]), heading, 10)
break
sourceFile_gps.seek(0)
for line in sourceFile_gps :
durationCounter = (float(line.split(',',-1)[0]) - float(timeStamp)) / 1000
timeStamp = line.split(',',-1)[0]
inverse = Geodesic.WGS84.Inverse(float(cfb[0][0]), float(cfb[0][1]), float(line.split(',',-1)[1]), float(line.split(',',-1)[2]))
outputFile.write(tab(tab_count)+"<gx:FlyTo>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<gx:duration>"+str(durationCounter)+"</gx:duration>"+"\n")
outputFile.write(tab(tab_count)+"<gx:flyToMode>smooth</gx:flyToMode>"+"\n")
outputFile.write(tab(tab_count)+"<Camera>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<gx:TimeSpan>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<begin>"+datetime.fromtimestamp(float(timeStampFirst[:10])).strftime('%Y-%m-%dT%H:%M:%SZ')+"</begin>"+"\n")
outputFile.write(tab(tab_count)+"<end>" + datetime.fromtimestamp(float(line.split(',',-1)[0][:10])).strftime('%Y-%m-%dT%H:%M:%SZ') + "</end>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:TimeSpan>"+"\n")
outputFile.write(tab(tab_count)+"<altitudeMode>relativeToGround</altitudeMode>"+"\n")
outputFile.write(tab(tab_count)+"<longitude>"+str(cfb[0][1])+"</longitude>"+"\n")
outputFile.write(tab(tab_count)+"<latitude>"+str(cfb[0][0])+"</latitude>"+"\n")
outputFile.write(tab(tab_count)+"<altitude>"+str(1.0)+"</altitude>"+"\n")
outputFile.write(tab(tab_count)+"<heading>"+str(inverse['azi1'])+"</heading>"+"\n")
outputFile.write(tab(tab_count)+"<tilt>"+str(100.0)+"</tilt>"+"\n")
outputFile.write(tab(tab_count)+"<roll>"+str(0.0)+"</roll>"+"\n")
#tempString = tab(tab_count)+"<gx:angles>"+line.split(',',-1)[5]+" "+"0"+" "+"0"+"</gx:angles>"+"\n"
#outputFile.write(tempString)
tab_count -= 1
outputFile.write(tab(tab_count)+"</Camera>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:FlyTo>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:Playlist>"+"\n")
# ! document - tour
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:Tour>"+"\n")
# document - tour - follow
outputFile.write(tab(tab_count)+"<gx:Tour>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<name>tour: follow</name>"+"\n")
outputFile.write(tab(tab_count)+"<visibility>1</visibility>"+"\n")
outputFile.write(tab(tab_count)+"<gx:Playlist>"+"\n")
tab_count += 1
# magic
durationCounter = 0.0
timeStamp = 0
timeStampFirst = 0
sourceFile_gps.seek(0)
for line in sourceFile_gps :
timeStamp = line.split(',',-1)[0]
timeStampFirst = line.split(',',-1)[0]
break
sourceFile_gps.seek(0)
for line in sourceFile_gps :
durationCounter = (float(line.split(',',-1)[0]) - float(timeStamp)) / 1000
timeStamp = line.split(',',-1)[0]
outputFile.write(tab(tab_count)+"<gx:FlyTo>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<gx:duration>"+str(durationCounter)+"</gx:duration>"+"\n")
outputFile.write(tab(tab_count)+"<gx:flyToMode>smooth</gx:flyToMode>"+"\n")
outputFile.write(tab(tab_count)+"<Camera>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<gx:TimeSpan>"+"\n")
tab_count += 1
outputFile.write(tab(tab_count)+"<begin>"+datetime.fromtimestamp(float(timeStampFirst[:10])).strftime('%Y-%m-%dT%H:%M:%SZ')+"</begin>"+"\n")
outputFile.write(tab(tab_count)+"<end>" + datetime.fromtimestamp(float(line.split(',',-1)[0][:10])).strftime('%Y-%m-%dT%H:%M:%SZ') + "</end>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:TimeSpan>"+"\n")
outputFile.write(tab(tab_count)+"<altitudeMode>absolute</altitudeMode>"+"\n")
# calculate point behind the plane
heading = float(line.split(',',-1)[5]) + 180.0
if heading > 360.0 :
            heading = heading - 360
cfb = getCoordinateFromBearing(float(line.split(',',-1)[1]), float(line.split(',',-1)[2]), heading, 500)
outputFile.write(tab(tab_count)+"<longitude>"+str(cfb[0][1])+"</longitude>"+"\n")
outputFile.write(tab(tab_count)+"<latitude>"+str(cfb[0][0])+"</latitude>"+"\n")
outputFile.write(tab(tab_count)+"<altitude>"+str(float(line.split(',',-1)[3])+500.0)+"</altitude>"+"\n")
outputFile.write(tab(tab_count)+"<heading>"+line.split(',',-1)[5]+"</heading>"+"\n")
outputFile.write(tab(tab_count)+"<tilt>"+str(45.0)+"</tilt>"+"\n")
outputFile.write(tab(tab_count)+"<roll>"+str(0.0)+"</roll>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</Camera>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:FlyTo>"+"\n")
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:Playlist>"+"\n")
# ! document - tour
tab_count -= 1
outputFile.write(tab(tab_count)+"</gx:Tour>"+"\n")
# ! document
outputFile.write(tab(tab_count)+"</Document>"+"\n")
# !
tab_count -= 1
outputFile.write("</kml>"+"\n")
sourceFile_gps.close()
outputFile.close()
#for filename in os.listdir(inputArgs.sessionFolder[0]):
# print filename
def tab(count) :
tab = ""
for c in range(0,count,1) :
tab = tab + "\t"
return tab
def getCoordinateFromBearing(center_lat, center_lon, bearing, distance) : # degrees, degrees, degrees, meters
lat_lon = []
line = Geodesic.WGS84.Line(center_lat, center_lon, bearing)
point = line.Position(distance)
lat_lon.append([point['lat2'], point['lon2']])
return lat_lon
def getSpeedLineColor(speed, speed_slice) :
#speed_slice = int(round(speed / 0.027126736)) # 1/1024 slice of 0-27.7778 m/s (0-100 km/h) speed range
#speed_slice = int(round(speed / 0.034722222)) # 1/1024 slice of 0-35.5556 m/s (0-128 km/h) speed range
speed_slice = int(round(speed / speed_slice)) # 1/1024 slice of 0-max speed range
if speed_slice <= 0 :
return "ffff0000"
elif speed_slice <= 255 :
return "ffff"+format(speed_slice,'02x')+"00"
elif speed_slice <= 511 :
return "ff"+format((256-(speed_slice-255)),'02x')+"ff00"
#return "ff"+str(hex( 255-(speed_slice-255) ))[2:]+"ff00"
elif speed_slice <= 767 :
return "ff00ff"+format((speed_slice-511),'02x')
#return "ff00ff"+str(hex(speed_slice-511))[2:]
elif speed_slice <= 1023 :
return "ff00"+format((256-(speed_slice-767)),'02x')+"ff"
#return "ff00"+str(hex( 255-(speed_slice-767) ))[2:]+"ff"
else :
return "ff0000ff"
def getAltitudeLineColor(altitude, altitude_slice) :
#altitude_slice = int(round(altitude)) # 1/1024 slice altutide (0-1024 m) altitude range
altitude_slice = int(round(altitude / altitude_slice)) # 1/1024 slice of 0-max altitude range
if altitude_slice <= 0 :
return "ffff0000"
elif altitude_slice <= 255 :
return "ffff"+format(altitude_slice,'02x')+"00"
elif altitude_slice <= 511 :
return "ff"+format((256-(altitude_slice-255)),'02x')+"ff00"
#return "ff"+str(hex( 255-(altitude_slice-255) ))[2:]+"ff00"
elif altitude_slice <= 767 :
return "ff00ff"+format((altitude_slice-511),'02x')
#return "ff00ff"+str(hex(altitude_slice-511))[2:]
elif altitude_slice <= 1023 :
return "ff00"+format((256-(altitude_slice-767)),'02x')+"ff"
#return "ff00"+str(hex( 255-(altitude_slice-767) ))[2:]+"ff"
else :
return "ff0000ff"
if __name__ == "__main__": # this is not a module
parseInput() # what do we have to do
processInput() # doing what we have to do
#test()
print "" # for estetic output
|
<filename>chaco/multi_line_plot.py
""" Defines the MultiLinePlot class.
"""
from __future__ import with_statement
# Standard library imports
import warnings
from math import ceil, floor
# Major library imports
import numpy as np
from numpy import argsort, array, invert, isnan, take, transpose
# Enthought library imports
from enable.api import black_color_trait, ColorTrait, LineStyle
from traits.api import Float, List, Str, Trait, \
Bool, Callable, Property, cached_property, Instance, Array
from traitsui.api import Item, View, ScrubberEditor, HGroup
from array_data_source import ArrayDataSource
from base import arg_find_runs, bin_search
from base_xy_plot import BaseXYPlot
class MultiLinePlot(BaseXYPlot):
""" A plot consisting of multiple lines.
The data to be plotted must come from a two-dimensional array with shape M by N
stored in a MultiArrayDataSource object. M is the number of lines to be plotted,
and N is the number of points in each line.
Constructor Parameters
----------------------
index : instance of an ArrayDataSource
These are the 'x' or abscissa coordinates.
yindex : instance of ArrayDataSource
These are the 'y' coordinates.
value : instance of a MultiArrayDataSource
Note that the `scale`, `offset` and `normalized_amplitude` attributes of the
MultiLinePlot control the projection of the traces into the (x,y)
        plot. In the simplest case, `scale=1` and `offset=0`, and `normalized_amplitude`
controls the scaling of the traces relative to their base y value.
global_min, global_max : float
The minimum and maximum values of the data in `value`. For large
arrays, computing these could take excessive time, so they must be
provided when an instance is created.
normalized_amplitude : Float
color : ColorTrait
color_func : Callable or None
If not None, this Callable overrides `color`. The argument to `color_func`
will be the integer index of the trace to be rendered. `color_func` must
return an RGBA 4-tuple.
Default: None
orientation : str
Must be 'v' or 'h' (for 'vertical' or 'horizontal', respectively). This is
the orientation of the index axis (i.e. the 'x' axis).
Default: 'h'
fast_clip : bool
If True, traces whose *base* 'y' coordinate is outside the value axis range
are not plotted, even if some of the data in the curve extends into the plot
region.
Default: False
line_width : float
Width of the plotted lines.
line_style :
The style of the trace lines in the plot.
The following are from the original LinePlot code, and are untested:
selected_color
selected_line_style
"""
# M and N appearing in the comments are as defined in the docstring.
yindex = Instance(ArrayDataSource)
# amplitude = Float(0.0)
# `scale` and `offset` provide a more general transformation, but are currently
# untested.
scale = Float(1.0)
offset = Float(0.0)
fast_clip = Bool(False)
# The color of the lines.
color = black_color_trait
# A function that returns the color of lines. Overrides `color` if not None.
color_func = Trait(None, None, Callable)
# The color to use to highlight the line when selected.
selected_color = ColorTrait("lightyellow")
# The style of the selected line.
selected_line_style = LineStyle("solid")
# The name of the key in self.metadata that holds the selection mask
metadata_name = Str("selections")
# The thickness of the line.
line_width = Float(1.0)
# The line dash style.
line_style = LineStyle
use_global_bounds = Bool(True)
# Minimum value in the `value` data source. This must be provided
# in the call to the constructor.
global_min = Float
# Maximum value in the `value` data source. This must be provided
# in the call to the constructor.
global_max = Float
# Normalized amplitude is the value exposed to the user.
normalized_amplitude = Float(-0.5)
amplitude_scale = Property(Float, depends_on=['global_min', 'global_max', 'data',
'use_global_bounds', 'yindex'])
amplitude = Property(Float, depends_on=['normalized_amplitude',
'amplitude_scale'])
#------------------------------------------------------------------------
# Private traits
#------------------------------------------------------------------------
# The projected 2D numpy array.
_trace_data = Property(Array, depends_on=['index', 'index.data_changed',
'value', 'value.data_changed', 'yindex', 'yindex.data_changed',
'amplitude', 'scale', 'offset'])
# Cached list of non-NaN arrays of (x,y) data-space points; regardless of
# self.orientation, this is always stored as (index_pt, value_pt). This is
# different from the default BaseXYPlot definition.
_cached_data_pts = List
# Cached list of non-NaN arrays of (x,y) screen-space points.
_cached_screen_pts = List
#------------------------------------------------------------------------
#
#------------------------------------------------------------------------
def trait_view(self, obj):
"""Create a minimalist View, with just the amplitude and color attributes."""
# Minimalist Traits UI View for customizing the plot: only the trace amplitude
# and line color are exposed.
view = View(
HGroup(
Item('use_global_bounds'),
# Item('normalized_amplitude'),
# Item('normalized_amplitude', editor=RangeEditor()),
Item('normalized_amplitude',
editor=ScrubberEditor(increment=0.2, hover_color=0xFFFFFF, active_color=0xA0CD9E,
border_color=0x0000FF)),
),
Item("color", label="Trace color", style="simple"),
width=480,
title="Trace Plot Line Attributes",
buttons=["OK", "Cancel"])
return view
#------------------------------------------------------------------------
#
#------------------------------------------------------------------------
# See base_xy_plot.py for these:
## def hittest(self, screen_pt, threshold=7.0):
## def interpolate(self, index_value):
def get_screen_points(self):
self._gather_points()
scrn_pts_list = [[self.map_screen(ary) for ary in line]
for line in self._cached_data_pts]
return scrn_pts_list
#------------------------------------------------------------------------
# Private methods
#------------------------------------------------------------------------
@cached_property
def _get_amplitude_scale(self):
"""
        If `amplitude` is set to this value, the largest trace deviation from
        its base y coordinate is half the y coordinate spacing, so the
        peak-to-peak extent of a trace spans one coordinate spacing.
"""
# Note: Like the rest of the current code, this ignores the `scale` attribute.
if self.yindex is not None:
coordinates = self.yindex.get_data()
else:
coordinates = []
if len(coordinates) > 1:
dy = coordinates[1] - coordinates[0]
if dy == 0:
dy = 1.0
else:
# default coordinate spacing if there is only 1 coordinate
dy = 1.0
if self.use_global_bounds:
max_abs = max(abs(self.global_min), abs(self.global_max))
else:
data = self.value._data
max_abs = np.max(np.abs(data))
if max_abs == 0:
amp_scale = 0.5 * dy
else:
amp_scale = 0.5 * dy / max_abs
return amp_scale
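    # Worked example (illustrative only, not from the original source): with the
    # trace base coordinates spaced dy = 2.0 apart and a largest absolute data
    # value of 5.0, amplitude_scale = 0.5 * 2.0 / 5.0 = 0.2.  A normalized_amplitude
    # of 1.0 then gives amplitude = 0.2, so the largest excursion of any trace is
    # 0.2 * 5.0 = 1.0, i.e. half the spacing between adjacent base coordinates.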
@cached_property
def _get_amplitude(self):
amplitude = self.normalized_amplitude * self.amplitude_scale
return amplitude
@cached_property
def _get__trace_data(self):
"""Compute the transformed data."""
# Get the array from `value`
data = self.value._data
coordinates = self.yindex.get_data()
channel_data = self.scale*(self.amplitude*data + coordinates[:,np.newaxis]) \
+ self.offset
return channel_data
def _gather_points(self):
"""
Collects the data points that are within the bounds of the plot and
caches them.
"""
if self._cache_valid:
return
if not self.index or not self.value:
return
index = self.index.get_data()
varray = self._trace_data
if varray.size == 0:
self._cached_data_pts = []
            self._cache_valid = True
return
coordinates = self.yindex.get_data()
if self.fast_clip:
coord_min = float(coordinates[0])
coord_max = coordinates[-1]
slice_min = max(0,ceil((varray.shape[0]-1)*(self.value_range.low - coord_min)/(coord_max - coord_min)))
slice_max = min(varray.shape[0], 1+floor((varray.shape[0]-1)*(self.value_range.high - coord_min)/(coord_max - coord_min)))
varray = varray[slice_min:slice_max]
# FIXME: The y coordinates must also be sliced to match varray.
# Check to see if the data is completely outside the view region.
outside = False
# Check x coordinates.
low, high = self.index.get_bounds()
if low > self.index_range.high or high < self.index_range.low:
outside = True
        # Check y coordinates.  Use varray because it is based on the yindex,
        # but has been shifted up or down depending on the values.
ylow, yhigh = varray.min(), varray.max()
if ylow > self.value_range.high or yhigh < self.value_range.low:
outside = True
if outside:
self._cached_data_pts = []
            self._cache_valid = True
return
if len(index) == 0 or varray.shape[0] == 0 or varray.shape[1] == 0 \
or len(index) != varray.shape[1]:
self._cached_data_pts = []
self._cache_valid = True
return
size_diff = varray.shape[1] - len(index)
if size_diff > 0:
            warnings.warn('Chaco.MultiLinePlot: varray.shape[1] %d - len(index) %d = %d\n' \
                          % (varray.shape[1], len(index), size_diff))
index_max = len(index)
varray = varray[:,:index_max]
else:
index_max = varray.shape[1]
index = index[:index_max]
# Split the index and value raw data into non-NaN chunks.
# nan_mask is a boolean M by N array.
nan_mask = invert(isnan(varray)) & invert(isnan(index))
blocks_list = []
for nm in nan_mask:
blocks = [b for b in arg_find_runs(nm, "flat") if nm[b[0]] != 0]
blocks_list.append(blocks)
line_points = []
for k, blocks in enumerate(blocks_list):
points = []
for block in blocks:
start, end = block
block_index = index[start:end]
block_value = varray[k, start:end]
index_mask = self.index_mapper.range.mask_data(block_index)
runs = [r for r in arg_find_runs(index_mask, "flat") \
if index_mask[r[0]] != 0]
# Check to see if our data view region is between two points in the
# index data. If so, then we have to reverse map our current view
# into the appropriate index and draw the bracketing points.
if runs == []:
data_pt = self.map_data((self.x_mapper.low_pos, self.y_mapper.low_pos))
if self.index.sort_order == "none":
indices = argsort(index)
sorted_index = take(index, indices)
sorted_value = take(varray[k], indices)
sort = 1
else:
sorted_index = index
sorted_value = varray[k]
if self.index.sort_order == "ascending":
sort = 1
else:
sort = -1
ndx = bin_search(sorted_index, data_pt, sort)
if ndx == -1:
# bin_search can return -1 if data_pt is outside the bounds
# of the source data
continue
z = transpose(array((sorted_index[ndx:ndx+2],
sorted_value[ndx:ndx+2])))
points.append(z)
else:
# Expand the width of every group of points so we draw the lines
# up to their next point, outside the plot area
data_end = len(index_mask)
for run in runs:
start, end = run
if start != 0:
start -= 1
if end != data_end:
end += 1
run_data = transpose(array((block_index[start:end],
block_value[start:end])))
points.append(run_data)
line_points.append(points)
self._cached_data_pts = line_points
self._cache_valid = True
return
# See base_xy_plot.py for:
## def _downsample(self):
## def _downsample_vectorized(self):
def _render(self, gc, line_points, selected_points=None):
if len(line_points) == 0:
return
with gc:
gc.set_antialias(True)
gc.clip_to_rect(self.x, self.y, self.width, self.height)
render = self._render_normal
if selected_points is not None:
gc.set_stroke_color(self.selected_color_)
gc.set_line_width(self.line_width+10.0)
gc.set_line_dash(self.selected_line_style_)
render(gc, selected_points)
if self.color_func is not None:
# Existence of self.color_func overrides self.color.
color_func = self.color_func
else:
color_func = lambda k: self.color_
tmp = list(enumerate(line_points))
# Note: the list is reversed for testing with _render_filled.
for k, points in reversed(tmp):
color = color_func(k)
# Apply the alpha
alpha = color[-1] if len(color) == 4 else 1
color = color[:3] + (alpha * self.alpha,)
gc.set_stroke_color(color)
gc.set_line_width(self.line_width)
gc.set_line_dash(self.line_style_)
render(gc, points)
# Draw the default axes, if necessary
self._draw_default_axes(gc)
def _render_normal(self, gc, points):
for ary in points:
if len(ary) > 0:
gc.begin_path()
gc.lines(ary)
gc.stroke_path()
return
def _render_icon(self, gc, x, y, width, height):
with gc:
gc.set_stroke_color(self.color_)
gc.set_line_width(self.line_width)
gc.set_line_dash(self.line_style_)
gc.set_antialias(0)
gc.move_to(x, y+height/2)
gc.line_to(x+width, y+height/2)
gc.stroke_path()
def _alpha_changed(self):
self.invalidate_draw()
self.request_redraw()
return
def _color_changed(self):
self.invalidate_draw()
self.request_redraw()
return
def _line_style_changed(self):
self.invalidate_draw()
self.request_redraw()
return
def _line_width_changed(self):
self.invalidate_draw()
self.request_redraw()
return
def _amplitude_changed(self):
self.value.data_changed = True
self.invalidate_draw()
self.request_redraw()
return
def __getstate__(self):
        state = super(MultiLinePlot, self).__getstate__()
        for key in ['traits_view']:
            if key in state:
del state[key]
return state
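# Hedged usage sketch (illustrative only; it assumes the usual Chaco data sources
# and mappers have been created elsewhere, and the variable names below are
# hypothetical):
#
#     plot = MultiLinePlot(
#         index=index_source,        # x values, length N
#         yindex=y_source,           # base y coordinate of each trace, length M
#         value=value_source,        # data source holding the M x N value array
#         index_mapper=index_mapper,
#         value_mapper=value_mapper,
#         global_min=float(data.min()),   # must be supplied at construction time
#         global_max=float(data.max()),
#     )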
|
<filename>sdl2/sdl2_rect.py
# Python-SDL2 : Yet another SDL2 wrapper for Python
#
# * https://github.com/vaiorabbit/python-sdl2
#
# [NOTICE] This is an automatically generated file.
import ctypes
from .api import SDL2_API_NAMES, SDL2_API_ARGS_MAP, SDL2_API_RETVAL_MAP
# Define/Macro
# Enum
# Typedef
# Struct
class SDL_Point(ctypes.Structure):
_fields_ = [
("x", ctypes.c_int),
("y", ctypes.c_int),
]
class SDL_FPoint(ctypes.Structure):
_fields_ = [
("x", ctypes.c_float),
("y", ctypes.c_float),
]
class SDL_Rect(ctypes.Structure):
_fields_ = [
("x", ctypes.c_int),
("y", ctypes.c_int),
("w", ctypes.c_int),
("h", ctypes.c_int),
]
class SDL_FRect(ctypes.Structure):
_fields_ = [
("x", ctypes.c_float),
("y", ctypes.c_float),
("w", ctypes.c_float),
("h", ctypes.c_float),
]
# Function
def setup_symbols():
SDL2_API_NAMES.append('SDL_PointInRect')
SDL2_API_ARGS_MAP['SDL_PointInRect'] = [ctypes.c_void_p, ctypes.c_void_p]
SDL2_API_RETVAL_MAP['SDL_PointInRect'] = ctypes.c_int
SDL2_API_NAMES.append('SDL_RectEmpty')
SDL2_API_ARGS_MAP['SDL_RectEmpty'] = [ctypes.c_void_p]
SDL2_API_RETVAL_MAP['SDL_RectEmpty'] = ctypes.c_int
SDL2_API_NAMES.append('SDL_RectEquals')
SDL2_API_ARGS_MAP['SDL_RectEquals'] = [ctypes.c_void_p, ctypes.c_void_p]
SDL2_API_RETVAL_MAP['SDL_RectEquals'] = ctypes.c_int
SDL2_API_NAMES.append('SDL_HasIntersection')
SDL2_API_ARGS_MAP['SDL_HasIntersection'] = [ctypes.c_void_p, ctypes.c_void_p]
SDL2_API_RETVAL_MAP['SDL_HasIntersection'] = ctypes.c_int
SDL2_API_NAMES.append('SDL_IntersectRect')
SDL2_API_ARGS_MAP['SDL_IntersectRect'] = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
SDL2_API_RETVAL_MAP['SDL_IntersectRect'] = ctypes.c_int
SDL2_API_NAMES.append('SDL_UnionRect')
SDL2_API_ARGS_MAP['SDL_UnionRect'] = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
SDL2_API_RETVAL_MAP['SDL_UnionRect'] = None
SDL2_API_NAMES.append('SDL_EnclosePoints')
SDL2_API_ARGS_MAP['SDL_EnclosePoints'] = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
SDL2_API_RETVAL_MAP['SDL_EnclosePoints'] = ctypes.c_int
SDL2_API_NAMES.append('SDL_IntersectRectAndLine')
SDL2_API_ARGS_MAP['SDL_IntersectRectAndLine'] = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
SDL2_API_RETVAL_MAP['SDL_IntersectRectAndLine'] = ctypes.c_int
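# Hedged usage sketch (illustrative only; how the registered names are resolved
# into callable functions is handled by this package's api module, which is not
# shown here):
#
#     setup_symbols()                        # register the rect-related symbols
#     r = SDL_Rect(x=0, y=0, w=640, h=480)   # ctypes structures can be built directly
#     p = SDL_Point(x=10, y=20)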
|
<reponame>jskora/scratch-python
# simple_repl.py
from Tester import Test, Tester
import re
def tokenize(expression):
if expression == "":
return []
    regex = re.compile(r"\s*(=>|[-+*\/\%=\(\)]|[A-Za-z_][A-Za-z0-9_]*|[0-9]*\.?[0-9]+)\s*")
tokens = regex.findall(expression)
return [s for s in tokens if not s.isspace()]
class Interpreter:
OPERATOR_PRECEDENCE = {"=": 0, "+": 1, "-": 1, "*": 2, "/": 2, "%": 2}
OPERATORS = OPERATOR_PRECEDENCE.keys()
def __init__(self):
self.vars = {}
self.functions = {}
def input(self, expression):
"""
The algorithm for evaluating any postfix expression is fairly straightforward:
1 While there are input tokens left
1.1 Read the next token from input.
1.2 If the token is a value
1.2.1 Push it onto the stack.
1.3 Otherwise, the token is an operator (operator here includes both operators and functions).
1.3.1 It is already known that the operator takes n arguments.
1.3.2 If there are fewer than n values on the stack
(Error) The user has not input sufficient values in the expression.
1.3.3 Else, Pop the top n values from the stack.
1.3.4 Evaluate the operator, with the values as arguments.
1.3.5 Push the returned results, if any, back onto the stack.
2 If there is only one value in the stack
2.1 That value is the result of the calculation.
3 Otherwise, there are more values in the stack
(Error) The user input has too many values.
"""
if not expression.strip():
return ""
rpn = self.infix_to_postfix(expression)
# print "expression = %s\nrpn = %s" % (expression, rpn)
tokens = tokenize(rpn)
stack = []
for token in tokens:
if token in self.OPERATORS:
if len(stack) >= 2:
v1 = stack.pop()
v2 = stack.pop()
stack.append(self.binary_operation(token, v2, v1))
else:
raise RuntimeError("INSUFFICIENT VALUES")
elif token.isdigit():
stack.append(int(token))
elif token in self.vars.keys():
stack.append(self.vars[token])
else:
stack.append(token)
if len(stack) != 1:
raise RuntimeError("TOO MANY VALUES: %s -> %s = %s" % (expression, rpn, str(stack)))
elif type(stack[-1]) == str:
raise RuntimeError("UNKNOWN VARIABLE")
# print "%s = %s = %s" % (expression, rpn, str(stack[0]))
return stack.pop()
def binary_operation(self, op, v1, v2):
if op == "+":
return v1 + v2
elif op == "-":
return v1 - v2
elif op == "*":
return v1 * v2
elif op == "/":
return v1 / v2
elif op == "%":
return v1 % v2
elif op == "=":
self.vars[v1] = v2
return v2
def infix_to_postfix(self, infix_expression):
"""
1 While there are tokens to be read:
1.1 Read a token.
1.2 If the token is a number, then add it to the output queue.
1.3 If the token is a function token, then push it onto the stack.
1.4 If the token is a function argument separator (e.g., a comma):
1.4.1 Until the token at the top of the stack is a left parenthesis, pop operators off
the stack onto the output queue. If no left parentheses are encountered, either
the separator was misplaced or parentheses were mismatched.
1.5 If the token is an operator, o1, then:
1.5.1 while there is an operator token o2, at the top of the operator stack and either
o1 is left-associative and its precedence is less than or equal to that of o2, or
o1 is right associative, and has precedence less than that of o2,
1.5.1.1 pop o2 off the operator stack, onto the output queue;
1.5.2 at the end of iteration push o1 onto the operator stack.
1.6 If the token is a left parenthesis (i.e. "("), then push it onto the stack.
1.7 If the token is a right parenthesis (i.e. ")"):
1.7.1 Until the token at the top of the stack is a left parenthesis, pop operators off the stack onto the output queue.
1.7.2 Pop the left parenthesis from the stack, but not onto the output queue.
1.7.3 If the token at the top of the stack is a function token, pop it onto the output queue.
1.7.4 If the stack runs out without finding a left parenthesis, then there are mismatched parentheses.
2 When there are no more tokens to read:
2.1 While there are still operator tokens in the stack:
2.1.1 If the operator token on the top of the stack is a parenthesis, then there are mismatched parentheses.
2.1.2 Pop the operator onto the output queue.
3 Exit.
"""
tokens = tokenize(infix_expression)
queue = []
stack = []
while tokens:
# 1.1
token = tokens[0]
tokens = tokens[1:]
if not (token in self.OPERATORS or token == "(" or token == ")"):
queue.append(token)
# 1.5
elif token in self.OPERATORS:
# 1.5.1
while stack and stack[-1] in self.OPERATORS and (self.OPERATOR_PRECEDENCE[token] <= self.OPERATOR_PRECEDENCE[stack[-1]]):
queue.append(stack.pop())
stack.append(token)
elif token == "(":
stack.append(token)
elif token == ")":
while stack and stack[-1] != "(":
queue.append(stack.pop())
if stack and stack[-1] == "(":
stack.pop()
else:
raise RuntimeError("PAREN MISMATCH")
while stack and len(stack) > 0:
if stack[-1] == "(":
raise RuntimeError("PAREN MISMATCH")
else:
queue.append(stack.pop())
return " ".join(queue)
test = Tester()
interpreter = Interpreter()
# Basic arithmetic
test.assert_equals(interpreter.input("1 + 1"), 2, "1 + 1")
test.assert_equals(interpreter.input("2 - 1"), 1, "2 - 1")
test.assert_equals(interpreter.input("2 * 3"), 6, "2 * 3")
test.assert_equals(interpreter.input("8 / 4"), 2, "8 / 4")
test.assert_equals(interpreter.input("7 % 4"), 3, "7 % 4")
# Variables
test.assert_equals(interpreter.input("x = 1"), 1, "x = 1")
test.assert_equals(interpreter.input("x"), 1, "x")
test.assert_equals(interpreter.input("x + 3"), 4, "x + 3")
test.expect_error("input: 'y'", lambda: interpreter.input("y")) |
# Copyright 2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nbformat
from kale import Pipeline, Step, NotebookConfig
def test_merge_code(dummy_nb_config):
"""Test the merge code functionality."""
pipeline = Pipeline(NotebookConfig(**dummy_nb_config))
pipeline.add_step(Step(name="test", source=["test1"]))
pipeline.get_step("test").merge_code("test2")
assert pipeline.get_step("test").source == ["test1", "test2"]
_EMPTY_TAG = {"step_names": [], "prev_steps": []}
@pytest.mark.parametrize("metadata,target", [
({}, _EMPTY_TAG),
({"tags": []}, _EMPTY_TAG),
({"other_field": None}, _EMPTY_TAG),
# test special tags
({"tags": ["imports"]}, {"step_names": ["imports"], "prev_steps": []}),
({"tags": ["pipeline-parameters"]},
{"step_names": ["pipeline-parameters"], "prev_steps": []}),
({"tags": ["functions"]}, {"step_names": ["functions"], "prev_steps": []}),
# test skip tag
({"tags": ["skip"]},
{"step_names": ["skip"], "prev_steps": []}),
({"tags": ["skip", "block:other"]},
{"step_names": ["skip"], "prev_steps": []}),
# test that prev tag is ignored when having a special tag
({"tags": ["imports", "prev:step1"]},
{"step_names": ["imports"], "prev_steps": []}),
({"tags": ["functions", "prev:step1"]},
{"step_names": ["functions"], "prev_steps": []}),
({"tags": ["pipeline-parameters", "prev:step1"]},
{"step_names": ["pipeline-parameters"], "prev_steps": []}),
# when specifying multiple blocks, only the special block tag name
# is returned
({"tags": ["imports", "block:step1"]},
{"step_names": ["imports"], "prev_steps": []}),
({"tags": ["functions", "block:step1"]},
{"step_names": ["functions"], "prev_steps": []}),
({"tags": ["pipeline-parameters", "block:step1"]},
{"step_names": ["pipeline-parameters"], "prev_steps": []}),
# test simple block names and prev step names
({"tags": ["block:step1"]},
{"step_names": ["step1"], "prev_steps": []}),
({"tags": ["block:step1", "block:step2"]},
{"step_names": ["step1", "step2"], "prev_steps": []}),
({"tags": ["block:step1", "prev:step2"]},
{"step_names": ["step1"], "prev_steps": ["step2"]}),
])
def test_parse_metadata_success(notebook_processor, metadata, target):
"""Test parse_metadata function."""
notebook_processor.parse_cell_metadata(metadata)
@pytest.mark.parametrize("metadata", [
({"tags": ["random_value"]}),
({"tags": [0]}),
({"tags": ["prev:step2"]}),
])
def test_parse_metadata_exc(notebook_processor, metadata):
"""Test parse_metadata exception cases."""
with pytest.raises(ValueError):
notebook_processor.parse_cell_metadata(metadata)
def test_get_pipeline_parameters_source_simple(notebook_processor):
"""Test that the function gets the correct pipeline parameters source."""
notebook = nbformat.v4.new_notebook()
cells = [
("0", {}),
("0", {"tags": []}),
("1", {"tags": ["pipeline-parameters"]}),
("1", {}),
]
notebook.cells = [nbformat.v4.new_code_cell(source=s, metadata=m)
for (s, m) in cells]
notebook_processor.notebook = notebook
assert notebook_processor.get_pipeline_parameters_source() == "1\n1"
def test_get_pipeline_parameters_source_with_step(notebook_processor):
"""Test that the function gets the correct pipeline parameters source."""
notebook = nbformat.v4.new_notebook()
cells = [
("1", {"tags": ["pipeline-parameters"]}),
("0", {"tags": ["step:test"]}),
("1", {"tags": ["pipeline-parameters"]}),
]
notebook.cells = [nbformat.v4.new_code_cell(source=s, metadata=m)
for (s, m) in cells]
notebook_processor.notebook = notebook
assert notebook_processor.get_pipeline_parameters_source() == "1\n1"
def test_get_pipeline_parameters_source_skip(notebook_processor):
"""Test that the function gets the correct pipeline parameters source."""
notebook = nbformat.v4.new_notebook()
cells = [
("1", {"tags": ["pipeline-parameters"]}),
("0", {"tags": ["skip"]}),
("1", {"tags": ["pipeline-parameters"]}),
("1", {"tags": []}),
]
notebook.cells = [nbformat.v4.new_code_cell(source=s, metadata=m)
for (s, m) in cells]
notebook_processor.notebook = notebook
assert notebook_processor.get_pipeline_parameters_source() == "1\n1\n1"
def test_get_pipeline_parameters_source_followed_reserved(notebook_processor):
"""Test that the function gets the correct pipeline parameters source."""
notebook = nbformat.v4.new_notebook()
cells = [
("1", {"tags": ["pipeline-parameters"]}),
("0", {"tags": ["imports"]}),
("1", {"tags": ["pipeline-parameters"]}),
("1", {"tags": []}),
]
notebook.cells = [nbformat.v4.new_code_cell(source=s, metadata=m)
for (s, m) in cells]
notebook_processor.notebook = notebook
assert notebook_processor.get_pipeline_parameters_source() == "1\n1\n1"
def test_get_pipeline_metrics_source_raises(notebook_processor):
"""Test exception when pipeline metrics isn't at the end of notebook."""
notebook = nbformat.v4.new_notebook()
cells = [
("1", {"tags": ["pipeline-metrics"]}),
("0", {"tags": ["imports"]}),
]
notebook.cells = [nbformat.v4.new_code_cell(source=s, metadata=m)
for (s, m) in cells]
with pytest.raises(ValueError, match=r"Tag pipeline-metrics tag must be"
r" placed on a cell at the end of"
r" the Notebook\..*"):
notebook_processor.notebook = notebook
notebook_processor.get_pipeline_metrics_source()
|
<filename>emodelrunner/create_cells.py
"""Functions to create cells."""
# Copyright 2020-2021 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from emodelrunner.cell import CellModelCustom
from emodelrunner.load import (
load_mechanisms,
load_syn_mechs,
load_unoptimized_parameters,
get_sscx_morph_args,
get_synplas_morph_args,
get_syn_mech_args,
)
from emodelrunner.morphology import NrnFileMorphologyCustom, get_axon_hoc
def create_cell(
unopt_params_path,
emodel,
add_synapses,
morph_args,
gid,
syn_mech_args=None,
use_glu_synapse=False,
fixhp=False,
syn_setup_params=None,
v_init=-80,
celsius=34,
):
"""Create a cell.
Args:
unopt_params_path (str): path to the unoptimized parameters json file
emodel (str): name to give to the cell model
add_synapses (bool): whether to add synapses to the cell
morph_args (dict): morphology-related configuration
gid (int): cell model ID
syn_mech_args (dict): synapse-related configuration
use_glu_synapse (bool): whether to use GluSynapseCustom class for synapses
fixhp (bool): to uninsert SK_E2 for hyperpolarization in cell model
syn_setup_params (dict): contains extra parameters to setup synapses
when using GluSynapseCustom
        v_init (float): initial voltage (mV)
        celsius (float): cell temperature (celsius)
Returns:
CellModelCustom: cell model
"""
# pylint: disable=too-many-arguments, too-many-locals
# load mechanisms
mechs = load_mechanisms(unopt_params_path)
# add synapses mechs
if add_synapses:
mechs += [
load_syn_mechs(
syn_mech_args["seed"],
syn_mech_args["rng_settings_mode"],
os.path.join(syn_mech_args["syn_dir"], syn_mech_args["syn_data_file"]),
os.path.join(syn_mech_args["syn_dir"], syn_mech_args["syn_conf_file"]),
use_glu_synapse=use_glu_synapse,
syn_setup_params=syn_setup_params,
)
]
# load parameters
params = load_unoptimized_parameters(unopt_params_path, v_init, celsius)
# load morphology
try:
replace_axon_hoc = get_axon_hoc(morph_args["axon_hoc_path"])
except KeyError:
replace_axon_hoc = None
morph = NrnFileMorphologyCustom(
morph_args["morph_path"],
do_replace_axon=morph_args["do_replace_axon"],
replace_axon_hoc=replace_axon_hoc,
)
# create cell
cell = CellModelCustom(
name=emodel,
morph=morph,
mechs=mechs,
params=params,
gid=gid,
add_synapses=add_synapses,
fixhp=fixhp,
)
return cell
def create_cell_using_config(config):
"""Create a cell given configuration.
Args:
config (configparser.ConfigParser): configuration
Returns:
CellModelCustom: cell model
"""
unopt_params_path = config.get("Paths", "unoptimized_params_path")
# get synapse config data
add_synapses = config.getboolean("Synapses", "add_synapses")
syn_mech_args = get_syn_mech_args(config)
# get morphology config data
morph_args = get_sscx_morph_args(config)
# create cell
cell = create_cell(
unopt_params_path,
config.get("Cell", "emodel"),
add_synapses,
morph_args,
config.getint("Cell", "gid"),
syn_mech_args,
v_init=config.getfloat("Cell", "v_init"),
celsius=config.getfloat("Cell", "celsius"),
)
return cell
def get_postcell(
config,
fixhp=False,
syn_setup_params=None,
):
"""Return the postcell for synapse plasticity run.
Args:
config (configparser.ConfigParser): configuration
fixhp (bool): to uninsert SK_E2 for hyperpolarization in cell model
syn_setup_params (dict): contains extra parameters to setup synapses
when using GluSynapseCustom
Returns:
CellModelCustom: post-synaptic cell model
"""
emodel = config.get("Cell", "emodel")
gid = config.getint("Cell", "gid")
base_seed = config.getint("SynapsePlasticity", "base_seed")
v_init = config.getfloat("Cell", "v_init")
celsius = config.getfloat("Cell", "celsius")
unopt_params_path = config.get("Paths", "unoptimized_params_path")
syn_mech_args = get_syn_mech_args(config)
# rewrite seed and rng setting mode over basic emodelrunner config defaults
syn_mech_args["seed"] = base_seed
syn_mech_args["rng_settings_mode"] = "Compatibility"
morph_args = get_synplas_morph_args(config)
add_synapses = True
cell = create_cell(
unopt_params_path,
emodel,
add_synapses,
morph_args,
gid,
syn_mech_args,
use_glu_synapse=True,
fixhp=fixhp,
syn_setup_params=syn_setup_params,
v_init=v_init,
celsius=celsius,
)
return cell
def get_precell(
config,
fixhp=False,
):
"""Return the precell for synapse plasticity pair simulation run.
Args:
config (configparser.ConfigParser): configuration
fixhp (bool): to uninsert SK_E2 for hyperpolarization in cell model
Returns:
CellModelCustom: pre-synaptic cell model
"""
emodel = config.get("Cell", "emodel")
gid = config.getint("Cell", "gid")
v_init = config.getfloat("Cell", "v_init")
celsius = config.getfloat("Cell", "celsius")
unopt_params_path = config.get("Paths", "precell_unoptimized_params_path")
morph_args = get_synplas_morph_args(config, precell=True)
add_synapses = False
cell = create_cell(
unopt_params_path,
emodel,
add_synapses,
morph_args,
gid,
fixhp=fixhp,
v_init=v_init,
celsius=celsius,
)
return cell
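# Hedged usage sketch (illustrative only; the config path below is hypothetical,
# and the sections/options are simply those read by the helpers above):
#
#     import configparser
#
#     config = configparser.ConfigParser()
#     config.read("config/config_allsteps.ini")
#     cell = create_cell_using_config(config)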
|
<filename>patch_management/tests/test_cve_scan_utils.py
from django.test import TestCase
from django.urls import reverse, resolve
from ..models import User, SSHProfile, System, Package, CVE
from ..tasks import celery_scan_cve
class CveScanViewTestCase(TestCase):
'''
    Test the CVE scanning function.
    This test requires an internet connection to consume the Red Hat CVE public API
(https://access.redhat.com/labs/securitydataapi)
'''
def setUp(self):
# Setup a test account
self.user = User.objects.create_user(username='johndoe', email='<EMAIL>', password='<PASSWORD>')
self.client.login(username='johndoe', password='<PASSWORD>')
# Setup a test SSH profile
ssh_setup_url = reverse('setup_ssh')
sshProfile = SSHProfile.objects.get(pk=self.user.id)
sshProfile.ssh_server_address = '127.0.0.1'
sshProfile.ssh_username = 'test_user'
sshProfile.save()
# Setup System information
self.system1 = System.objects.create(
hostname='test1.server',
owner=self.user,
connected=True,
system_os_name= 'OS1_name',
system_os_version= 'OS1_version',
system_kernel= 'OS1_kernel_version',
system_package_manager= 'yum' # Set to support RPM
)
self.system2 = System.objects.create(
hostname='test2.server',
owner=self.user,
connected=True,
system_os_name= 'OS2_name',
system_os_version= 'OS2_version',
system_kernel= 'OS2_kernel_version',
system_package_manager= 'apt' # Set to not support RPM
)
# Setup package information
Package.objects.create(name='openssh.x86_64', current_version='7.4p1-16.el7', new_version=None, active=True, system=self.system1)
Package.objects.create(name='openssh.x86_64', current_version='7.4p1-16.el7', new_version=None, active=True, system=self.system2)
def test_scan_cve_all_systems(self):
celery_scan_cve(self.user.id, None)
cves_on_system1 = CVE.objects.filter(system=self.system1)
cves_on_system2 = CVE.objects.filter(system=self.system2)
'''
Should find CVE info on system 1 as it uses RPM/YUM.
Expected CVE-2017-15906 according to openssh-7.4p1-16.el7.
(https://access.redhat.com/labs/securitydataapi/cve.json?package=openssh-7.4p1-16.el7)
'''
        self.assertEqual(len(cves_on_system1), 1)
        self.assertEqual(cves_on_system1[0].cve_id, 'CVE-2017-15906')
        self.assertEqual(cves_on_system1[0].affected_package, 'openssh-7.4p1-16.el7')
'''
Should not find CVE info on system 2 as it doesn't use RPM/YUM.
Thus the scan won't occur.
'''
        self.assertEqual(len(cves_on_system2), 0)
def test_scan_cve_on_RPM_supported_system(self):
celery_scan_cve(self.user.id, self.system1.id)
cves_on_system1 = CVE.objects.filter(system=self.system1)
'''
Should find CVE info on system 1 as it uses RPM/YUM.
Expected CVE-2017-15906 according to openssh-7.4p1-16.el7.
(https://access.redhat.com/labs/securitydataapi/cve.json?package=openssh-7.4p1-16.el7)
'''
        self.assertEqual(len(cves_on_system1), 1)
        self.assertEqual(cves_on_system1[0].cve_id, 'CVE-2017-15906')
        self.assertEqual(cves_on_system1[0].affected_package, 'openssh-7.4p1-16.el7')
def test_scan_cve_on_non_RPM_supported_system(self):
celery_scan_cve(self.user.id, self.system2.id)
cves_on_system2 = CVE.objects.filter(system=self.system2)
'''
Should not find CVE info on system 2 as it doesn't use RPM/YUM.
Thus the scan won't occur.
'''
        self.assertEqual(len(cves_on_system2), 0)
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prepare TF.Examples for on-device recommendation model.
The following steps are included: 1) downloading the raw data, 2) processing it
into user activity sequences and splitting them into train/test data, and
3) converting the examples to TF.Examples and writing them to the output location.
More information about the movielens dataset can be found here:
https://grouplens.org/datasets/movielens/
"""
import collections
import json
import os
import random
import re
from absl import app
from absl import flags
from absl import logging
import pandas as pd
import tensorflow as tf
FLAGS = flags.FLAGS
# Permalinks to download movielens data.
MOVIELENS_1M_URL = "https://files.grouplens.org/datasets/movielens/ml-1m.zip"
MOVIELENS_ZIP_FILENAME = "ml-1m.zip"
MOVIELENS_ZIP_HASH = "a6898adb50b9ca05aa231689da44c217cb524e7ebd39d264c56e2832f2c54e20"
MOVIELENS_EXTRACTED_DIR = "ml-1m"
RATINGS_FILE_NAME = "ratings.dat"
MOVIES_FILE_NAME = "movies.dat"
RATINGS_DATA_COLUMNS = ["UserID", "MovieID", "Rating", "Timestamp"]
MOVIES_DATA_COLUMNS = ["MovieID", "Title", "Genres"]
OUTPUT_TRAINING_DATA_FILENAME = "train_movielens_1m.tfrecord"
OUTPUT_TESTING_DATA_FILENAME = "test_movielens_1m.tfrecord"
OUTPUT_MOVIE_VOCAB_FILENAME = "movie_vocab.json"
OUTPUT_MOVIE_YEAR_VOCAB_FILENAME = "movie_year_vocab.txt"
OUTPUT_MOVIE_GENRE_VOCAB_FILENAME = "movie_genre_vocab.txt"
OUTPUT_MOVIE_TITLE_UNIGRAM_VOCAB_FILENAME = "movie_title_unigram_vocab.txt"
OUTPUT_MOVIE_TITLE_BIGRAM_VOCAB_FILENAME = "movie_title_bigram_vocab.txt"
PAD_MOVIE_ID = 0
PAD_RATING = 0.0
PAD_MOVIE_YEAR = 0
UNKNOWN_STR = "UNK"
VOCAB_MOVIE_ID_INDEX = 0
VOCAB_COUNT_INDEX = 3
def define_flags():
"""Define flags."""
flags.DEFINE_string("data_dir", "/tmp",
"Path to download and store movielens data.")
flags.DEFINE_string("output_dir", None,
"Path to the directory of output files.")
flags.DEFINE_bool("build_vocabs", True,
"If yes, generate movie feature vocabs.")
flags.DEFINE_integer("min_timeline_length", 3,
"The minimum timeline length to construct examples.")
flags.DEFINE_integer("max_context_length", 10,
"The maximum length of user context history.")
flags.DEFINE_integer("max_context_movie_genre_length", 10,
"The maximum length of user context history.")
flags.DEFINE_integer(
"min_rating", None, "Minimum rating of movie that will be used to in "
"training data")
flags.DEFINE_float("train_data_fraction", 0.9, "Fraction of training data.")
class MovieInfo(
collections.namedtuple(
"MovieInfo", ["movie_id", "timestamp", "rating", "title", "genres"])):
"""Data holder of basic information of a movie."""
__slots__ = ()
def __new__(cls,
movie_id=PAD_MOVIE_ID,
timestamp=0,
rating=PAD_RATING,
title="",
genres=""):
return super(MovieInfo, cls).__new__(cls, movie_id, timestamp, rating,
title, genres)
def download_and_extract_data(data_directory,
url=MOVIELENS_1M_URL,
fname=MOVIELENS_ZIP_FILENAME,
file_hash=MOVIELENS_ZIP_HASH,
extracted_dir_name=MOVIELENS_EXTRACTED_DIR):
"""Download and extract zip containing MovieLens data to a given directory.
Args:
data_directory: Local path to extract dataset to.
url: Direct path to MovieLens dataset .zip file. See constants above for
examples.
fname: str, zip file name to download.
file_hash: str, SHA-256 file hash.
extracted_dir_name: str, extracted dir name under data_directory.
Returns:
Downloaded and extracted data file directory.
"""
if not tf.io.gfile.exists(data_directory):
tf.io.gfile.makedirs(data_directory)
path_to_zip = tf.keras.utils.get_file(
fname=fname,
origin=url,
file_hash=file_hash,
hash_algorithm="sha256",
extract=True,
cache_dir=data_directory)
extracted_file_dir = os.path.join(
os.path.dirname(path_to_zip), extracted_dir_name)
return extracted_file_dir
def read_data(data_directory, min_rating=None):
"""Read movielens ratings.dat and movies.dat file into dataframe."""
ratings_df = pd.read_csv(
os.path.join(data_directory, RATINGS_FILE_NAME),
sep="::",
names=RATINGS_DATA_COLUMNS,
encoding="unicode_escape") # May contain unicode. Need to escape.
ratings_df["Timestamp"] = ratings_df["Timestamp"].apply(int)
if min_rating is not None:
ratings_df = ratings_df[ratings_df["Rating"] >= min_rating]
movies_df = pd.read_csv(
os.path.join(data_directory, MOVIES_FILE_NAME),
sep="::",
names=MOVIES_DATA_COLUMNS,
encoding="unicode_escape") # May contain unicode. Need to escape.
return ratings_df, movies_df
def convert_to_timelines(ratings_df):
"""Convert ratings data to user."""
timelines = collections.defaultdict(list)
movie_counts = collections.Counter()
for user_id, movie_id, rating, timestamp in ratings_df.values:
timelines[user_id].append(
MovieInfo(movie_id=movie_id, timestamp=int(timestamp), rating=rating))
movie_counts[movie_id] += 1
# Sort per-user timeline by timestamp
for (user_id, context) in timelines.items():
context.sort(key=lambda x: x.timestamp)
timelines[user_id] = context
return timelines, movie_counts
def generate_movies_dict(movies_df):
"""Generates movies dictionary from movies dataframe."""
movies_dict = {
movie_id: MovieInfo(movie_id=movie_id, title=title, genres=genres)
for movie_id, title, genres in movies_df.values
}
movies_dict[0] = MovieInfo()
return movies_dict
def extract_year_from_title(title):
year = re.search(r"\((\d{4})\)", title)
if year:
return int(year.group(1))
return 0
def generate_feature_of_movie_years(movies_dict, movies):
"""Extracts year feature for movies from movie title."""
return [
extract_year_from_title(movies_dict[movie.movie_id].title)
for movie in movies
]
def generate_movie_genres(movies_dict, movies):
"""Create a feature of the genre of each movie.
Save genre as a feature for the movies.
Args:
movies_dict: Dict of movies, keyed by movie_id with value of (title, genre)
movies: list of movies to extract genres.
Returns:
movie_genres: list of genres of all input movies.
"""
movie_genres = []
for movie in movies:
if not movies_dict[movie.movie_id].genres:
continue
genres = [
tf.compat.as_bytes(genre)
for genre in movies_dict[movie.movie_id].genres.split("|")
]
movie_genres.extend(genres)
return movie_genres
def _pad_or_truncate_movie_feature(feature, max_len, pad_value):
  """Pad `feature` with `pad_value` up to `max_len`, then truncate to `max_len`."""
feature.extend([pad_value for _ in range(max_len - len(feature))])
return feature[:max_len]
def generate_examples_from_single_timeline(timeline,
movies_dict,
max_context_len=100,
max_context_movie_genre_len=320):
"""Generate TF examples from a single user timeline.
Generate TF examples from a single user timeline. Timeline with length less
than minimum timeline length will be skipped. And if context user history
length is shorter than max_context_len, features will be padded with default
values.
Args:
timeline: The timeline to generate TF examples from.
movies_dict: Dictionary of all MovieInfos.
max_context_len: The maximum length of the context. If the context history
length is less than max_context_length, features will be padded with
default values.
max_context_movie_genre_len: The length of movie genre feature.
Returns:
examples: Generated examples from this single timeline.
"""
examples = []
for label_idx in range(1, len(timeline)):
start_idx = max(0, label_idx - max_context_len)
context = timeline[start_idx:label_idx]
# Pad context with out-of-vocab movie id 0.
while len(context) < max_context_len:
context.append(MovieInfo())
label_movie_id = int(timeline[label_idx].movie_id)
context_movie_id = [int(movie.movie_id) for movie in context]
context_movie_rating = [movie.rating for movie in context]
context_movie_year = generate_feature_of_movie_years(movies_dict, context)
context_movie_genres = generate_movie_genres(movies_dict, context)
context_movie_genres = _pad_or_truncate_movie_feature(
context_movie_genres, max_context_movie_genre_len,
tf.compat.as_bytes(UNKNOWN_STR))
feature = {
"context_movie_id":
tf.train.Feature(
int64_list=tf.train.Int64List(value=context_movie_id)),
"context_movie_rating":
tf.train.Feature(
float_list=tf.train.FloatList(value=context_movie_rating)),
"context_movie_genre":
tf.train.Feature(
bytes_list=tf.train.BytesList(value=context_movie_genres)),
"context_movie_year":
tf.train.Feature(
int64_list=tf.train.Int64List(value=context_movie_year)),
"label_movie_id":
tf.train.Feature(
int64_list=tf.train.Int64List(value=[label_movie_id]))
}
tf_example = tf.train.Example(features=tf.train.Features(feature=feature))
examples.append(tf_example)
return examples
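# Illustrative sketch (not part of the original code): for a timeline
# [A, B, C, D] and max_context_len=2, the loop above emits three examples:
#   context [A, <pad>] -> label B
#   context [A, B]     -> label C
#   context [B, C]     -> label D
# where <pad> is the default MovieInfo() whose movie_id is the out-of-vocab id 0.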
def generate_examples_from_timelines(timelines,
movies_df,
min_timeline_len=3,
max_context_len=100,
max_context_movie_genre_len=320,
train_data_fraction=0.9,
random_seed=None,
shuffle=True):
"""Convert user timelines to tf examples.
Convert user timelines to tf examples by adding all possible context-label
pairs in the examples pool.
Args:
timelines: The user timelines to process.
movies_df: The dataframe of all movies.
min_timeline_len: The minimum length of timeline. If the timeline length is
less than min_timeline_len, empty examples list will be returned.
max_context_len: The maximum length of the context. If the context history
length is less than max_context_length, features will be padded with
default values.
max_context_movie_genre_len: The length of movie genre feature.
train_data_fraction: Fraction of training data.
random_seed: Seed for randomization.
shuffle: Whether to shuffle the examples before splitting train and test
data.
Returns:
train_examples: TF example list for training.
test_examples: TF example list for testing.
"""
examples = []
movies_dict = generate_movies_dict(movies_df)
progress_bar = tf.keras.utils.Progbar(len(timelines))
for timeline in timelines.values():
if len(timeline) < min_timeline_len:
progress_bar.add(1)
continue
single_timeline_examples = generate_examples_from_single_timeline(
timeline=timeline,
movies_dict=movies_dict,
max_context_len=max_context_len,
max_context_movie_genre_len=max_context_movie_genre_len)
examples.extend(single_timeline_examples)
progress_bar.add(1)
# Split the examples into train, test sets.
if shuffle:
random.seed(random_seed)
random.shuffle(examples)
last_train_index = round(len(examples) * train_data_fraction)
train_examples = examples[:last_train_index]
test_examples = examples[last_train_index:]
return train_examples, test_examples
def generate_movie_feature_vocabs(movies_df, movie_counts):
"""Generate vocabularies for movie features.
Generate vocabularies for movie features (movie_id, genre, year), sorted by
usage count. Vocab id 0 will be reserved for default padding value.
Args:
movies_df: Dataframe for movies.
movie_counts: Counts that each movie is rated.
Returns:
    movie_vocab: List of [movie_id, title, genres, count] entries, sorted by
      how often each movie was rated.
movie_genre_vocab: List of all movie genres, sorted by genre usage counts.
movie_year_vocab: List of all movie years, sorted by year usage counts.
"""
movie_vocab = []
movie_genre_counter = collections.Counter()
movie_year_counter = collections.Counter()
for movie_id, title, genres in movies_df.values:
count = movie_counts.get(movie_id) or 0
movie_vocab.append([movie_id, title, genres, count])
year = extract_year_from_title(title)
movie_year_counter[year] += 1
for genre in genres.split("|"):
movie_genre_counter[genre] += 1
movie_vocab.sort(key=lambda x: x[VOCAB_COUNT_INDEX], reverse=True) # by count
movie_year_vocab = [0] + [x for x, _ in movie_year_counter.most_common()]
movie_genre_vocab = [UNKNOWN_STR
] + [x for x, _ in movie_genre_counter.most_common()]
return (movie_vocab, movie_year_vocab, movie_genre_vocab)
def write_tfrecords(tf_examples, filename):
"""Writes tf examples to tfrecord file, and returns the count."""
with tf.io.TFRecordWriter(filename) as file_writer:
length = len(tf_examples)
progress_bar = tf.keras.utils.Progbar(length)
for example in tf_examples:
file_writer.write(example.SerializeToString())
progress_bar.add(1)
return length
def write_vocab_json(vocab, filename):
"""Write generated movie vocabulary to specified file."""
with open(filename, "w", encoding="utf-8") as jsonfile:
json.dump(vocab, jsonfile, indent=2)
def write_vocab_txt(vocab, filename):
  """Write the generated vocabulary to the specified file, one item per line."""
with open(filename, "w", encoding="utf-8") as f:
for item in vocab:
f.write(str(item) + "\n")
def generate_datasets(extracted_data_dir,
output_dir,
min_timeline_length,
max_context_length,
max_context_movie_genre_length,
min_rating=None,
build_vocabs=True,
train_data_fraction=0.9,
train_filename=OUTPUT_TRAINING_DATA_FILENAME,
test_filename=OUTPUT_TESTING_DATA_FILENAME,
vocab_filename=OUTPUT_MOVIE_VOCAB_FILENAME,
vocab_year_filename=OUTPUT_MOVIE_YEAR_VOCAB_FILENAME,
vocab_genre_filename=OUTPUT_MOVIE_GENRE_VOCAB_FILENAME):
"""Generates train and test datasets as TFRecord, and returns stats."""
logging.info("Reading data to dataframes.")
ratings_df, movies_df = read_data(extracted_data_dir, min_rating=min_rating)
logging.info("Generating movie rating user timelines.")
timelines, movie_counts = convert_to_timelines(ratings_df)
logging.info("Generating train and test examples.")
train_examples, test_examples = generate_examples_from_timelines(
timelines=timelines,
movies_df=movies_df,
min_timeline_len=min_timeline_length,
max_context_len=max_context_length,
max_context_movie_genre_len=max_context_movie_genre_length,
train_data_fraction=train_data_fraction)
if not tf.io.gfile.exists(output_dir):
tf.io.gfile.makedirs(output_dir)
logging.info("Writing generated training examples.")
train_file = os.path.join(output_dir, train_filename)
train_size = write_tfrecords(tf_examples=train_examples, filename=train_file)
logging.info("Writing generated testing examples.")
test_file = os.path.join(output_dir, test_filename)
test_size = write_tfrecords(tf_examples=test_examples, filename=test_file)
stats = {
"train_size": train_size,
"test_size": test_size,
"train_file": train_file,
"test_file": test_file,
}
if build_vocabs:
(movie_vocab, movie_year_vocab, movie_genre_vocab) = (
generate_movie_feature_vocabs(
movies_df=movies_df, movie_counts=movie_counts))
vocab_file = os.path.join(output_dir, vocab_filename)
write_vocab_json(movie_vocab, filename=vocab_file)
stats.update({
"vocab_size": len(movie_vocab),
"vocab_file": vocab_file,
"vocab_max_id": max([arr[VOCAB_MOVIE_ID_INDEX] for arr in movie_vocab])
})
for vocab, filename, key in zip([movie_year_vocab, movie_genre_vocab],
[vocab_year_filename, vocab_genre_filename],
["year_vocab", "genre_vocab"]):
vocab_file = os.path.join(output_dir, filename)
write_vocab_txt(vocab, filename=vocab_file)
stats.update({
key + "_size": len(vocab),
key + "_file": vocab_file,
})
return stats
def main(_):
logging.info("Downloading and extracting data.")
extracted_data_dir = download_and_extract_data(data_directory=FLAGS.data_dir)
stats = generate_datasets(
extracted_data_dir=extracted_data_dir,
output_dir=FLAGS.output_dir,
min_timeline_length=FLAGS.min_timeline_length,
max_context_length=FLAGS.max_context_length,
max_context_movie_genre_length=FLAGS.max_context_movie_genre_length,
min_rating=FLAGS.min_rating,
build_vocabs=FLAGS.build_vocabs,
train_data_fraction=FLAGS.train_data_fraction,
)
logging.info("Generated dataset: %s", stats)
if __name__ == "__main__":
define_flags()
app.run(main)
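# Hedged CLI sketch (illustrative only; the script name and output path are
# hypothetical, while the flag names match define_flags() above):
#
#     python example_generation_movielens.py \
#         --data_dir=/tmp \
#         --output_dir=/tmp/recommendation \
#         --min_rating=2 \
#         --build_vocabs=True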
|
<reponame>vtmoreau/doccano
from django.conf import settings
from django.contrib.auth.models import User
from model_mommy import mommy
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from .utils import (assign_user_to_role, create_default_roles,
remove_all_role_mappings)
class TestCommentListAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.project_member_name = 'project_member_name'
cls.project_member_pass = '<PASSWORD>'
cls.another_project_member_name = 'another_project_member_name'
cls.another_project_member_pass = '<PASSWORD>'
cls.non_project_member_name = 'non_project_member_name'
cls.non_project_member_pass = '<PASSWORD>'
create_default_roles()
cls.project_member = User.objects.create_user(username=cls.project_member_name,
                                                       password=cls.project_member_pass)
another_project_member = User.objects.create_user(username=cls.another_project_member_name,
                                                           password=cls.another_project_member_pass)
User.objects.create_user(username=cls.non_project_member_name, password=cls.non_project_member_pass)
main_project = mommy.make('SequenceLabelingProject', users=[cls.project_member, another_project_member])
main_project_doc = mommy.make('Document', project=main_project)
cls.comment = mommy.make('Comment', document=main_project_doc, text='comment 1', user=cls.project_member)
mommy.make('Comment', document=main_project_doc, text='comment 2', user=cls.project_member)
mommy.make('Comment', document=main_project_doc, text='comment 3', user=another_project_member)
cls.url = reverse(viewname='comment_list_doc', args=[main_project.id, main_project_doc.id])
cls.url_project = reverse(viewname='comment_list_project', args=[main_project.id])
assign_user_to_role(project_member=cls.project_member, project=main_project,
role_name=settings.ROLE_ANNOTATOR)
assign_user_to_role(project_member=another_project_member, project=main_project,
role_name=settings.ROLE_ANNOTATOR)
def test_returns_comments_to_project_member(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 3)
self.client.login(username=self.another_project_member_name,
                          password=self.another_project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 3)
def test_does_not_return_comments_to_non_project_member(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_does_not_allow_deletion_by_non_project_member(self):
self.client.login(username=self.non_project_member_name,
password=self.non_project_member_pass)
response = self.client.delete('{}/{}'.format(self.url, self.comment.id), format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_does_not_allow_deletion_of_non_owned_comment(self):
self.client.login(username=self.another_project_member_name,
                          password=self.another_project_member_pass)
response = self.client.delete('{}/{}'.format(self.url, self.comment.id), format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_update_delete_comment(self):
self.client.login(username=self.project_member_name,
                          password=self.project_member_pass)
response = self.client.post(self.url, format='json', data={'text': 'comment'})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['user'], self.project_member.id)
self.assertEqual(response.data['text'], 'comment')
url = '{}/{}'.format(self.url, response.data['id'])
# check if all comments are fetched
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 4)
# update comment
response = self.client.patch(url, format='json', data={'text': 'new comment'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['text'], 'new comment')
# delete comment
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 3)
def test_returns_project_comments_to_project_member(self):
self.client.login(username=self.project_member_name,
                          password=self.project_member_pass)
response = self.client.get(self.url_project, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 3)
self.client.login(username=self.another_project_member_name,
                          password=self.another_project_member_pass)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 3)
def test_does_not_return_project_comments_to_non_project_member(self):
self.client.login(username=self.non_project_member_name,
                          password=self.non_project_member_pass)
response = self.client.get(self.url_project, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
|
<gh_stars>0
#!/usr/bin/python3
# -*- coding: latin-1 -*-
import os
import sys
# import psycopg2
import json
from bson import json_util
from pymongo import MongoClient
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
def strip_quotes(word):
"""Strip all quotations marks from a word"""
return str(word).strip("\"\'")
def json_to_str(results):
"""Return a plain string with a json dict"""
return str(json_util.dumps(results, sort_keys=True, indent=4))
def create_app():
app = Flask(__name__)
return app
app = create_app()
# REPLACE WITH YOUR DATABASE NAME
MONGODATABASE = "dbEscuchas"
MONGOSERVER = "localhost"
MONGOPORT = 27017
client = MongoClient(MONGOSERVER, MONGOPORT)
mongodb = client[MONGODATABASE]
''' # Uncomment for postgres connection
# REPLACE WITH YOUR DATABASE NAME, USER AND PASS
POSTGRESDATABASE = "mydatabase"
POSTGRESUSER = "myuser"
POSTGRESPASS = "<PASSWORD>"
postgresdb = psycopg2.connect(
database=POSTGRESDATABASE,
user=POSTGRESUSER,
    password=POSTGRESPASS)
'''
# Change this to the absolute path on the server.
QUERIES_FILENAME = '/var/www/FlaskApp/queries'
@app.route("/")
def home():
with open(QUERIES_FILENAME, 'r', encoding='utf-8') as queries_file:
json_file = json.load(queries_file)
pairs = [(x["name"],
x["database"],
x["description"],
x["query"]) for x in json_file]
return render_template('file.html', results=pairs)
@app.route("/mongo")
def mongo():
query = request.args.get("query")
if query is None or "find" not in query:
return "no query"
    # Note: eval() on request input is dangerous; this endpoint only accepts
    # 'find' queries, but it should still not be exposed to untrusted users.
    results = eval('mongodb.'+query)
results = json_util.dumps(results, sort_keys=True, indent=4)
return render_template('mongo.html', results=results)
@app.route("/word")
def search_by_word():
"""Provide a url to search word in 'contenido'"""
word = request.args.get("word")
if word is None:
return "[]" # No query
results = mongodb.colEscuchas.find({"$text":{"$search": word}})
return json_to_str(results)
@app.route("/date")
def search_by_date():
"""Provide a url to search phone numbers by date"""
date = request.args.get("date")
if date is None:
return "[]" # No query
results = mongodb.colEscuchas.find({"fecha": strip_quotes(date)}, {"_id":0, "numero":1})
return json_to_str(results)
@app.route("/number")
def search_by_number():
"""Provide a url to search the last k messages of a given number"""
number = request.args.get("number")
if number is None:
return "[]" # No query
k = request.args.get("k")
try:
k = int(k)
    except (TypeError, ValueError):  # k is None or malformed
k = 1 # defaults to one message
results = mongodb.colEscuchas.find({"numero": strip_quotes(number)}).limit(k).sort("fecha", -1)
return json_to_str(results)
@app.route("/postgres")
def postgres():
return "Postgres API is not available"
query = request.args.get("query")
if not query is None:
cursor = postgresdb.cursor()
cursor.execute(query)
results = [[a for a in result] for result in cursor]
print(results)
return render_template('postgres.html', results=results)
else:
return "no query"
@app.route("/example")
def example():
return render_template('example.html')
if __name__ == "__main__":
app.run()
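# Hedged usage sketch (illustrative only; the dates and numbers below are
# hypothetical, and the exact field formats depend on the documents stored in
# colEscuchas):
#
#     GET /word?word=hola                   -> documents whose text matches the word
#     GET /date?date=2017-01-01             -> phone numbers with messages on that date
#     GET /number?number=56911111111&k=3    -> last 3 messages for that number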
|
<filename>cogs/mod_mail.py
import config
import discord
from discord.ext import commands
from discord.ext.commands import Cog
from helpers.checks import check_if_verified_or_dms
from helpers.userlogs import get_blank_userlog, get_userlog, set_userlog
import json
import time
class ModMail(Cog):
def __init__(self, bot):
self.bot = bot
def add_mail_log(self, uid, message):
userlogs = get_userlog()
uid = str(uid)
if uid not in userlogs:
userlogs[uid] = get_blank_userlog()
userlogs[uid]["mail"].append(message)
set_userlog(json.dumps(userlogs))
def build_embed(self, author, message):
embed = discord.Embed(description=message["body"],)
embed.set_author(
name=f"{author.name}#{author.discriminator}", icon_url=author.avatar_url
)
embed.colour = self.get_message_color(message)
embed.set_footer(text=f"{self.get_message_icon(message)} - {author.id}")
return embed
def get_message_color(self, message):
if message["resolved"] == True:
return 0x4CAF50
elif message["replier_id"] != 0:
return 0x2196F3
else:
return 0xE91E63
def get_message_icon(self, message):
if message["resolved"] == True:
return "✔️"
elif message["replier_id"] != 0:
return "↩️"
else:
return "✉️"
# Commands
@commands.check(check_if_verified_or_dms)
@commands.command(aliases=["creport"])
async def modmail(self, ctx, *, body: str = ""):
"""Sends a mod mail message"""
# We should probably delete the message for privacy.
if ctx.guild:
await ctx.message.delete()
# Prevent sending of blank messages.
if len(body.strip()) == 0:
await ctx.send("A message can not be empty.")
return
if len(body.strip()) > 2048:
await ctx.send("A message can not be longer than 2048 characters.")
return
logs = get_userlog()
uid = str(ctx.author.id)
# Get the timeout from the config and default it to 15 seconds.
timeout = getattr(config, "modmail_timeout", 15)
# Make sure our user exists in the userlog, and they've sent a message before.
if uid in logs and "mail" in logs[uid] and len(logs[uid]["mail"]) != 0:
last_message = logs[uid]["mail"][-1]
# Prevents sending the same message.
if last_message["body"].strip() == body.strip():
await ctx.send("Unable to send message.")
return
# Rate limit messages.
delta_time = int(time.time()) - last_message["timestamp"]
if delta_time < timeout:
await ctx.send(
f"Please wait {timeout - delta_time} seconds before sending another message."
)
return
message = {
"body": body.strip(),
"timestamp": int(time.time()),
"resolved": False,
"replier_id": 0,
"replier_name": "",
"message_id": 0,
}
# Send message
modmail_channel = self.bot.get_channel(config.modmail_channel)
message["message_id"] = (
await modmail_channel.send(embed=self.build_embed(ctx.author, message))
).id
# Log messages to the userlog.
self.add_mail_log(uid, message)
await ctx.send(f"{ctx.author.mention} - Message sent.")
@commands.check(check_if_verified_or_dms)
@commands.command(aliases=["solved", "completed"])
async def resolved(self, ctx):
"""Marks your last mod mail message as resolved"""
# We should probably delete the message for privacy.
await ctx.message.delete()
logs = get_userlog()
uid = str(ctx.author.id)
if uid not in logs or "mail" not in logs[uid] or len(logs[uid]["mail"]) == 0:
await ctx.send("No mod mail message to mark as resolved.")
return
if logs[uid]["mail"][-1]["resolved"]:
await ctx.send("Last mod mail message is already marked as resolved.")
return
logs[uid]["mail"][-1]["resolved"] = True
set_userlog(json.dumps(logs))
modmail_channel = self.bot.get_channel(config.modmail_channel)
message = await modmail_channel.fetch_message(logs[uid]["mail"][-1]["message_id"])
await message.edit(embed=self.build_embed(ctx.author, logs[uid]["mail"][-1]))
await ctx.send(f"{ctx.author.mention} - Message marked as resolved.")
def setup(bot):
bot.add_cog(ModMail(bot))
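# The cog above assumes a `config` module that provides at least the following
# attributes (a sketch with placeholder values, not the project's actual config):
#
#   modmail_channel = 123456789012345678  # ID of the channel that receives mod mail embeds
#   modmail_timeout = 15                  # optional; seconds between messages, defaults to 15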
|
from abc import ABCMeta, abstractmethod
import sys
import traceback
from ..utility.text import colour_text as coloured
def _model_wrapper(dependency=None, message=None, complete=None):
def private(self, key, default=None):
return getattr(self,'_%s%s'%(self.__class__.__name__,key),default)
def private_set(self, key, data):
args = tuple()
kwargs = {}
if data is None:
pass
elif isinstance(data, tuple):
if len(data) == 2 and isinstance(data[0], tuple) and isinstance(data[1],dict):
args = data[0]
kwargs = data[1]
else:
args = data
elif isinstance(data, dict):
kwargs = data
else:
args = (data,)
setattr(self, '_%s%s_args'%(self.__class__.__name__,key), args)
setattr(self, '_%s%s_kwargs'%(self.__class__.__name__,key), kwargs)
setattr(self, '_%s%s'%(self.__class__.__name__,key), True)
def public_output(args, kwargs):
if len(args) == 0 and len(kwargs) == 0:
return None
elif len(kwargs) == 0:
if len(args) == 1:
return args[0]
else:
return args
elif len(args) == 0:
return kwargs
else:
return args, kwargs
def wrap(f):
fname = f.__name__
if fname.startswith('__'):
fname = fname[2:]
def wrapped(self, *args, **kwargs):
# If arguments provided, use them
if len(args) > 0 or len(kwargs) > 0:
return f(self, *args, **kwargs)
# Otherwise, use the existing results, generating them as required
if not private(self, '__%s'%fname, False):
if dependency is not None:
dependency_f = getattr(self, dependency, None)
if dependency_f is None:
raise ValueError("Unknown method: %s" % dependency)
if private(self, '__%s'%dependency) is None:
dependency_f()
if message is not None:
                    print(coloured(message, "YELLOW", True), end=" ")
                    sys.stdout.flush()
try:
private_set(self, '__%s'%fname, f(self, *private(self,'__%s_args'%dependency,[]), **private(self,'__%s_kwargs'%dependency,{})) )
                except Exception:
                    print(coloured("Error", "RED", True))
                    traceback.print_exc()
                    sys.exit(1)
                if complete is not None:
                    print(coloured(complete, "GREEN", True))
return public_output(private(self, '__%s_args'%fname), private(self, '__%s_kwargs'%fname))
return wrapped
return wrap
class ModelAnalysis(metaclass=ABCMeta):
'''
`ModelAnalysis` is a helper class that can simplify the routine of running
simulations, processing the results, and then plotting (or otherwise outputting)
    them. One simply needs to subclass `ModelAnalysis` and implement the following
methods:
- prepare(self, *args, **kwargs)
- simulate(self, *args, **kwargs)
- process(self, *args, **kwargs)
- plot(self, *args, **kwargs)
Each of these methods is guaranteed to be called in the order specified above, with
the return values of the previous method being fed forward to the next.
Calling `process` (with no arguments), for example, will also call `prepare` and `simulate` in order, with the
return values of `prepare` being passed to `simulate`, and the return values of `simulate`
being passed to `process`. If a method is called directly with input values, then this
chaining does not occur, and the method simply returns what it should.
It is necessary to be a little bit careful about what one returns in these methods.
In particular, this is the way in which return values are processed:
- If a tuple is returned of length 2, and the first element is a
tuple and the second a dict, then it is assumed that these are
respectively the `args` and `kwargs` to be fed forward.
- If a tuple is returned of any other length, or any of the above conditions
fail, then these are assumed to be the `args` to be fed forward.
- If a dictionary is returned, then these are assumed to be the
`kwargs` to be fed forward.
- Otherwise, the result is fed forward as the first non-keyword argument.
    .. note:: It is not necessary to return values from these methods; you may
        instead save your results as attributes.
.. note:: Return values of all of these methods will be cached, so each method
will only be run once.
'''
def __getattribute__(self, name):
if name in ['prepare', 'process', 'simulate', 'plot']:
return object.__getattribute__(self, '_ModelAnalysis__%s' % name )
else:
return object.__getattribute__(self, name)
@_model_wrapper(dependency=None, message="Preparing...", complete="DONE")
def __prepare(self, *args, **kwargs):
return object.__getattribute__(self, 'prepare')(*args, **kwargs)
@_model_wrapper(dependency="prepare", message="Simulating...", complete="DONE")
def __simulate(self, *args, **kwargs):
return object.__getattribute__(self, 'simulate')(*args, **kwargs)
@_model_wrapper(dependency="simulate", message="Processing...", complete="DONE")
def __process(self, *args, **kwargs):
return object.__getattribute__(self, 'process')(*args, **kwargs)
@_model_wrapper(dependency="process", message="Plotting...", complete="DONE")
def __plot(self, *args, **kwargs):
return object.__getattribute__(self, 'plot')(*args, **kwargs)
def __init__(self, *args, **kwargs):
self.prepare(*args, **kwargs)
@abstractmethod
def prepare(self, *args, **kwargs):
'''
This method should prepare the `ModelAnalysis` instance for calling the
rest of the methods. It is invoked on class initialisation, with the
arguments passed to the constructor. Any return values will be passed
onto `simulate` if it is ever called with no arguments.
'''
pass
@abstractmethod
def simulate(self, *args, **kwargs):
'''
This method should perform whichever simulations are required. Any values returned
will be passed onto `process` if it is ever called with no arguments.
'''
pass
@abstractmethod
def process(self, *args, **kwargs):
'''
This method should perform whatever processing is interesting on return values of
`simulate`. Any values returned will be passed onto `plot` if it is ever called
with no arguments.
'''
pass
@abstractmethod
def plot(self, *args, **kwargs):
'''
This method should perform whatever plotting/output is desired based upon return values of
`process`.
'''
pass
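# A minimal usage sketch (hypothetical subclass, not part of this module), illustrating
# how return values are chained between the four methods:
#
#     class LineFit(ModelAnalysis):
#         def prepare(self, slope=2):
#             return {'slope': slope}               # dict -> kwargs for `simulate`
#         def simulate(self, slope=1):
#             return [slope * x for x in range(5)]  # single value -> first arg of `process`
#         def process(self, data):
#             return sum(data),                     # 1-tuple -> args for `plot`
#         def plot(self, total):
#             print("total:", total)
#
#     LineFit().plot()   # runs prepare -> simulate -> process -> plot, each exactly once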
|
import dtlpy as dl
import os
import logging
logger = logging.getLogger(__name__)
def deploy_predict(package):
input_to_init = {
'package_name': package.name,
}
logger.info('deploying package . . .')
service_obj = package.services.deploy(service_name='predict',
module_name='predict_module',
package=package,
runtime={'gpu': False,
'numReplicas': 1,
'concurrency': 2,
'runnerImage': 'buffalonoam/zazu-image:0.3'
},
is_global=True,
execution_timeout=60 * 60 * 1e10,
init_input=input_to_init)
return service_obj
def deploy_model(package):
input_to_init = {
'package_name': package.name,
}
logger.info('deploying package . . .')
service_obj = package.services.deploy(service_name='trial',
module_name='models_module',
package=package,
runtime={'gpu': True,
'numReplicas': 1,
'concurrency': 1,
'runnerImage': 'buffalonoam/zazu-image:0.3'
},
is_global=True,
execution_timeout=60 * 60 * 1e10,
init_input=input_to_init)
return service_obj
def deploy_zazu(package):
input_to_init = {
'package_name': package.name
}
logger.info('deploying package . . .')
service_obj = package.services.deploy(service_name='zazu',
module_name='zazu_module',
package=package,
runtime={'gpu': False,
'numReplicas': 1,
'concurrency': 2,
'runnerImage': 'buffalonoam/zazu-image:0.3'
},
is_global=True,
execution_timeout=60 * 60 * 1e10,
init_input=input_to_init)
return service_obj
def deploy_zazu_timer(package, init_inputs):
logger.info('deploying package . . .')
service_obj = package.services.deploy(service_name='timer',
module_name='zazu_timer_module',
package=package,
runtime={'gpu': False,
'numReplicas': 1,
'concurrency': 2,
'runnerImage': 'buffalonoam/zazu-image:0.3'
},
is_global=True,
execution_timeout=60*60*1e10,
init_input=init_inputs)
return service_obj
def deploy_predict_item(package, model_id, checkpoint_id):
input_to_init = {'model_id': model_id,
'checkpoint_id': checkpoint_id}
service_obj = package.services.deploy(service_name='predict',
module_name='predict_item_module',
package=package,
runtime={'gpu': False,
'numReplicas': 1,
'concurrency': 2,
'runnerImage': 'buffalonoam/zazu-image:0.3',
'podType': 'gpu-k80-m'
},
is_global=True,
execution_timeout=60 * 60 * 1e10,
init_input=input_to_init)
return service_obj
def push_package(project):
dataset_input = dl.FunctionIO(type='Dataset', name='dataset')
train_query_input = dl.FunctionIO(type='Json', name='train_query')
val_query_input = dl.FunctionIO(type='Json', name='val_query')
hp_value_input = dl.FunctionIO(type='Json', name='hp_values')
model_specs_input = dl.FunctionIO(type='Json', name='model_specs')
checkpoint_path_input = dl.FunctionIO(type='Json', name='checkpoint_path')
package_name_input = dl.FunctionIO(type='Json', name='package_name')
configs_input = dl.FunctionIO(type='Json', name='configs')
time_input = dl.FunctionIO(type='Json', name='time')
test_dataset_input = dl.FunctionIO(type='Json', name='test_dataset_id')
query_input = dl.FunctionIO(type='Json', name='query')
item_input = dl.FunctionIO(type='Item', name='item')
model_input = dl.FunctionIO(type='Json', name='model_id')
checkpoint_input = dl.FunctionIO(type='Json', name='checkpoint_id')
predict_inputs = [dataset_input, val_query_input, checkpoint_path_input, model_specs_input]
model_inputs = [dataset_input, train_query_input, val_query_input, hp_value_input, model_specs_input]
zazu_inputs = [configs_input]
predict_function = dl.PackageFunction(name='run', inputs=predict_inputs, outputs=[], description='')
model_function = dl.PackageFunction(name='run', inputs=model_inputs, outputs=[], description='')
zazu_search_function = dl.PackageFunction(name='search', inputs=zazu_inputs, outputs=[], description='')
zazu_predict_function = dl.PackageFunction(name='predict', inputs=zazu_inputs, outputs=[], description='')
timer_update_function = dl.PackageFunction(name='update_time', inputs=time_input, outputs=[], description='')
predict_item_function = dl.PackageFunction(name='predict_single_item', inputs=[item_input], outputs=[],
description='')
load_checkpoint_function = dl.PackageFunction(name='load_new_inference_checkpoint',
inputs=[model_input, checkpoint_input], outputs=[],
description='')
predict_module = dl.PackageModule(entry_point='dataloop_services/predict_module.py',
name='predict_module',
functions=[predict_function],
init_inputs=[package_name_input])
models_module = dl.PackageModule(entry_point='dataloop_services/trial_module.py',
name='models_module',
functions=[model_function],
init_inputs=[package_name_input])
zazu_module = dl.PackageModule(entry_point='dataloop_services/zazu_module.py',
name='zazu_module',
functions=[zazu_search_function, zazu_predict_function],
                                   init_inputs=[package_name_input])
zazu_timer_module = dl.PackageModule(entry_point='dataloop_services/zazu_timer_module.py',
name='zazu_timer_module',
functions=[timer_update_function],
init_inputs=[configs_input, time_input, test_dataset_input, query_input])
predict_item_module = dl.PackageModule(entry_point='dataloop_services/prediction_module.py',
name='predict_item_module',
functions=[predict_item_function, load_checkpoint_function],
init_inputs=[model_input, checkpoint_input])
package_obj = project.packages.push(
package_name='zazuml',
src_path=os.getcwd(),
modules=[predict_module, models_module, zazu_module, zazu_timer_module, predict_item_module])
return package_obj
def update_service(project, service_name):
package_obj = project.packages.get('zazuml')
service = project.services.get(service_name)
service.package_revision = package_obj.version
service.update()
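# Typical usage (a sketch; the project name and the chosen deploy function are placeholders):
#
#   dl.login()
#   project = dl.projects.get(project_name='my-project')
#   package = push_package(project)
#   zazu_service = deploy_zazu(package)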
|
# this program uses SymPy to symbolically derive several quantities that are described
# in the notes: the (1) relaxation and buoyancy transfer functions, (2) the velocity
# solutions, and (3) a limiting value of one of the eigenvalues of the problem.
#
# all of the printing is commented out; this file is best read/used in conjunction
# with the derivation in the appendix.
#---------------------- 1. ELEVATION SOLUTIONS----------------------------------
import sympy as sp
import matplotlib.pyplot as plt
nu = sp.Symbol('n')
mu = sp.exp(nu)
# use this matrix for floating ice:
M = sp.Matrix(( [mu, -1/mu, nu*mu,-nu/mu], [mu, 1/mu, mu*(nu+1),(nu-1)/mu], [1, 1, 1,-1],[1,-1,0,0] ))
b1 = sp.Symbol('b1') # proportional to h
b2 = sp.Symbol('b2') # proportional to s
# solution vector
A,B,C,D = sp.symbols('A,B,C,D')
# rhs vector:
b = sp.Matrix(4,1,[b1,0,0,b2])
sol, = sp.linsolve((M,b),[A,B,C,D])
# vertical velocity at upper surface of ice sheet
w_h = mu*sol[0] + (1/mu)*sol[1] + nu*mu*sol[2] + (nu/mu)*sol[3]
# # print the result (modulo a 1/k factor) for floating ice:
# sp.pprint(sp.collect(sp.collect(sp.collect(sp.simplify(w_h),b1),b2),mu) )
# Also need to print w_b for floating ice, since it is part of the solution
# (modulo a 1/k factor)
w_b = sol[0]+sol[1]
# # print this:
# sp.pprint(sp.collect(sp.collect(sp.collect(sp.simplify(w_b),b1),b2),mu) )
#---------------------- 2. VELOCITY SOLUTIONS------------------------------------
# # we also need the vertical velocity at an arbitrary depth z, which we can
# # compute as follows:
z = sp.Symbol('z')
# w(z) modulo a 1/k factor
w = sp.exp(nu*z)*sol[0] + (1/sp.exp(nu*z))*sol[1] + nu*z*sp.exp(nu*z)*sol[2] + nu*z*sol[3]/sp.exp(nu*z)
# # print this:
# sp.pprint(sp.simplify(sp.collect(sp.collect(sp.simplify(w.subs(b1,0)),b1),b2)) )
# coefficients from the elevation solution problem
A,B,C,D = sol
k = sp.Symbol('k')
wb = (A+B)/k # w at z=0
wh = (A*mu + B/mu + nu*mu*C + nu*D/mu)/k # w at z=H
wz0 = A-B+C+D # dw/dz at z=0
wzh = A*mu -B/mu +C*mu +C*nu*mu -D*nu/mu + D/mu # dw/dz at z=H
# second derivative at z=H
wzzh = A*k*mu + B*k/mu + 2*C*k*mu + C*k*nu*mu + D*k*nu/mu -2*D*k/mu
# second derivative at z=0
wzz0 = k*(A+B+2*C-2*D)
Ph = wzh - wz0*(mu+1/mu)/2- wzz0*(1/k)*(mu-1/mu)/2 # P(H)
Pzh = wzzh - wz0*k*(mu-1/mu)/2 - wzz0*(mu+1/mu)/2 # P_z(H)
b3 = -(k*wh + Pzh/k) # first rhs vector entry
b4 = -(k*wb) # 2nd rhs vector entry
# Matrix for horizontal surface velocity solutions
M2 = sp.Matrix(( [mu, -1/mu],[1, -1]))
# solution vector
E,F = sp.symbols('E,F')
# RHS vector:
d = sp.Matrix(2,1,[b3,b4])
sol2, = sp.linsolve((M2,d),[E,F])
uh = Ph + sol2[0]*mu + sol2[1]/mu
wz = A*sp.exp(nu*z) - B*sp.exp(-nu*z) + C*(1+nu*z)*sp.exp(nu*z)+ D*(1-nu*z)*sp.exp(-nu*z)
coshkz = (sp.exp(nu*z) + sp.exp(-nu*z))/2
sinhkz = (sp.exp(nu*z) - sp.exp(-nu*z))/2
P = wz -wz0*coshkz- wzz0*sinhkz/k
u = P + sol2[0]*sp.exp(nu*z) + sol2[1]*sp.exp(-nu*z)
## print velocity response functions
# sp.pprint(sp.simplify((sp.collect(sp.collect(u.subs(b2,0),b1),b2))))
#----------------------- 3. EIGENVALUE LIMIT------------------------------------
k = sp.Symbol('k',positive=True)
d = sp.Symbol('delta',positive=True)
mu = sp.exp(k)
R0 = (mu**4)+4*k*(mu**2)-1
D = k*(mu**4-2*(1+2*k**2)*(mu**2)+1)
B0 = 2*(k+1)*(mu**3) + 2*(k-1)*mu
R = R0/D
B = B0/D
# symbolic expression for the larger eigenvalue in the problem:
lamda = -(d+1)*R*(1- sp.sqrt((4*d/(d+1)**2)*((B0/R0)**2 -1) + 1))/2
# take the limit as k --> 0:
L = sp.limit(lamda,k,0)
# # print the limit:
# sp.pprint(sp.factor(sp.simplify(L)))
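# # optional numeric sanity check of the k --> 0 limit (commented out to match the
# # conventions above; delta = 1/10 is an arbitrary choice):
# for kk in [sp.Rational(1, 10), sp.Rational(1, 100), sp.Rational(1, 1000)]:
#     sp.pprint(sp.N(lamda.subs(d, sp.Rational(1, 10)).subs(k, kk)))
# sp.pprint(sp.N(L.subs(d, sp.Rational(1, 10))))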
|
<reponame>JustM3Dev/Minecraft
# Imports, sorted alphabetically.
# Python packages
import random
# Third-party packages
# Nothing for now...
# Modules from this project
from blocks import *
#
# Base
#
class SmallPlant:
block = None
grows_on = grass_block, dirt_block
@classmethod
def add_to_world(cls, world, position, sync=False):
world.add_block(position, cls.block, sync=sync)
class Trunk:
block = None
height_range = 4, 8
grows_on = ()
def __init__(self, position, block=None, height_range=None):
if block is not None:
self.block = block
if height_range is not None:
self.height_range = height_range
x, y, z = position
self.height = random.randint(*self.height_range)
self.blocks = {}
for dy in range(self.height):
self.blocks[(x, y + dy, z)] = self.block
@classmethod
def add_to_world(cls, world, position, sync=False):
trunk = cls(position)
for item in list(trunk.blocks.items()):
world.add_block(*item, sync=sync)
class Tree:
trunk_block = None
leaf_block = None
trunk_height_range = 4, 8
grows_on = grass_block, dirt_block, snowgrass_block
@classmethod
def add_to_world(cls, world, position, sync=False):
trunk = Trunk(position, block=cls.trunk_block,
height_range=cls.trunk_height_range)
for item in list(trunk.blocks.items()):
world.add_block(*item, force=False, sync=sync)
x, y, z = position
height = trunk.height
treetop = y + height
# Leaves generation
d = height // 3 + 1
for xl in range(x - d, x + d):
dx = abs(xl - x)
for yl in range(treetop - d, treetop + d):
for zl in range(z - d, z + d):
# Don't replace existing blocks
if (xl, yl, zl) in world:
continue
# Avoids orphaned leaves
if not world.has_neighbors((xl, yl, zl),
set((cls.trunk_block,
cls.leaf_block))):
continue
dz = abs(zl - z)
                    # The farther we are (horizontally) from the trunk,
                    # the fewer leaves we place.
if random.uniform(0, dx + dz) > 0.6:
continue
world.add_block((xl, yl, zl), cls.leaf_block, force=False,
sync=sync)
#
# Small plants
#
class WaterMelon(SmallPlant):
block = melon_block
grows_on = grass_block, dirt_block, snowgrass_block
class Pumpkin(SmallPlant):
block = pumpkin_block
grows_on = grass_block, dirt_block, snowgrass_block
class YFlowers(SmallPlant):
block = yflowers_block
class Potato(SmallPlant):
block = potato_block
class Carrot(SmallPlant):
block = carrot_block
class Rose(SmallPlant):
block = rose_block
class TallGrass(SmallPlant):
block = fern_block
class TallGrass0(SmallPlant):
block = wildgrass0_block
grows_on = grass_block, dirt_block
class TallGrass1(SmallPlant):
block = wildgrass1_block
grows_on = grass_block, dirt_block
class TallGrass2(SmallPlant):
block = wildgrass2_block
grows_on = grass_block, dirt_block
class TallGrass3(SmallPlant):
block = wildgrass3_block
grows_on = grass_block, dirt_block
class TallGrass4(SmallPlant):
block = wildgrass4_block
grows_on = grass_block, dirt_block
class TallGrass5(SmallPlant):
block = wildgrass5_block
grows_on = grass_block, dirt_block
class TallGrass6(SmallPlant):
block = wildgrass6_block
grows_on = grass_block, dirt_block
class TallGrass7(SmallPlant):
block = wildgrass7_block
grows_on = grass_block, dirt_block
class DeadBush(SmallPlant):
block = deadbush_block
grows_on = sand_block, sandstone_block
class DesertGrass(SmallPlant):
block = desertgrass_block
grows_on = sand_block, sandstone_block
#
# Tall plants
#
class Cactus(Trunk):
block = cactus_block
height_range = 1, 4
grows_on = sand_block, sandstone_block
class TallCactus(Trunk):
block = tallcactus_block
height_range = 1, 10
grows_on = sand_block, sandstone_block
class Reed(Trunk):
block = reed_block
height_range = 1, 4
grows_on = sand_block, dirt_block
#
# Trees
#
class OakTree(Tree):
trunk_block = oakwood_block
leaf_block = oakleaf_block
class JungleTree(Tree):
trunk_block = junglewood_block
leaf_block = jungleleaf_block
trunk_height_range = 8, 12
class BirchTree(Tree):
trunk_block = birchwood_block
leaf_block = birchleaf_block
trunk_height_range = 5, 7
SMALL_PLANTS = set((
WaterMelon,
Pumpkin,
YFlowers,
Potato,
Carrot,
Rose,
TallGrass,
TallGrass0,
TallGrass1,
TallGrass2,
TallGrass3,
TallGrass4,
TallGrass5,
TallGrass6,
TallGrass7,
DeadBush,
DesertGrass,
))
TALL_PLANTS = set((
Cactus,
TallCactus,
Reed,
))
PLANTS = SMALL_PLANTS | TALL_PLANTS
TREES = set((
OakTree,
JungleTree,
BirchTree,
))
VEGETATION = PLANTS | TREES
TREE_BLOCKS = set(tree.trunk_block for tree in TREES)
LEAF_BLOCKS = set(tree.leaf_block for tree in TREES)
PLANT_BLOCKS = set(plant.block for plant in PLANTS)
VEGETATION_BLOCKS = PLANT_BLOCKS | TREE_BLOCKS | LEAF_BLOCKS
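# Example (a sketch; `world`, `position` and `supporting_block` are assumed to exist
# and are not defined in this module):
#
#   plant_cls = random.choice(list(SMALL_PLANTS))
#   if supporting_block in plant_cls.grows_on:
#       plant_cls.add_to_world(world, position, sync=True)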
|