Columns: repo_name (string, lengths 6-77), path (string, lengths 8-215), license (string, 15 classes), cells (sequence), types (sequence)
mne-tools/mne-tools.github.io
stable/_downloads/bcaf3ed1f43ea7377c6c0b00137d728f/custom_inverse_solver.ipynb
bsd-3-clause
[ "%matplotlib inline", "Source localization with a custom inverse solver\nThe objective of this example is to show how to plug a custom inverse solver\nin MNE in order to facilate empirical comparison with the methods MNE already\nimplements (wMNE, dSPM, sLORETA, eLORETA, LCMV, DICS, (TF-)MxNE etc.).\nThis script is educational and shall be used for methods\nevaluations and new developments. It is not meant to be an example\nof good practice to analyse your data.\nThe example makes use of 2 functions apply_solver and solver\nso changes can be limited to the solver function (which only takes three\nparameters: the whitened data, the gain matrix and the number of orientations)\nin order to try out another inverse algorithm.", "import numpy as np\nfrom scipy import linalg\nimport mne\nfrom mne.datasets import sample\nfrom mne.viz import plot_sparse_source_estimates\n\n\ndata_path = sample.data_path()\nmeg_path = data_path / 'MEG' / 'sample'\nfwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif'\nave_fname = meg_path / 'sample_audvis-ave.fif'\ncov_fname = meg_path / 'sample_audvis-shrunk-cov.fif'\nsubjects_dir = data_path / 'subjects'\ncondition = 'Left Auditory'\n\n# Read noise covariance matrix\nnoise_cov = mne.read_cov(cov_fname)\n# Handling average file\nevoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))\nevoked.crop(tmin=0.04, tmax=0.18)\n\nevoked = evoked.pick_types(eeg=False, meg=True)\n# Handling forward solution\nforward = mne.read_forward_solution(fwd_fname)", "Auxiliary function to run the solver", "def apply_solver(solver, evoked, forward, noise_cov, loose=0.2, depth=0.8):\n \"\"\"Call a custom solver on evoked data.\n\n This function does all the necessary computation:\n\n - to select the channels in the forward given the available ones in\n the data\n - to take into account the noise covariance and do the spatial whitening\n - to apply loose orientation constraint as MNE solvers\n - to apply a weigthing of the columns of the forward operator as in the\n weighted Minimum Norm formulation in order to limit the problem\n of depth bias.\n\n Parameters\n ----------\n solver : callable\n The solver takes 3 parameters: data M, gain matrix G, number of\n dipoles orientations per location (1 or 3). A solver shall return\n 2 variables: X which contains the time series of the active dipoles\n and an active set which is a boolean mask to specify what dipoles are\n present in X.\n evoked : instance of mne.Evoked\n The evoked data\n forward : instance of Forward\n The forward solution.\n noise_cov : instance of Covariance\n The noise covariance.\n loose : float in [0, 1] | 'auto'\n Value that weights the source variances of the dipole components\n that are parallel (tangential) to the cortical surface. If loose\n is 0 then the solution is computed with fixed orientation.\n If loose is 1, it corresponds to free orientations.\n The default value ('auto') is set to 0.2 for surface-oriented source\n space and set to 1.0 for volumic or discrete source space.\n depth : None | float in [0, 1]\n Depth weighting coefficients. 
If None, no depth weighting is performed.\n\n Returns\n -------\n stc : instance of SourceEstimate\n The source estimates.\n \"\"\"\n # Import the necessary private functions\n from mne.inverse_sparse.mxne_inverse import \\\n (_prepare_gain, is_fixed_orient,\n _reapply_source_weighting, _make_sparse_stc)\n\n all_ch_names = evoked.ch_names\n\n # Handle depth weighting and whitening (here is no weights)\n forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain(\n forward, evoked.info, noise_cov, pca=False, depth=depth,\n loose=loose, weights=None, weights_min=None, rank=None)\n\n # Select channels of interest\n sel = [all_ch_names.index(name) for name in gain_info['ch_names']]\n M = evoked.data[sel]\n\n # Whiten data\n M = np.dot(whitener, M)\n\n n_orient = 1 if is_fixed_orient(forward) else 3\n X, active_set = solver(M, gain, n_orient)\n X = _reapply_source_weighting(X, source_weighting, active_set)\n\n stc = _make_sparse_stc(X, active_set, forward, tmin=evoked.times[0],\n tstep=1. / evoked.info['sfreq'])\n\n return stc", "Define your solver", "def solver(M, G, n_orient):\n \"\"\"Run L2 penalized regression and keep 10 strongest locations.\n\n Parameters\n ----------\n M : array, shape (n_channels, n_times)\n The whitened data.\n G : array, shape (n_channels, n_dipoles)\n The gain matrix a.k.a. the forward operator. The number of locations\n is n_dipoles / n_orient. n_orient will be 1 for a fixed orientation\n constraint or 3 when using a free orientation model.\n n_orient : int\n Can be 1 or 3 depending if one works with fixed or free orientations.\n If n_orient is 3, then ``G[:, 2::3]`` corresponds to the dipoles that\n are normal to the cortex.\n\n Returns\n -------\n X : array, (n_active_dipoles, n_times)\n The time series of the dipoles in the active set.\n active_set : array (n_dipoles)\n Array of bool. Entry j is True if dipole j is in the active set.\n We have ``X_full[active_set] == X`` where X_full is the full X matrix\n such that ``M = G X_full``.\n \"\"\"\n inner = np.dot(G, G.T)\n trace = np.trace(inner)\n K = linalg.solve(inner + 4e-6 * trace * np.eye(G.shape[0]), G).T\n K /= np.linalg.norm(K, axis=1)[:, None]\n X = np.dot(K, M)\n\n indices = np.argsort(np.sum(X ** 2, axis=1))[-10:]\n active_set = np.zeros(G.shape[1], dtype=bool)\n for idx in indices:\n idx -= idx % n_orient\n active_set[idx:idx + n_orient] = True\n X = X[active_set]\n return X, active_set", "Apply your custom solver", "# loose, depth = 0.2, 0.8 # corresponds to loose orientation\nloose, depth = 1., 0. # corresponds to free orientation\nstc = apply_solver(solver, evoked, forward, noise_cov, loose, depth)", "View in 2D and 3D (\"glass\" brain like 3D plot)", "plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),\n opacity=0.1)" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ijmbarr/causalgraphicalmodels
notebooks/cgm-examples.ipynb
mit
[ "An Introduction to CausalGraphicalModels\nCausalGraphicalModel is a python module for describing and manipulating Causal Graphical Models and Structural Causal Models. Behind the curtain, it is a light wrapper around the python graph library networkx.\nThis notebook is designed to give a quick overview of the functionality of this package.\nCausalGraphicalModels", "from causalgraphicalmodels import CausalGraphicalModel\n\nsprinkler = CausalGraphicalModel(\n nodes=[\"season\", \"rain\", \"sprinkler\", \"wet\", \"slippery\"],\n edges=[\n (\"season\", \"rain\"), \n (\"season\", \"sprinkler\"), \n (\"rain\", \"wet\"),\n (\"sprinkler\", \"wet\"), \n (\"wet\", \"slippery\")\n ]\n)\n\n# draw return a graphviz `dot` object, which jupyter can render\nsprinkler.draw()\n\n# get the distribution implied by the graph\nprint(sprinkler.get_distribution())\n\n# check for d-seperation of two nodes\nsprinkler.is_d_separated(\"slippery\", \"season\", {\"wet\"})\n\n# get all the conditional independence relationships implied by a CGM\nsprinkler.get_all_independence_relationships()\n\n# check backdoor adjustment set\nsprinkler.is_valid_backdoor_adjustment_set(\"rain\", \"slippery\", {\"wet\"})\n\n# get all backdoor adjustment sets\nsprinkler.get_all_backdoor_adjustment_sets(\"rain\", \"slippery\")\n\n# get the graph created by intervening on node \"rain\"\ndo_sprinkler = sprinkler.do(\"rain\")\n\ndo_sprinkler.draw()", "Latent Variables", "dag_with_latent_variables = CausalGraphicalModel(\n nodes=[\"x\", \"y\", \"z\"],\n edges=[\n (\"x\", \"z\"),\n (\"z\", \"y\"), \n ],\n latent_edges=[\n (\"x\", \"y\")\n ]\n)\n\ndag_with_latent_variables.draw()\n\n# here there are no observed backdoor adjustment sets\ndag_with_latent_variables.get_all_backdoor_adjustment_sets(\"x\", \"y\")\n\n# but there is a frontdoor adjustment set\ndag_with_latent_variables.get_all_frontdoor_adjustment_sets(\"x\", \"y\")", "StructuralCausalModels\nFor Structural Causal Models (SCM) we need to specify the functional form of each node:", "from causalgraphicalmodels import StructuralCausalModel\nimport numpy as np\n\nscm = StructuralCausalModel({\n \"x1\": lambda n_samples: np.random.binomial(n=1,p=0.7,size=n_samples),\n \"x2\": lambda x1, n_samples: np.random.normal(loc=x1, scale=0.1),\n \"x3\": lambda x2, n_samples: x2 ** 2,\n})", "The only requirement on the functions are:\n - that variable names are consistent \n - each function accepts keyword variables in the form of numpy arrays and output numpy arrays of shape [n_samples] \n - that in addition to it's parents, each function takes a n_samples variables indicating how many samples to generate \n - that any function acts on each row independently. This ensure that the output samples are independent\nWrapping these functions in the StructuralCausalModel object allows us to easily generate samples:", "ds = scm.sample(n_samples=100)\n\nds.head()\n\n# and visualise the samples\nimport seaborn as sns\n\n%matplotlib inline\n\nsns.kdeplot(\n data=ds.x2,\n data2=ds.x3,\n)", "And to access the implied CGM\"", "scm.cgm.draw()", "And to apply an intervention:", "scm_do = scm.do(\"x1\")\n\nscm_do.cgm.draw()", "And sample from the distribution implied by this intervention:", "scm_do.sample(n_samples=5, set_values={\"x1\": np.arange(5)})" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Unidata/unidata-python-workshop
notebooks/MetPy_Case_Study/MetPy_Case_Study.ipynb
mit
[ "<a name=\"top\"></a>\n<div style=\"width:1000 px\">\n\n<div style=\"float:right; width:98 px; height:98px;\">\n<img src=\"https://raw.githubusercontent.com/Unidata/MetPy/master/src/metpy/plots/_static/unidata_150x150.png\" alt=\"Unidata Logo\" style=\"height: 98px;\">\n</div>\n\n<h1>MetPy Case Study</h1>\n\n<div style=\"clear:both\"></div>\n</div>\n\n<hr style=\"height:2px;\">\n\nThis is a tutorial on building a case study map for Dynamic Meteorology courses with use of Unidata tools, specifically MetPy and Siphon. In this tutorial we will cover accessing, calculating, and plotting model output.\nLet's investigate The Storm of the Century, although it would easy to change which case you wanted (please feel free to do so).\nReanalysis Output: NARR 00 UTC 13 March 1993\nData from Reanalysis on pressure surfaces:\n\nGeopotential Heights\nTemperature\nu-wind component\nv-wind component\n\nCalculations:\n\nVertical Vorticity\nAdvection of Temperature and Vorticity\nHorizontal Divergence\nWind Speed", "from datetime import datetime\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nfrom netCDF4 import Dataset, num2date\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter\nfrom siphon.catalog import TDSCatalog\nfrom siphon.ncss import NCSS\nimport matplotlib.pyplot as plt\nimport metpy.calc as mpcalc\nfrom metpy.plots import StationPlot\nfrom metpy.units import units", "Case Study Data\nThere are a number of different sites that you can utilize to access past model output analyses and even forecasts. The most robust collection is housed at the National Center for Environmental Information (NCEI, formerly NCDC) on a THREDDS server. The general website to begin your search is\nhttps://www.ncdc.noaa.gov/data-access\nthis link contains links to many different data sources (some of which we will come back to later in this tutorial). But for now, lets investigate what model output is avaiable\nhttps://www.ncdc.noaa.gov/data-access/model-data/model-datasets\nThe gridded model output that are available\nReanalysis\n * Climate Forecast System Reanalysis (CFSR)\n * CFSR provides a global reanalysis (a best estimate of the observed state of the atmosphere) of past weather from January 1979 through March 2011 at a horizontal resolution of 0.5°.\n * North American Regional Reanalysis (NARR)\n * NARR is a regional reanalysis of North America containing temperatures, winds, moisture, soil data, and dozens of other parameters at 32km horizontal resolution.\n * Reanalysis-1 / Reanalysis-2 (R1/R2)\n * Reanalysis-1 / Reanalysis-2 are two global reanalyses of atmospheric data spanning 1948/1979 to present at a 2.5° horizontal resolution.\nNumerical Weather Prediction\n * Climate Forecast System (CFS) \n * CFS provides a global reanalysis, a global reforecast of past weather, and an operational, seasonal forecast of weather out to nine months.\n * Global Data Assimilation System (GDAS)\n * GDAS is the set of assimilation data, both input and output, in various formats for the Global Forecast System model.\n * Global Ensemble Forecast System (GEFS)\n * GEFS is a global-coverage weather forecast model made up of 21 separate forecasts, or ensemble members, used to quantify the amount of uncertainty in a forecast. GEFS produces output four times a day with weather forecasts going out to 16 days.\n * Global Forecast System (GFS)\n * The GFS model is a coupled weather forecast model, composed of four separate models which work together to provide an accurate picture of weather conditions. 
GFS covers the entire globe down to a horizontal resolution of 28km.\n * North American Mesoscale (NAM)\n * NAM is a regional weather forecast model covering North America down to a horizontal resolution of 12km. Dozens of weather parameters are available from the NAM grids, from temperature and precipitation to lightning and turbulent kinetic energy.\n * Rapid Refresh (RAP)\n * RAP is a regional weather forecast model of North America, with separate sub-grids (with different horizontal resolutions) within the overall North America domain. RAP produces forecasts every hour with forecast lengths going out 18 hours. RAP replaced the Rapid Update Cycle (RUC) model on May 1, 2012.\n * Navy Operational Global Atmospheric Prediction System (NOGAPS)\n * NOGAPS analysis data are available in six-hourly increments on regularly spaced latitude-longitude grids at 1-degree and one-half-degree resolutions. Vertical resolution varies from 18 to 28 pressure levels, 34 sea level depths, the surface, and other various levels.\nOcean Models\n * Hybrid Coordinate Ocean Model (HYCOM), Global\n * The Navy implementation of HYCOM is the successor to Global NCOM. This site hosts regions covering U.S. coastal waters as well as a global surface model.\n * Navy Coastal Ocean Model (NCOM), Global\n * Global NCOM was run by the Naval Oceanographic Office (NAVOCEANO) as the Navy’s operational global ocean-prediction system prior to its replacement by the Global HYCOM system in 2013. This site hosts regions covering U.S., European, West Pacific, and Australian coastal waters as well as a global surface model.\n * Navy Coastal Ocean Model (NCOM), Regional\n * The Regional NCOM is a high-resolution version of NCOM for specific areas. NCEI serves the Americas Seas, U.S. East, and Alaska regions of NCOM.\n * Naval Research Laboratory Adaptive Ecosystem Climatology (AEC)\n * The Naval Research Laboratory AEC combines an ocean model with Earth observations to provide a synoptic view of the typical (climatic) state of the ocean for every day of the year. This dataset covers the Gulf of Mexico and nearby areas.\n * National Centers for Environmental Prediction (NCEP) Real Time Ocean Forecast System (RTOFS)–Atlantic\n * RTOFS–Atlantic is a data-assimilating nowcast-forecast system operated by NCEP. This dataset covers the Gulf of Mexico and most of the northern and central Atlantic.\nClimate Prediction\n * CM2 Global Coupled Climate Models (CM2.X)\n * CM2.X consists of two climate models to model the changes in climate over the past century and into the 21st century.\n * Coupled Model Intercomparison Project Phase 5 (CMIP5) (link is external)\n * The U.N. Intergovernmental Panel on Climate Change (IPCC) coordinates global analysis of climate models under the Climate Model Intercomparison Project (CMIP). CMIP5 is in its fifth iteration. 
Data are available through the Program for Climate Model Diagnosis and Intercomparison (PCMDI) website.\nDerived / Other Model Data\n * Service Records Retention System (SRRS)\n * SRRS is a store of weather observations, summaries, forecasts, warnings, and advisories generated by the National Weather Service for public use.\n * NOMADS Ensemble Probability Tool\n * The NOMADS Ensemble Probability Tool allows a user to query the Global Ensemble Forecast System (GEFS) to determine the probability that a set of forecast conditions will occur at a given location using all of the 21 separate GEFS ensemble members.\n * National Digital Forecast Database (NDFD)\n * NDFD are gridded forecasts created from weather data collected by National Weather Service field offices and processed through the National Centers for Environmental Prediction. NDFD data are available by WMO header or by date range.\n * National Digital Guidance Database (NDGD)\n * NDGD consists of forecasts, observations, model probabilities, climatological normals, and other digital data that complement the National Digital Forecast Database.\nNARR Output\nLets investigate what specific NARR output is available to work with from NCEI.\nhttps://www.ncdc.noaa.gov/data-access/model-data/model-datasets/north-american-regional-reanalysis-narr\nWe specifically want to look for data that has \"TDS\" data access, since that is short for a THREDDS server data access point. There are a total of four different GFS datasets that we could potentially use.\nChoosing our data source\nLet's go ahead and use the NARR Analysis data to investigate the past case we identified (The Storm of the Century).\nhttps://www.ncei.noaa.gov/thredds/catalog/narr-a-files/199303/19930313/catalog.html?dataset=narr-a-files/199303/19930313/narr-a_221_19930313_0000_000.grb\nAnd we will use a python package called Siphon to read this data through the NetCDFSubset (NetCDFServer) link.\nhttps://www.ncei.noaa.gov/thredds/ncss/grid/narr-a-files/199303/19930313/narr-a_221_19930313_0000_000.grb/dataset.html", "# Case Study Date\nyear = 1993\nmonth = 3\nday = 13\nhour = 0\n\ndt = datetime(year, month, day, hour)\n\n# Read NARR Data from THREDDS server\nbase_url = 'https://www.ncei.noaa.gov/thredds/catalog/narr-a-files/'\n\n# Programmatically generate the URL to the day of data we want\ncat = TDSCatalog(f'{base_url}{dt:%Y%m}/{dt:%Y%m%d}/catalog.xml')\n\n# Have Siphon find the appropriate dataset\nds = cat.datasets.filter_time_nearest(dt)\n\n# Download data using the NetCDF Subset Service\nncss = ds.subset()\nquery = ncss.query().lonlat_box(north=60, south=18, east=300, west=225)\nquery.all_times().variables('Geopotential_height_isobaric', 'Temperature_isobaric',\n 'u-component_of_wind_isobaric',\n 'v-component_of_wind_isobaric').add_lonlat().accept('netcdf')\ndata = ncss.get_data(query)\n\n# Back up in case of bad internet connection.\n# Uncomment the following line to read local netCDF file of NARR data\n# data = Dataset('../../data/NARR_19930313_0000.nc','r')", "Let's see what dimensions are in the file:", "data.dimensions", "Pulling Data for Calculation/Plotting\nThe object that we get from Siphon is netCDF-like, so we can pull data using familiar calls for all of the variables that are desired for calculations and plotting purposes.\nNOTE:\nDue to the curvilinear nature of the NARR grid, there is a need to smooth the data that we import for calculation and plotting purposes. 
For more information about why, please see the following link: http://www.atmos.albany.edu/facstaff/rmctc/narr/\nAdditionally, we want to attach units to our values for use in MetPy calculations later and it will also allow for easy conversion to other units.\n<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n Replace the `0`'s in the template below with your code:\n <ul>\n <li>Use the `gaussian_filter` function to smooth the `Temperature_isobaric`, `Geopotential_height_isobaric`, `u-component_of_wind_isobaric`, and `v-component_of_wind_isobaric` variables from the netCDF object with a `sigma` value of 1.</li>\n <li>Assign the units of `kelvin`, `meter`, `m/s`, and `m/s` resectively.</li>\n <li>Extract the `lat`, `lon`, and `isobaric1` variables.</li>\n </ul>\n</div>", "# Extract data and assign units\ntmpk = gaussian_filter(data.variables['Temperature_isobaric'][0], sigma=1.0) * units.K\nhght = 0\nuwnd = 0\nvwnd = 0\n\n# Extract coordinate data for plotting\nlat = data.variables['lat'][:]\nlon = data.variables['lon'][:]\nlev = 0", "<button data-toggle=\"collapse\" data-target=\"#sol1\" class='btn btn-primary'>View Solution</button>\n<div id=\"sol1\" class=\"collapse\">\n<code><pre>\n# Extract data and assign units\ntmpk = gaussian_filter(data.variables['Temperature_isobaric'][0],\n sigma=1.0) * units.K\nhght = gaussian_filter(data.variables['Geopotential_height_isobaric'][0],\n sigma=1.0) * units.meter\nuwnd = gaussian_filter(data.variables['u-component_of_wind_isobaric'][0], sigma=1.0) * units('m/s')\nvwnd = gaussian_filter(data.variables['v-component_of_wind_isobaric'][0], sigma=1.0) * units('m/s')\n\n\\# Extract coordinate data for plotting\nlat = data.variables['lat'][:]\nlon = data.variables['lon'][:]\nlev = data.variables['isobaric1'][:]\n</pre></code>\n</div>\n\nNext we need to extract the time variable. It's not in very useful units, but the num2date function can be used to easily create regular datetime objects.", "time = data.variables['time1']\nprint(time.units)\nvtime = num2date(time[0], units=time.units)\nprint(vtime)", "Finally, we need to calculate the spacing of the grid in distance units instead of degrees using the MetPy helper function lat_lon_grid_spacing.", "# Calcualte dx and dy for calculations\ndx, dy = mpcalc.lat_lon_grid_deltas(lon, lat)", "Finding Pressure Level Data\nA robust way to parse the data for a certain pressure level is to find the index value using the np.where function. 
Since the NARR pressure data ('levels') is in hPa, then we'll want to search that array for our pressure levels 850, 500, and 300 hPa.\n<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n Replace the `0`'s in the template below with your code:\n <ul>\n <li>Find the index of the 850 hPa, 500 hPa, and 300 hPa levels.</li>\n <li>Extract the heights, temperature, u, and v winds at those levels.</li>\n </ul>\n</div>", "# Specify 850 hPa data\nilev850 = np.where(lev==850)[0][0]\nhght_850 = hght[ilev850]\ntmpk_850 = 0\nuwnd_850 = 0\nvwnd_850 = 0\n\n# Specify 500 hPa data\nilev500 = 0\nhght_500 = 0\nuwnd_500 = 0\nvwnd_500 = 0\n\n# Specify 300 hPa data\nilev300 = 0\nhght_300 = 0\nuwnd_300 = 0\nvwnd_300 = 0", "<button data-toggle=\"collapse\" data-target=\"#sol2\" class='btn btn-primary'>View Solution</button>\n<div id=\"sol2\" class=\"collapse\">\n<code><pre>\n# Specify 850 hPa data\nilev850 = np.where(lev == 850)[0][0]\nhght_850 = hght[ilev850]\ntmpk_850 = tmpk[ilev850]\nuwnd_850 = uwnd[ilev850]\nvwnd_850 = vwnd[ilev850]\n\n\\# Specify 500 hPa data\nilev500 = np.where(lev == 500)[0][0]\nhght_500 = hght[ilev500]\nuwnd_500 = uwnd[ilev500]\nvwnd_500 = vwnd[ilev500]\n\n\\# Specify 300 hPa data\nilev300 = np.where(lev == 300)[0][0]\nhght_300 = hght[ilev300]\nuwnd_300 = uwnd[ilev300]\nvwnd_300 = vwnd[ilev300]\n</pre></code>\n</div>\n\nUsing MetPy to Calculate Atmospheric Dynamic Quantities\nMetPy has a large and growing list of functions to calculate many different atmospheric quantities. Here we want to use some classic functions to calculate wind speed, advection, planetary vorticity, relative vorticity, and divergence.\n\nWind Speed: mpcalc.wind_speed()\nAdvection: mpcalc.advection()\nPlanetary Vorticity: mpcalc.coriolis_parameter()\nRelative Vorticity: mpcalc.vorticity()\nDivergence: mpcalc.divergence()\n\nNote: For the above, MetPy Calculation module is imported in the following manner import metpy.calc as mpcalc.\nTemperature Advection\nA classic QG forcing term is 850-hPa temperature advection. MetPy has a function for advection\nadvection(scalar quantity, [advecting vector components], (grid spacing components))\nSo for temperature advection our scalar quantity would be the tempertaure, the advecting vector components would be our u and v components of the wind, and the grid spacing would be our dx and dy we computed in an earier cell.\n<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n <ul>\n <li>Uncomment and fill out the advection calculation below.</li>\n </ul>\n</div>", "# Temperature Advection\n# tmpc_adv_850 = mpcalc.advection(--Fill in this call--).to('degC/s')", "<button data-toggle=\"collapse\" data-target=\"#sol3\" class='btn btn-primary'>View Solution</button>\n<div id=\"sol3\" class=\"collapse\">\n<code><pre>\n# Temperature Advection\ntmpc_adv_850 = mpcalc.advection(tmpk_850, [uwnd_850, vwnd_850],\n (dx, dy), dim_order='yx').to('degC/s')\n\n</pre></code>\n</div>\n\nVorticity Calculations\nThere are a couple of different vorticities that we are interested in for various calculations, planetary vorticity, relative vorticity, and absolute vorticity. Currently MetPy has two of the three as functions within the calc module.\nPlanetary Vorticity (Coriolis Parameter)\ncoriolis_parameter(latitude in radians)\nNote: You must can convert your array of latitudes to radians...NumPy give a great function np.deg2rad() or have units attached to your latitudes in order for MetPy to convert them for you! 
Always check your output to make sure that your code is producing what you think it is producing.\nRelative Vorticity\nWhen atmospheric scientists talk about relative vorticity, we are really refering to the relative vorticity that is occuring about the vertical axis (the k-hat component). So in MetPy the function is\nvorticity(uwind, vwind, dx, dy)\n Absolute Vorticity\nCurrently there is no specific function for Absolute Vorticity, but this is easy for us to calculate from the previous two calculations because we just need to add them together!\nABS Vort = Rel. Vort + Coriolis Parameter\nHere having units are great, becase we won't be able to add things together that don't have the same units! Its a nice safety check just in case you entered something wrong in another part of the calculation, you'll get a units error.\n<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n <ul>\n <li>Fill in the function calls below to complete the vorticity calculations.</li>\n </ul>\n</div>", "# Vorticity and Absolute Vorticity Calculations\n\n# Planetary Vorticity\n# f = mpcalc.coriolis_parameter(-- Fill in here --).to('1/s')\n\n# Relative Vorticity\n# vor_500 = mpcalc.vorticity(-- Fill in here --)\n\n# Abosolute Vorticity\n# avor_500 = vor_500 + f", "<button data-toggle=\"collapse\" data-target=\"#sol4\" class='btn btn-primary'>View Solution</button>\n<div id=\"sol4\" class=\"collapse\">\n<code><pre>\n# Vorticity and Absolute Vorticity Calculations\n\n\\# Planetary Vorticity\nf = mpcalc.coriolis_parameter(np.deg2rad(lat)).to('1/s')\n\n\\# Relative Vorticity\nvor_500 = mpcalc.vorticity(uwnd_500, vwnd_500, dx, dy,\n dim_order='yx')\n\n\\# Abosolute Vorticity\navor_500 = vor_500 + f\n</pre></code>\n</div>\n\nVorticity Advection\nWe use the same MetPy function for temperature advection for our vorticity advection, we just have to change the scalar quantity (what is being advected) and have appropriate vector quantities for the level our scalar is from. So for vorticity advections well want our wind components from 500 hPa.", "# Vorticity Advection\nf_adv = mpcalc.advection(f, [uwnd_500, vwnd_500], (dx, dy), dim_order='yx')\n\nrelvort_adv = mpcalc.advection(vor_500, [uwnd_500, vwnd_500], (dx, dy), dim_order='yx')\n\nabsvort_adv = mpcalc.advection(avor_500, [uwnd_500, vwnd_500], (dx, dy), dim_order='yx')", "Divergence and Stretching Vorticity\nIf we want to analyze another component of the vorticity tendency equation other than advection, we might want to assess the stretching forticity term.\n-(Abs. Vort.)*(Divergence)\nWe already have absolute vorticity calculated, so now we need to calculate the divergence of the level, which MetPy has a function\ndivergence(uwnd, vwnd, dx, dy)\nThis function computes the horizontal divergence.", "# Stretching Vorticity\ndiv_500 = mpcalc.divergence(uwnd_500, vwnd_500, dx, dy, dim_order='yx')\n\nstretch_vort = -1 * avor_500 * div_500", "Wind Speed, Geostrophic and Ageostrophic Wind\nWind Speed\nCalculating wind speed is not a difficult calculation, but MetPy offers a function to calculate it easily keeping units so that it is easy to convert units for plotting purposes.\nwind_speed(uwnd, vwnd)\nGeostrophic Wind\nThe geostrophic wind can be computed from a given height gradient and coriolis parameter\ngeostrophic_wind(heights, coriolis parameter, dx, dy)\nThis function will return the two geostrophic wind components in a tuple. 
On the left hand side you'll be able to put two variables to save them off separately, if desired.\nAgeostrophic Wind\nCurrently, there is not a function in MetPy for calculating the ageostrophic wind, however, it is again a simple arithmatic operation to get it from the total wind (which comes from our data input) and out calculated geostrophic wind from above.\nAgeo Wind = Total Wind - Geo Wind", "# Divergence 300 hPa, Ageostrophic Wind\nwspd_300 = mpcalc.wind_speed(uwnd_300, vwnd_300).to('kts')\n\ndiv_300 = mpcalc.divergence(uwnd_300, vwnd_300, dx, dy, dim_order='yx')\nugeo_300, vgeo_300 = mpcalc.geostrophic_wind(hght_300, f, dx, dy, dim_order='yx')\n\nuageo_300 = uwnd_300 - ugeo_300\nvageo_300 = vwnd_300 - vgeo_300", "Maps and Projections", "# Data projection; NARR Data is Earth Relative\ndataproj = ccrs.PlateCarree()\n\n# Plot projection\n# The look you want for the view, LambertConformal for mid-latitude view\nplotproj = ccrs.LambertConformal(central_longitude=-100., central_latitude=40.,\n standard_parallels=[30, 60])\n\ndef create_map_background():\n fig=plt.figure(figsize=(14, 12))\n ax=plt.subplot(111, projection=plotproj)\n ax.set_extent([-125, -73, 25, 50],ccrs.PlateCarree())\n ax.coastlines('50m', linewidth=0.75)\n ax.add_feature(cfeature.STATES, linewidth=0.5)\n return fig, ax", "850-hPa Temperature Advection\n\nAdd one contour (Temperature in Celsius with a dotted linestyle\nAdd one colorfill (Temperature Advection in C/hr)\n\n<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n <ul>\n <li>Add one contour (Temperature in Celsius with a dotted linestyle</li>\n <li>Add one filled contour (Temperature Advection in C/hr)</li>\n </ul>\n</div>", "fig, ax = create_map_background()\n\n# Contour 1 - Temperature, dotted\n# Your code here!\n\n# Contour 2\nclev850 = np.arange(0, 4000, 30)\ncs = ax.contour(lon, lat, hght_850, clev850, colors='k',\n linewidths=1.0, linestyles='solid', transform=dataproj)\n\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=10, fmt='%i',\n rightside_up=True, use_clabeltext=True)\n\n# Filled contours - Temperature advection\ncontours = [-3, -2.2, -2, -1.5, -1, -0.5, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]\n# Your code here!\n\n# Vector\nax.barbs(lon, lat, uwnd_850.to('kts').m, vwnd_850.to('kts').m,\n regrid_shape=15, transform=dataproj)\n\n# Titles\nplt.title('850-hPa Geopotential Heights, Temperature (C), \\\n Temp Adv (C/h), and Wind Barbs (kts)', loc='left')\nplt.title(f'VALID: {vtime}', loc='right')\n\nplt.tight_layout()\nplt.show()", "<button data-toggle=\"collapse\" data-target=\"#sol5\" class='btn btn-primary'>View Solution</button>\n<div id=\"sol5\" class=\"collapse\">\n<code><pre>\nfig, ax = create_map_background()\n\n\\# Contour 1 - Temperature, dotted\ncs2 = ax.contour(lon, lat, tmpk_850.to('degC'), range(-50, 50, 2),\n colors='grey', linestyles='dotted', transform=dataproj)\n\nplt.clabel(cs2, fontsize=10, inline=1, inline_spacing=10, fmt='%i',\n rightside_up=True, use_clabeltext=True)\n\n\\# Contour 2\nclev850 = np.arange(0, 4000, 30)\ncs = ax.contour(lon, lat, hght_850, clev850, colors='k',\n linewidths=1.0, linestyles='solid', transform=dataproj)\n\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=10, fmt='%i',\n rightside_up=True, use_clabeltext=True)\n\n\\# Filled contours - Temperature advection\ncontours = [-3, -2.2, -2, -1.5, -1, -0.5, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]\ncf = ax.contourf(lon, lat, tmpc_adv_850*3600, contours,\n cmap='bwr', extend='both', transform=dataproj)\nplt.colorbar(cf, orientation='horizontal', pad=0, aspect=50,\n 
extendrect=True, ticks=contours)\n\n\\# Vector\nax.barbs(lon, lat, uwnd_850.to('kts').m, vwnd_850.to('kts').m,\n regrid_shape=15, transform=dataproj)\n\n\\# Titles\nplt.title('850-hPa Geopotential Heights, Temperature (C), \\\n Temp Adv (C/h), and Wind Barbs (kts)', loc='left')\nplt.title(f'VALID: {vtime}', loc='right')\n\nplt.tight_layout()\nplt.show()\n</pre></code>\n</div>\n\n500-hPa Absolute Vorticity\n<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n <ul>\n <li>Add code for plotting vorticity as filled contours with given levels and colors.</li>\n </ul>\n</div>", "fig, ax = create_map_background()\n\n# Contour 1\nclev500 = np.arange(0, 7000, 60)\ncs = ax.contour(lon, lat, hght_500, clev500, colors='k',\n linewidths=1.0, linestyles='solid', transform=dataproj)\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=4,\n fmt='%i', rightside_up=True, use_clabeltext=True)\n\n# Filled contours\n# Set contour intervals for Absolute Vorticity\nclevavor500 = [-4, -3, -2, -1, 0, 7, 10, 13, 16, 19,\n 22, 25, 28, 31, 34, 37, 40, 43, 46]\n\n# Set colorfill colors for absolute vorticity\n# purple negative\n# yellow to orange positive\ncolorsavor500 = ('#660066', '#660099', '#6600CC', '#6600FF',\n '#FFFFFF', '#ffE800', '#ffD800', '#ffC800',\n '#ffB800', '#ffA800', '#ff9800', '#ff8800',\n '#ff7800', '#ff6800', '#ff5800', '#ff5000',\n '#ff4000', '#ff3000')\n\n# YOUR CODE HERE!\n\nplt.colorbar(cf, orientation='horizontal', pad=0, aspect=50)\n\n# Vector\nax.barbs(lon, lat, uwnd_500.to('kts').m, vwnd_500.to('kts').m,\n regrid_shape=15, transform=dataproj)\n\n# Titles\nplt.title('500-hPa Geopotential Heights, Absolute Vorticity \\\n (1/s), and Wind Barbs (kts)', loc='left')\nplt.title(f'VALID: {vtime}', loc='right')\n\nplt.tight_layout()\nplt.show()", "<button data-toggle=\"collapse\" data-target=\"#sol6\" class='btn btn-primary'>View Solution</button>\n<div id=\"sol6\" class=\"collapse\">\n<code><pre>\nfig, ax = create_map_background()\n\n\\# Contour 1\nclev500 = np.arange(0, 7000, 60)\ncs = ax.contour(lon, lat, hght_500, clev500, colors='k',\n linewidths=1.0, linestyles='solid', transform=dataproj)\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=4,\n fmt='%i', rightside_up=True, use_clabeltext=True)\n\n\\# Filled contours\n\\# Set contour intervals for Absolute Vorticity\nclevavor500 = [-4, -3, -2, -1, 0, 7, 10, 13, 16, 19,\n 22, 25, 28, 31, 34, 37, 40, 43, 46]\n\n\\# Set colorfill colors for absolute vorticity\n\\# purple negative\n\\# yellow to orange positive\ncolorsavor500 = ('#660066', '#660099', '#6600CC', '#6600FF',\n '#FFFFFF', '#ffE800', '#ffD800', '#ffC800',\n '#ffB800', '#ffA800', '#ff9800', '#ff8800',\n '#ff7800', '#ff6800', '#ff5800', '#ff5000',\n '#ff4000', '#ff3000')\n\ncf = ax.contourf(lon, lat, avor_500 * 10**5, clevavor500,\n colors=colorsavor500, transform=dataproj)\nplt.colorbar(cf, orientation='horizontal', pad=0, aspect=50)\n\n\\# Vector\nax.barbs(lon, lat, uwnd_500.to('kts').m, vwnd_500.to('kts').m,\n regrid_shape=15, transform=dataproj)\n\n\\# Titles\nplt.title('500-hPa Geopotential Heights, Absolute Vorticity \\\n (1/s), and Wind Barbs (kts)', loc='left')\nplt.title(f'VALID: {vtime}', loc='right')\n\nplt.tight_layout()\nplt.show()\n</pre></code>\n</div>\n\n300-hPa Wind Speed, Divergence, and Ageostrophic Wind\n<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n <ul>\n <li>Add code to plot 300-hPa Ageostrophic Wind vectors using matplotlib's quiver function.</li>\n </ul>\n</div>", "fig, ax = create_map_background()\n\n# Contour 1\nclev300 = np.arange(0, 11000, 
120)\ncs2 = ax.contour(lon, lat, div_300 * 10**5, range(-10, 11, 2),\n colors='grey', transform=dataproj)\nplt.clabel(cs2, fontsize=10, inline=1, inline_spacing=4,\n fmt='%i', rightside_up=True, use_clabeltext=True)\n\n# Contour 2\ncs = ax.contour(lon, lat, hght_300, clev300, colors='k',\n linewidths=1.0, linestyles='solid', transform=dataproj)\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=4,\n fmt='%i', rightside_up=True, use_clabeltext=True)\n\n# Filled Contours\nspd300 = np.arange(50, 250, 20)\ncf = ax.contourf(lon, lat, wspd_300, spd300, cmap='BuPu',\n transform=dataproj, zorder=0)\nplt.colorbar(cf, orientation='horizontal', pad=0.0, aspect=50)\n\n# Vector of 300-hPa Ageostrophic Wind Vectors\n# Your code goes here!\n\n# Titles\nplt.title('300-hPa Geopotential Heights, Divergence (1/s),\\\n Wind Speed (kts), Ageostrophic Wind Vector (m/s)',\n loc='left')\nplt.title(f'VALID: {vtime}', loc='right')\n\nplt.tight_layout()\nplt.show()", "<button data-toggle=\"collapse\" data-target=\"#sol7\" class='btn btn-primary'>View Solution</button>\n<div id=\"sol7\" class=\"collapse\">\n<code><pre>\nfig, ax = create_map_background()\n\n\\# Contour 1\nclev300 = np.arange(0, 11000, 120)\ncs2 = ax.contour(lon, lat, div_300 * 10**5, range(-10, 11, 2),\n colors='grey', transform=dataproj)\nplt.clabel(cs2, fontsize=10, inline=1, inline_spacing=4,\n fmt='%i', rightside_up=True, use_clabeltext=True)\n\n\\# Contour 2\ncs = ax.contour(lon, lat, hght_300, clev300, colors='k',\n linewidths=1.0, linestyles='solid', transform=dataproj)\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=4,\n fmt='%i', rightside_up=True, use_clabeltext=True)\n\n\\# Filled Contours\nspd300 = np.arange(50, 250, 20)\ncf = ax.contourf(lon, lat, wspd_300, spd300, cmap='BuPu',\n transform=dataproj, zorder=0)\nplt.colorbar(cf, orientation='horizontal', pad=0.0, aspect=50)\n\n\\# Vector of 300-hPa Ageostrophic Wind Vectors\nax.quiver(lon, lat, uageo_300.m, vageo_300.m, regrid_shape=15,\n pivot='mid', transform=dataproj, zorder=10)\n\n\\# Titles\nplt.title('300-hPa Geopotential Heights, Divergence (1/s),\\\n Wind Speed (kts), Ageostrophic Wind Vector (m/s)',\n loc='left')\nplt.title(f'VALID: {vtime}', loc='right')\n\nplt.tight_layout()\nplt.show()\n</pre></code>\n</div>\n\nVorticity Tendency Terms\nHere is an example of a four-panel plot for a couple of terms in the Vorticity Tendency equation\nUpper-left Panel: Planetary Vorticity Advection\nUpper-right Panel: Relative Vorticity Advection\nLower-left Panel: Absolute Vorticity Advection\nLower-right Panel: Stretching Vorticity", "fig=plt.figure(1,figsize=(21.,16.))\n\n# Upper-Left Panel\nax=plt.subplot(221,projection=plotproj)\nax.set_extent([-125.,-73,25.,50.],ccrs.PlateCarree())\nax.coastlines('50m', linewidth=0.75)\nax.add_feature(cfeature.STATES,linewidth=0.5)\n\n# Contour #1\nclev500 = np.arange(0,7000,60)\ncs = ax.contour(lon,lat,hght_500,clev500,colors='k',\n linewidths=1.0,linestyles='solid',transform=dataproj)\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=3, fmt='%i', rightside_up=True, use_clabeltext=True)\n\n# Contour #2\ncs2 = ax.contour(lon,lat,f*10**4,np.arange(0,3,.05),colors='grey',\n linewidths=1.0,linestyles='dashed',transform=dataproj)\nplt.clabel(cs2, fontsize=10, inline=1, inline_spacing=3, fmt='%.2f', rightside_up=True, use_clabeltext=True)\n\n# Colorfill\ncf = ax.contourf(lon,lat,f_adv*10**10,np.arange(-10,11,0.5),\n cmap='PuOr_r',extend='both',transform=dataproj)\nplt.colorbar(cf, orientation='horizontal',pad=0.0,aspect=50,extendrect=True)\n\n# 
Vector\nax.barbs(lon,lat,uwnd_500.to('kts').m,vwnd_500.to('kts').m,regrid_shape=15,transform=dataproj)\n\n# Titles\nplt.title(r'500-hPa Geopotential Heights, Planetary Vorticity Advection ($*10^{10}$ 1/s^2)',loc='left')\nplt.title('VALID: %s' %(vtime),loc='right')\n\n\n\n# Upper-Right Panel\nax=plt.subplot(222,projection=plotproj)\nax.set_extent([-125.,-73,25.,50.],ccrs.PlateCarree())\nax.coastlines('50m', linewidth=0.75)\nax.add_feature(cfeature.STATES, linewidth=0.5)\n\n# Contour #1\nclev500 = np.arange(0,7000,60)\ncs = ax.contour(lon,lat,hght_500,clev500,colors='k',\n linewidths=1.0,linestyles='solid',transform=dataproj)\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=3, fmt='%i', rightside_up=True, use_clabeltext=True)\n\n# Contour #2\ncs2 = ax.contour(lon,lat,vor_500*10**5,np.arange(-40,41,4),colors='grey',\n linewidths=1.0,transform=dataproj)\nplt.clabel(cs2, fontsize=10, inline=1, inline_spacing=3, fmt='%d', rightside_up=True, use_clabeltext=True)\n\n# Colorfill\ncf = ax.contourf(lon,lat,relvort_adv*10**8,np.arange(-5,5.5,0.5),\n cmap='BrBG',extend='both',transform=dataproj)\nplt.colorbar(cf, orientation='horizontal',pad=0.0,aspect=50,extendrect=True)\n\n# Vector\nax.barbs(lon,lat,uwnd_500.to('kts').m,vwnd_500.to('kts').m,regrid_shape=15,transform=dataproj)\n\n# Titles\nplt.title(r'500-hPa Geopotential Heights, Relative Vorticity Advection ($*10^{8}$ 1/s^2)',loc='left')\nplt.title('VALID: %s' %(vtime),loc='right')\n\n\n\n# Lower-Left Panel\nax=plt.subplot(223,projection=plotproj)\nax.set_extent([-125.,-73,25.,50.],ccrs.PlateCarree())\nax.coastlines('50m', linewidth=0.75)\nax.add_feature(cfeature.STATES, linewidth=0.5)\n\n# Contour #1\nclev500 = np.arange(0,7000,60)\ncs = ax.contour(lon,lat,hght_500,clev500,colors='k',\n linewidths=1.0,linestyles='solid',transform=dataproj)\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=3, fmt='%i', rightside_up=True, use_clabeltext=True)\n\n# Contour #2\ncs2 = ax.contour(lon,lat,avor_500*10**5,np.arange(-5,41,4),colors='grey',\n linewidths=1.0,transform=dataproj)\nplt.clabel(cs2, fontsize=10, inline=1, inline_spacing=3, fmt='%d', rightside_up=True, use_clabeltext=True)\n\n# Colorfill\ncf = ax.contourf(lon,lat,absvort_adv*10**8,np.arange(-5,5.5,0.5),\n cmap='RdBu',extend='both',transform=dataproj)\nplt.colorbar(cf, orientation='horizontal',pad=0.0,aspect=50,extendrect=True)\n\n# Vector\nax.barbs(lon,lat,uwnd_500.to('kts').m,vwnd_500.to('kts').m,regrid_shape=15,transform=dataproj)\n\n# Titles\nplt.title(r'500-hPa Geopotential Heights, Absolute Vorticity Advection ($*10^{8}$ 1/s^2)',loc='left')\nplt.title('VALID: %s' %(vtime),loc='right')\n\n\n\n# Lower-Right Panel\nax=plt.subplot(224,projection=plotproj)\nax.set_extent([-125.,-73,25.,50.],ccrs.PlateCarree())\nax.coastlines('50m', linewidth=0.75)\nax.add_feature(cfeature.STATES, linewidth=0.5)\n\n# Contour #1\nclev500 = np.arange(0,7000,60)\ncs = ax.contour(lon,lat,hght_500,clev500,colors='k',\n linewidths=1.0,linestyles='solid',transform=dataproj)\nplt.clabel(cs, fontsize=10, inline=1, inline_spacing=3, fmt='%i', rightside_up=True, use_clabeltext=True)\n\n# Contour #2\ncs2 = ax.contour(lon,lat,gaussian_filter(avor_500*10**5,sigma=1.0),np.arange(-5,41,4),colors='grey',\n linewidths=1.0,transform=dataproj)\nplt.clabel(cs2, fontsize=10, inline=1, inline_spacing=3, fmt='%d', rightside_up=True, use_clabeltext=True)\n\n# Colorfill\ncf = ax.contourf(lon,lat,gaussian_filter(stretch_vort*10**9,sigma=1.0),np.arange(-15,16,1),\n cmap='PRGn',extend='both',transform=dataproj)\nplt.colorbar(cf, 
orientation='horizontal',pad=0.0,aspect=50,extendrect=True)\n\n# Vector\nax.barbs(lon,lat,uwnd_500.to('kts').m,vwnd_500.to('kts').m,regrid_shape=15,transform=dataproj)\n\n# Titles\nplt.title(r'500-hPa Geopotential Heights, Stretching Vorticity ($*10^{9}$ 1/s^2)',loc='left')\nplt.title('VALID: %s' %(vtime),loc='right')\n\nplt.tight_layout()\nplt.show()", "Plotting Data for Hand Calculation\nCalculating dynamic quantities with a computer is great and can allow for many different educational opportunities, but there are times when we want students to calculate those quantities by hand. So can we plot values of geopotential height, u-component of the wind, and v-component of the wind on a map? Yes! And its not too hard to do.\nSince we are using NARR data, we'll plot every third point to get a roughly 1 degree by 1 degree separation of grid points and thus an average grid spacing of 111 km (not exact, but close enough for back of the envelope calculations).\nTo do our plotting we'll be using the functionality of MetPy to plot station plot data, but we'll use our gridded data to plot around our points. To do this we'll have to make or 2D data into 1D (which is made easy by the ravel() method associated with our data objects).\nFirst we'll want to set some bounds (so that we only plot what we want) and create a mask to make plotting easier.\nSecond we'll set up our figure with a projection and then set up our \"stations\" at the grid points we desire using the MetPy class StationPlot\nhttps://unidata.github.io/MetPy/latest/api/generated/metpy.plots.StationPlot.html#metpy.plots.StationPlot\nThird we'll plot our points using matplotlibs scatter() function and use our stationplot object to plot data around our \"stations\"", "# Set lat/lon bounds for region to plot data\nLLlon = -104\nLLlat = 33\nURlon = -94\nURlat = 38.1\n\n# Set up mask so that you only plot what you want\nskip_points = (slice(None, None, 3), slice(None, None, 3))\nmask_lon = ((lon[skip_points].ravel() > LLlon + 0.05) & (lon[skip_points].ravel() < URlon + 0.01))\nmask_lat = ((lat[skip_points].ravel() < URlat - 0.01) & (lat[skip_points].ravel() > LLlat - 0.01))\nmask = mask_lon & mask_lat", "<div class=\"alert alert-success\">\n <b>EXERCISE</b>:\n <ul>\n <li>Plot markers and data around the markers.</li>\n </ul>\n</div>", "# Set up plot basics and use StationPlot class from MetPy to help with plotting\nfig = plt.figure(figsize=(14, 8))\nax = plt.subplot(111,projection=ccrs.LambertConformal(central_latitude=50,central_longitude=-107))\nax.set_extent([LLlon,URlon,LLlat,URlat],ccrs.PlateCarree())\nax.coastlines('50m', edgecolor='grey', linewidth=0.75)\nax.add_feature(cfeature.STATES, edgecolor='grey', linewidth=0.5)\n\n# Set up station plotting using only every third element from arrays for plotting\nstationplot = StationPlot(ax, lon[skip_points].ravel()[mask],\n lat[skip_points].ravel()[mask],\n transform=ccrs.PlateCarree(), fontsize=12)\n\n# Plot markers then data around marker for calculation purposes\n# Your code goes here!\n\n# Title\nplt.title('Geopotential (m; top), U-wind (m/s; Lower Left), V-wind (m/s; Lower Right)')\n\nplt.tight_layout()\nplt.show()", "<button data-toggle=\"collapse\" data-target=\"#sol8\" class='btn btn-primary'>View Solution</button>\n<div id=\"sol8\" class=\"collapse\">\n<code><pre>\n# Set up plot basics and use StationPlot class from MetPy to help with plotting\nfig = plt.figure(figsize=(14, 8))\nproj = ccrs.LambertConformal(central_latitude=50, central_longitude=-107)\nax = plt.subplot(111, 
projection=proj)\nax.coastlines('50m', edgecolor='grey', linewidth=0.75)\nax.add_feature(cfeature.STATES, edgecolor='grey', linewidth=0.5)\n\n\\# Set up station plotting using only every third\n\\# element from arrays for plotting\nstationplot = StationPlot(ax, lon[::3, ::3].ravel()[mask],\n lat[::3, ::3].ravel()[mask],\n transform=ccrs.PlateCarree(), fontsize=12)\n\n\\# Plot markers then data around marker for calculation purposes\nax.scatter(lon[::3, ::3].ravel()[mask], lat[::3, ::3].ravel()[mask],\n marker='o', transform=dataproj)\nstationplot.plot_parameter((0, 1), hght_500[::3, ::3].ravel()[mask])\nstationplot.plot_parameter((-1.5, -1), uwnd_500[::3, ::3].ravel()[mask],\n formatter='.1f')\nstationplot.plot_parameter((1.5, -1), vwnd_500[::3, ::3].ravel()[mask],\n formatter='.1f')\n\n\\# Title\nplt.title('Geopotential (m; top), U-wind (m/s; Lower Left), \\\n V-wind (m/s; Lower Right)')\n\nplt.tight_layout()\nplt.show()\n</pre></code>\n</div>" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
letsgoexploring/economicData
business-cycle-data/python/.ipynb_checkpoints/business_cycle_data-checkpoint.ipynb
mit
[ "U.S. Business Cycle Data\nThis notebook downloads, manages, and exports several data series for studying business cycles in the US. Four files are created in the csv directory:\nFile name | Description |\n---------------------------------------------|------------------------------------------------------|\nrbc_data_actual_trend.csv | RBC data with actual and trend values |\nrbc_data_actual_trend_cycle.csv | RBC data with actual, trend, and cycle values |\nbusiness_cycle_data_actual_trend.csv | Larger data set with actual and trend values |\nbusiness_cycle_data_actual_trend_cycle.csv | Larger data set with actual, trend, and cycle values |\nThe first two files are useful for studying basic RBC models. The second two contain all of the RBC data plus money, inflation, and inflation data.", "import pandas as pd\nimport numpy as np\nimport fredpy as fp\nimport matplotlib.pyplot as plt\n\nplt.style.use('classic')\n%matplotlib inline\n\n# Export path: Set to empty string '' if you want to export data to current directory\nexport_path = '../Csv/'\n\n# Load FRED API key\nfp.api_key = fp.load_api_key('fred_api_key.txt')", "Download and manage data\nDownload the following series from FRED:\nFRED series ID | Name | Frequency |\n---------------|------|-----------|\nGDP | Gross Domestic Product | Q |\nPCEC | Personal Consumption Expenditures | Q |\nGPDI | Gross Private Domestic Investment | Q |\nGCE | Government Consumption Expenditures and Gross Investment | Q |\nEXPGS | Exports of Goods and Services | Q |\nIMPGS | Imports of Goods and Services | Q |\nNETEXP | Net Exports of Goods and Services | Q |\nHOANBS | Nonfarm Business Sector: Hours Worked for All Employed Persons | Q |\nGDPDEF | Gross Domestic Product: Implicit Price Deflator | Q |\nPCECTPI | Personal Consumption Expenditures: Chain-type Price Index | Q |\nCPIAUCSL | Consumer Price Index for All Urban Consumers: All Items in U.S. City Average | M |\nM2SL | M2 | M |\nTB3MS | 3-Month Treasury Bill Secondary Market Rate | M |\nUNRATE | Unemployment Rate | M |\nMonthly series (M2, T-Bill, unemployment rate) are converted to quarterly frequencies. CPI and PCE inflation rates are computed as the percent change in the indices over the previous year. GDP, consumption, investment, government expenditures, net exports and M2 are deflated by the GDP deflator. 
The data ranges for nataional accounts series (GDP, consumption, investment, government expenditures, net exports) and hours are equalized to the largest common date range.", "# Download data\ngdp = fp.series('GDP')\nconsumption = fp.series('PCEC')\ninvestment = fp.series('GPDI')\ngovernment = fp.series('GCE')\nexports = fp.series('EXPGS')\nimports = fp.series('IMPGS')\nnet_exports = fp.series('NETEXP')\nhours = fp.series('HOANBS')\ndeflator = fp.series('GDPDEF')\npce_deflator = fp.series('PCECTPI')\ncpi = fp.series('CPIAUCSL')\nm2 = fp.series('M2SL')\ntbill_3mo = fp.series('TB3MS')\nunemployment = fp.series('UNRATE')\n\n# Base year for NIPA deflators\ncpi_base_year = cpi.units.split(' ')[1].split('=')[0]\n\n# Base year for CPI\nnipa_base_year = deflator.units.split(' ')[1].split('=')[0]\n\n# Convert monthly M2, 3-mo T-Bill, and unemployment to quarterly\nm2 = m2.as_frequency('Q')\ntbill_3mo = tbill_3mo.as_frequency('Q')\nunemployment = unemployment.as_frequency('Q')\ncpi = cpi.as_frequency('Q')\n\n# Deflate GDP, consumption, investment, government expenditures, net exports, and m2 with the GDP deflator\ndef deflate(series,deflator):\n \n deflator, series = fp.window_equalize([deflator, series])\n series = series.divide(deflator).times(100)\n \n return series\n\ngdp = deflate(gdp,deflator)\nconsumption = deflate(consumption,deflator)\ninvestment = deflate(investment,deflator)\ngovernment = deflate(government,deflator)\nnet_exports = deflate(net_exports,deflator)\nexports = deflate(exports,deflator)\nimports = deflate(imports,deflator)\nm2 = deflate(m2,deflator)\n\n# pce inflation as percent change over past year\npce_deflator = pce_deflator.apc()\n\n# cpi inflation as percent change over past year\ncpi = cpi.apc()\n\n# GDP deflator inflation as percent change over past year\ndeflator = deflator.apc()\n\n# Convert unemployment, 3-mo T-Bill, pce inflation, cpi inflation, GDP deflator inflation data to rates \nunemployment = unemployment.divide(100)\ntbill_3mo = tbill_3mo.divide(100)\npce_deflator = pce_deflator.divide(100)\ncpi = cpi.divide(100)\ndeflator = deflator.divide(100)\n\n# Make sure that the RBC data has the same data range\ngdp,consumption,investment,government,exports,imports,net_exports,hours = fp.window_equalize([gdp,consumption,investment,government,exports,imports,net_exports,hours])\n\n# T-Bill data doesn't neet to go all the way back to 1930s\ntbill_3mo = tbill_3mo.window([gdp.data.index[0],'2222'])\n\nmetadata = pd.Series(dtype=str,name='Values')\nmetadata['nipa_base_year'] = nipa_base_year\nmetadata['cpi_base_year'] = cpi_base_year\n\nmetadata.to_csv(export_path+'/business_cycle_metadata.csv')", "Compute capital stock for US using the perpetual inventory method\nNext, compute the quarterly capital stock series for the US using the perpetual inventory method. The discrete-time Solow growth model is given by:\n\\begin{align}\nY_t & = A_tK_t^{\\alpha}L_t^{1-\\alpha} \\tag{1}\\\nC_t & = (1-s)Y_t \\tag{2}\\\nY_t & = C_t + I_t \\tag{3}\\\nK_{t+1} & = I_t + (1-\\delta)K_t \\tag{4}\\\nA_{t+1} & = (1+g)A_t \\tag{5}\\\nL_{t+1} & = (1+n)L_t \\tag{6}.\n\\end{align}\nHere the model is assumed to be quarterly so $n$ is the quarterly growth rate of labor hours, $g$ is the quarterly growth rate of TFP, and $\\delta$ is the quarterly rate of depreciation of the capital stock. 
Given a value of the quarterly depreciation rate $\\delta$, an investment series $I_t$, and an initial capital stock $K_0$, the law of motion for the capital stock, Equation (4), can be used to compute an implied capital series. But we don't know $K_0$ or $\\delta$ so we'll have to calibrate these values using statistics computed from the data that we've already obtained.\nLet lowercase letters denote a variable that's been divided by $A_t^{1/(1-\\alpha)}L_t$. E.g.,\n\\begin{align}\ny_t = \\frac{Y_t}{A_t^{1/(1-\\alpha)}L_t}\\tag{7}\n\\end{align}\nThen (after substituting consumption from the model), the scaled version of the model can be written as: \n\\begin{align}\ny_t & = k_t^{\\alpha} \\tag{8}\\\ni_t & = sy_t \\tag{9}\\\nk_{t+1} & = i_t + (1-\\delta-n-g')k_t,\\tag{10}\n\\end{align}\nwhere $g' = g/(1-\\alpha)$ is the growth rate of $A_t^{1/(1-\\alpha)}$. In the steady state:\n\\begin{align}\nk & = \\left(\\frac{s}{\\delta+n+g'}\\right)^{\\frac{1}{1-\\alpha}} \\tag{11}\n\\end{align}\nwhich means that the ratio of capital to output is constant:\n\\begin{align}\n\\frac{k}{y} & = \\frac{s}{\\delta+n+g'} \\tag{12}\n\\end{align}\nand therefore the steady state ratio of depreciation to output is:\n\\begin{align}\n\\overline{\\delta K/ Y} & = \\frac{\\delta s}{\\delta + n + g'} \\tag{13}\n\\end{align}\nwhere $\\overline{\\delta K/ Y}$ is the long-run average ratio of depreciation to output. We can use Equation (13) to calibrate $\\delta$ given $\\overline{\\delta K/ Y}$, $s$, $n$, and $g'$.\nFurthermore, in the steady state, the growth rate of output is constant:\n\\begin{align}\n\\frac{\\Delta Y}{Y} & = n + g' \\tag{14}\n\\end{align} \n\nAssume $\\alpha = 0.35$.\nCalibrate $s$ as the average of ratio of investment to GDP.\nCalibrate $n$ as the average quarterly growth rate of labor hours.\nCalibrate $g'$ as the average quarterly growth rate of real GDP minus n.\nCalculate the average ratio of depreciation to GDP $\\overline{\\delta K/ Y}$ and use the result to calibrate $\\delta$. That is, find the average ratio of Current-Cost Depreciation of Fixed Assets (FRED series ID: M1TTOTL1ES000) to GDP (FRED series ID: GDPA). Then calibrate $\\delta$ from the following steady state relationship:\n\\begin{align}\n\\delta & = \\frac{\\left( \\overline{\\delta K/ Y} \\right)\\left(n + g' \\right)}{s - \\left( \\overline{\\delta K/ Y} \\right)} \\tag{15}\n\\end{align}\nCalibrate $K_0$ by asusming that the capital stock is initially equal to its steady state value:\n\\begin{align}\nK_0 & = \\left(\\frac{s}{\\delta + n + g'}\\right) Y_0 \\tag{16}\n\\end{align}\n\nThen, armed with calibrated values for $K_0$ and $\\delta$, compute $K_1, K_2, \\ldots$ recursively. 
See Timothy Kehoe's notes for more information on the perpetual inventory method:\nhttp://users.econ.umn.edu/~tkehoe/classes/GrowthAccountingNotes.pdf", "# Set the capital share of income\nalpha = 0.35\n\n# Average saving rate\ns = np.mean(investment.data/gdp.data)\n\n# Average quarterly labor hours growth rate\nn = (hours.data[-1]/hours.data[0])**(1/(len(hours.data)-1)) - 1\n\n# Average quarterly real GDP growth rate\ng = ((gdp.data[-1]/gdp.data[0])**(1/(len(gdp.data)-1)) - 1) - n\n\n# Compute annual depreciation rate\ndepA = fp.series('M1TTOTL1ES000')\ngdpA = fp.series('gdpa')\n\ngdpA = gdpA.window([gdp.data.index[0],gdp.data.index[-1]])\ngdpA,depA = fp.window_equalize([gdpA,depA])\n\ndeltaKY = np.mean(depA.data/gdpA.data)\ndelta = (n+g)*deltaKY/(s-deltaKY)\n\n# print calibrated values:\nprint('Avg saving rate: ',round(s,5))\nprint('Avg annual labor growth:',round(4*n,5))\nprint('Avg annual gdp growth: ',round(4*g,5))\nprint('Avg annual dep rate: ',round(4*delta,5))\n\n# Construct the capital series. Note that the GPD and investment data are reported on an annualized basis\n# so divide by 4 to get quarterly data.\ncapital = np.zeros(len(gdp.data))\ncapital[0] = gdp.data[0]/4*s/(n+g+delta)\n\nfor t in range(len(gdp.data)-1):\n capital[t+1] = investment.data[t]/4 + (1-delta)*capital[t]\n\n# Save in a fredpy series\ncapital = fp.to_fred_series(data = capital,dates =gdp.data.index,units = gdp.units,title='Capital stock of the US',frequency='Quarterly')", "Compute total factor productivity\nUse the Cobb-Douglas production function:\n\\begin{align}\nY_t & = A_tK_t^{\\alpha}L_t^{1-\\alpha} \\tag{17}\n\\end{align}\nand data on GDP, capital, and hours with $\\alpha=0.35$ to compute an implied series for $A_t$.", "# Compute TFP\ntfp = gdp.data/capital.data**alpha/hours.data**(1-alpha)\ntfp = fp.to_fred_series(data = tfp,dates =gdp.data.index,units = gdp.units,title='TFP of the US',frequency='Quarterly')", "Additional data management\nNow that we have used the aggregate production data to compute an implied capital stock and TFP, we can scale the production data and M2 by the population.", "# Convert real GDP, consumption, investment, government expenditures, net exports and M2\n# into thousands of dollars per civilian 16 and over\ngdp = gdp.per_capita(civ_pop=True).times(1000)\nconsumption = consumption.per_capita(civ_pop=True).times(1000)\ninvestment = investment.per_capita(civ_pop=True).times(1000)\ngovernment = government.per_capita(civ_pop=True).times(1000)\nexports = exports.per_capita(civ_pop=True).times(1000)\nimports = imports.per_capita(civ_pop=True).times(1000)\nnet_exports = net_exports.per_capita(civ_pop=True).times(1000)\nhours = hours.per_capita(civ_pop=True).times(1000)\ncapital = capital.per_capita(civ_pop=True).times(1000)\nm2 = m2.per_capita(civ_pop=True).times(1000)\n\n\n# Scale hours per person to equal 100 on October (Quarter III) of GDP deflator base year.\nhours.data = hours.data/hours.data.loc[base_year+'-10-01']*100", "Plot aggregate data", "fig, axes = plt.subplots(3,4,figsize=(6*4,4*3))\n\naxes[0][0].plot(gdp.data)\naxes[0][0].set_title('GDP')\naxes[0][0].set_ylabel('Thousands of '+base_year+' $')\n\naxes[0][1].plot(consumption.data)\naxes[0][1].set_title('Consumption')\naxes[0][1].set_ylabel('Thousands of '+base_year+' $')\n\naxes[0][2].plot(investment.data)\naxes[0][2].set_title('Investment')\naxes[0][2].set_ylabel('Thousands of '+base_year+' $')\n\naxes[0][3].plot(government.data)\naxes[0][3].set_title('Gov expenditure')\naxes[0][3].set_ylabel('Thousands of 
'+base_year+' $')\n\naxes[1][0].plot(capital.data)\naxes[1][0].set_title('Capital')\naxes[1][0].set_ylabel('Thousands of '+base_year+' $')\n\naxes[1][1].plot(hours.data)\naxes[1][1].set_title('Hours')\naxes[1][1].set_ylabel('Index ()'+base_year+'=100)')\n\naxes[1][2].plot(tfp.data)\naxes[1][2].set_title('TFP')\n\naxes[1][3].plot(m2.data)\naxes[1][3].set_title('M2')\naxes[1][3].set_ylabel('Thousands of '+base_year+' $')\n\naxes[2][0].plot(tbill_3mo.data*100)\naxes[2][0].set_title('3mo T-Bill')\naxes[2][0].set_ylabel('Percent')\n\naxes[2][1].plot(pce_deflator.data*100)\naxes[2][1].set_title('PCE Inflation')\naxes[2][1].set_ylabel('Percent')\n\naxes[2][2].plot(cpi.data*100)\naxes[2][2].set_title('CPI Inflation')\naxes[2][2].set_ylabel('Percent')\n\naxes[2][3].plot(unemployment.data*100)\naxes[2][3].set_title('Unemployment rate')\naxes[2][3].set_ylabel('Percent');", "Compute HP filter of data", "# HP filter to isolate trend and cyclical components\ngdp_log_cycle,gdp_log_trend= gdp.log().hp_filter()\nconsumption_log_cycle,consumption_log_trend= consumption.log().hp_filter()\ninvestment_log_cycle,investment_log_trend= investment.log().hp_filter()\ngovernment_log_cycle,government_log_trend= government.log().hp_filter()\nexports_log_cycle,exports_log_trend= exports.log().hp_filter()\nimports_log_cycle,imports_log_trend= imports.log().hp_filter()\n# net_exports_log_cycle,net_exports_log_trend= net_exports.log().hp_filter()\ncapital_log_cycle,capital_log_trend= capital.log().hp_filter()\nhours_log_cycle,hours_log_trend= hours.log().hp_filter()\ntfp_log_cycle,tfp_log_trend= tfp.log().hp_filter()\ndeflator_cycle,deflator_trend= deflator.hp_filter()\npce_deflator_cycle,pce_deflator_trend= pce_deflator.hp_filter()\ncpi_cycle,cpi_trend= cpi.hp_filter()\nm2_log_cycle,m2_log_trend= m2.log().hp_filter()\ntbill_3mo_cycle,tbill_3mo_trend= tbill_3mo.hp_filter()\nunemployment_cycle,unemployment_trend= unemployment.hp_filter()", "Plot aggregate data with trends", "fig, axes = plt.subplots(3,4,figsize=(6*4,4*3))\n\naxes[0][0].plot(gdp.data)\naxes[0][0].plot(np.exp(gdp_log_trend.data),c='r')\naxes[0][0].set_title('GDP')\naxes[0][0].set_ylabel('Thousands of '+base_year+' $')\n\naxes[0][1].plot(consumption.data)\naxes[0][1].plot(np.exp(consumption_log_trend.data),c='r')\naxes[0][1].set_title('Consumption')\naxes[0][1].set_ylabel('Thousands of '+base_year+' $')\n\naxes[0][2].plot(investment.data)\naxes[0][2].plot(np.exp(investment_log_trend.data),c='r')\naxes[0][2].set_title('Investment')\naxes[0][2].set_ylabel('Thousands of '+base_year+' $')\n\naxes[0][3].plot(government.data)\naxes[0][3].plot(np.exp(government_log_trend.data),c='r')\naxes[0][3].set_title('Gov expenditure')\naxes[0][3].set_ylabel('Thousands of '+base_year+' $')\n\naxes[1][0].plot(capital.data)\naxes[1][0].plot(np.exp(capital_log_trend.data),c='r')\naxes[1][0].set_title('Capital')\naxes[1][0].set_ylabel('Thousands of '+base_year+' $')\n\naxes[1][1].plot(hours.data)\naxes[1][1].plot(np.exp(hours_log_trend.data),c='r')\naxes[1][1].set_title('Hours')\naxes[1][1].set_ylabel('Index ()'+base_year+'=100)')\n\naxes[1][2].plot(tfp.data)\naxes[1][2].plot(np.exp(tfp_log_trend.data),c='r')\naxes[1][2].set_title('TFP')\n\naxes[1][3].plot(m2.data)\naxes[1][3].plot(np.exp(m2_log_trend.data),c='r')\naxes[1][3].set_title('M2')\naxes[1][3].set_ylabel('Thousands of '+base_year+' $')\n\naxes[2][0].plot(tbill_3mo.data*100)\naxes[2][0].plot(tbill_3mo_trend.data*100,c='r')\naxes[2][0].set_title('3mo 
T-Bill')\naxes[2][0].set_ylabel('Percent')\n\naxes[2][1].plot(pce_deflator.data*100)\naxes[2][1].plot(pce_deflator_trend.data*100,c='r')\naxes[2][1].set_title('PCE Inflation')\naxes[2][1].set_ylabel('Percent')\n\naxes[2][2].plot(cpi.data*100)\naxes[2][2].plot(cpi_trend.data*100,c='r')\naxes[2][2].set_title('CPI Inflation')\naxes[2][2].set_ylabel('Percent')\n\naxes[2][3].plot(unemployment.data*100)\naxes[2][3].plot(unemployment_trend.data*100,c='r')\naxes[2][3].set_title('Unemployment rate')\naxes[2][3].set_ylabel('Percent')\n\n\nax = fig.add_subplot(1,1,1)\nax.axis('off')\nax.plot(0,0,label='Actual')\nax.plot(0,0,c='r',label='Trend')\n\nax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),ncol=2)", "Plot cyclical components of the data", "fig, axes = plt.subplots(3,4,figsize=(6*4,4*3))\n\naxes[0][0].plot(gdp_log_cycle.data)\naxes[0][0].set_title('GDP')\naxes[0][0].set_ylabel('Thousands of '+base_year+' $')\n\naxes[0][1].plot(consumption_log_cycle.data)\naxes[0][1].set_title('Consumption')\naxes[0][1].set_ylabel('Thousands of '+base_year+' $')\n\naxes[0][2].plot(investment_log_cycle.data)\naxes[0][2].set_title('Investment')\naxes[0][2].set_ylabel('Thousands of '+base_year+' $')\n\naxes[0][3].plot(government_log_cycle.data)\naxes[0][3].set_title('Gov expenditure')\naxes[0][3].set_ylabel('Thousands of '+base_year+' $')\n\naxes[1][0].plot(capital_log_cycle.data)\naxes[1][0].set_title('Capital')\naxes[1][0].set_ylabel('Thousands of '+base_year+' $')\n\naxes[1][1].plot(hours_log_cycle.data)\naxes[1][1].set_title('Hours')\naxes[1][1].set_ylabel('Index ()'+base_year+'=100)')\n\naxes[1][2].plot(tfp_log_cycle.data)\naxes[1][2].set_title('TFP')\n\naxes[1][3].plot(m2_log_cycle.data)\naxes[1][3].set_title('M2')\naxes[1][3].set_ylabel('Thousands of '+base_year+' $')\n\naxes[2][0].plot(tbill_3mo_cycle.data)\naxes[2][0].set_title('3mo T-Bill')\naxes[2][0].set_ylabel('Percent')\n\naxes[2][1].plot(pce_deflator_cycle.data)\naxes[2][1].set_title('PCE Inflation')\naxes[2][1].set_ylabel('Percent')\n\naxes[2][2].plot(cpi_cycle.data)\naxes[2][2].set_title('CPI Inflation')\naxes[2][2].set_ylabel('Percent')\n\naxes[2][3].plot(unemployment_cycle.data)\naxes[2][3].set_title('Unemployment rate')\naxes[2][3].set_ylabel('Percent');", "Create data files", "# Create a DataFrame with actual and trend data\ndata = pd.DataFrame({\n 'gdp':gdp.data,\n 'gdp_trend':np.exp(gdp_log_trend.data),\n 'gdp_cycle':gdp_log_cycle.data,\n 'consumption':consumption.data,\n 'consumption_trend':np.exp(consumption_log_trend.data),\n 'consumption_cycle':consumption_log_cycle.data,\n 'investment':investment.data,\n 'investment_trend':np.exp(investment_log_trend.data),\n 'investment_cycle':investment_log_cycle.data,\n 'government':government.data,\n 'government_trend':np.exp(government_log_trend.data),\n 'government_cycle':government_log_cycle.data,\n 'exports':exports.data,\n 'exports_trend':np.exp(exports_log_trend.data),\n 'exports_cycle':exports_log_cycle.data,\n 'imports':imports.data,\n 'imports_trend':np.exp(imports_log_trend.data),\n 'imports_cycle':imports_log_cycle.data,\n 'hours':hours.data,\n 'hours_trend':np.exp(hours_log_trend.data),\n 'hours_cycle':hours_log_cycle.data,\n 'capital':capital.data,\n 'capital_trend':np.exp(capital_log_trend.data),\n 'capital_cycle':capital_log_cycle.data,\n 'tfp':tfp.data,\n 'tfp_trend':np.exp(tfp_log_trend.data),\n 'tfp_cycle':tfp_log_cycle.data,\n 'real_m2':m2.data,\n 'real_m2_trend':np.exp(m2_log_trend.data),\n 'real_m2_cycle':m2_log_cycle.data,\n 't_bill_3mo':tbill_3mo.data,\n 
't_bill_3mo_trend':tbill_3mo_trend.data,\n 't_bill_3mo_cycle':tbill_3mo_cycle.data,\n 'cpi_inflation':cpi.data,\n 'cpi_inflation_trend':cpi_trend.data,\n 'cpi_inflation_cycle':cpi_cycle.data,\n 'pce_inflation':pce_deflator.data,\n 'pce_inflation_trend':pce_deflator_trend.data,\n 'pce_inflation_cycle':pce_deflator_cycle.data,\n 'unemployment':unemployment.data,\n 'unemployment_trend':unemployment_trend.data,\n 'unemployment_cycle':unemployment_cycle.data,\n })\n\n# RBC data\ncolumns_ordered =[]\nnames = ['gdp','consumption','investment','hours','capital','tfp']\nfor name in names:\n columns_ordered.append(name)\n columns_ordered.append(name+'_trend')\n \ndata[columns_ordered].dropna().to_csv(export_path+'rbc_data_actual_trend.csv',index=True)\n\n# Create a DataFrame with actual, trend, and cycle data\ncolumns_ordered =[]\nnames = ['gdp','consumption','investment','hours','capital','tfp']\nfor name in names:\n columns_ordered.append(name)\n columns_ordered.append(name+'_trend')\n columns_ordered.append(name+'_cycle')\n \ndata[columns_ordered].dropna().to_csv(export_path+'rbc_data_actual_trend_cycle.csv',index=True)\n\n# More comprehensive Business Cycle Data\ncolumns_ordered =[]\nnames = ['gdp','consumption','investment','hours','capital','tfp','real_m2','t_bill_3mo','pce_inflation','unemployment']\nfor name in names:\n columns_ordered.append(name)\n columns_ordered.append(name+'_trend')\n\ndata[columns_ordered].dropna().to_csv(export_path+'business_cycle_data_actual_trend.csv',index=True)\n\n# Create a DataFrame with actual, trend, and cycle data\ncolumns_ordered =[]\nnames = ['gdp','consumption','investment','hours','capital','tfp','real_m2','t_bill_3mo','pce_inflation','unemployment']\nfor name in names:\n columns_ordered.append(name)\n columns_ordered.append(name+'_trend')\n columns_ordered.append(name+'_cycle')\n \ndata[columns_ordered].dropna().to_csv(export_path+'business_cycle_data_actual_trend_cycle.csv')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
dmittov/misc
BikeSharing-Linear.ipynb
apache-2.0
[ "Linear methods\nhttps://www.kaggle.com/c/bike-sharing-demand", "# !pip install -U kaggle\n# register the token in you kaggle profile & save it to ~/.kaggle/kaggle.json\n# !kaggle competitions download -c bike-sharing-demand\n\nimport pandas as pd\nfrom sklearn import linear_model\nfrom scipy import stats\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, make_scorer\nimport numpy as np\nfrom dateutil.parser import parse\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.model_selection import cross_val_score\n%matplotlib inline\n\n# to prevent warining inside sklearn code\npd.options.mode.chained_assignment = None\n\ndf = pd.read_csv(\"train.csv\")\ndf.shape\n\ndf.head()", "At first, we need custom score function, described in task.\nhttps://www.kaggle.com/c/bike-sharing-demand/overview/evaluation\n\nWhy do we need +1 in score function?", "def rmsle(y_true, y_pred):\n y_pred_clipped = np.clip(y_pred, 0., None)\n return mean_squared_error(np.log1p(y_true), np.log1p(y_pred_clipped)) ** .5", "What happens without np.clip?\n\nLet's start with the exisiting features and simple linear regression.\nAll that feature extractors and grid search would be more clear further.", "class SimpleFeatureExtractor(BaseEstimator, TransformerMixin):\n \n def fit(self, X, y=None):\n return self \n \n def transform(self, X, y=None):\n return X[[\"holiday\", \"workingday\", \"season\", \"weather\", \"temp\", \"atemp\", \"humidity\", \"windspeed\"]].values\n\nexctractor = SimpleFeatureExtractor()\nclf = Pipeline([\n (\"extractor\", exctractor),\n (\"regression\", linear_model.LinearRegression()),\n])\nparam_grid = {}\nscorerer = make_scorer(rmsle, greater_is_better=False)\nresearcher = GridSearchCV(clf, param_grid, scoring=scorerer, cv=5, n_jobs=4, verbose=1, refit=False)\nresearcher.fit(df, df[\"count\"].values)", "Hyperparameters Searcher always maximizes the score function, so if we need to decrease it, it just adds the minus.", "researcher.best_score_", "Add regularization and grid search the hyperparameters\nNow it's more clear why we have Grid Searcher ;-)", "exctractor = SimpleFeatureExtractor()\nclf = Pipeline([\n (\"extractor\", exctractor),\n (\"regression\", linear_model.ElasticNet()),\n])\n\nparam_grid = {\n \"regression__alpha\": np.logspace(-3, 2, 10),\n \"regression__l1_ratio\": np.linspace(0, 1, 10)\n}\nscorerer = make_scorer(rmsle, greater_is_better=False)\nresearcher = GridSearchCV(clf, param_grid, scoring=scorerer, cv=5, n_jobs=4, verbose=1, refit=False)\nresearcher.fit(df, df[\"count\"].values)\n\nresearcher.best_score_\n\nresearcher.best_params_", "Try to add some custom features", "class FeatureExtractor(BaseEstimator, TransformerMixin):\n \n ohe = OneHotEncoder(categories='auto', sparse=False)\n scaler = StandardScaler()\n \n categorical_columns = [\"week_day\", \"hour\", \"season\", \"weather\"]\n numerical_columns = [\"temp\", \"atemp\", \"humidity\", \"windspeed\"]\n \n def _add_features(self, X):\n X[\"week_day\"] = X.datetime.apply(lambda dttm: parse(dttm).weekday())\n X[\"hour\"] = X.datetime.apply(lambda dttm: parse(dttm).hour)\n \n def _combine(self, *feature_groups):\n return np.hstack(feature_groups)\n \n def collect_stats(self, X):\n self._add_features(X)\n self.ohe.fit(X[self.categorical_columns])\n 
self.scaler.fit(X[self.numerical_columns])\n \n def fit(self, X, y=None):\n return self \n \n def transform(self, X, y=None):\n self._add_features(X)\n custom_binary_features = self.ohe.transform(X[self.categorical_columns])\n scaled_features = self.scaler.transform(X[self.numerical_columns])\n return self._combine(\n custom_binary_features, \n scaled_features,\n X[[\"holiday\", \"workingday\"]].values\n ) \n\nexctractor = FeatureExtractor()\nexctractor.collect_stats(df)\nclf = Pipeline([\n (\"extractor\", exctractor),\n (\"regression\", linear_model.ElasticNet()),\n])\n\nparam_grid = {\n \"regression__alpha\": np.logspace(-3, 2, 10),\n \"regression__l1_ratio\": np.linspace(0, 1, 10)\n}\npd.options.mode.chained_assignment = None\nscorerer = make_scorer(rmsle, greater_is_better=False)\nresearcher = GridSearchCV(clf, param_grid, scoring=scorerer, cv=5, n_jobs=4, verbose=1, refit=False)\nresearcher.fit(df, df[\"count\"].values)\n\nresearcher.best_score_\n\nresearcher.best_params_\n\nscorerer = make_scorer(mean_squared_error, greater_is_better=False)\nscores = cross_val_score(clf, df, df[\"count\"].values, cv=5, n_jobs=4, scoring=scorerer)\nnp.mean((-np.array(scores)) ** .5)", "What we can theoretically get if we optimize RMSE", "param_grid = {\n \"regression__alpha\": np.logspace(-3, 2, 10),\n \"regression__l1_ratio\": np.linspace(0, 1, 10)\n}\npd.options.mode.chained_assignment = None\n\ndef rmse(y_true, y_pred):\n return mean_squared_error(y_true, y_pred) ** .5\n\nscorerer = make_scorer(rmse, greater_is_better=False)\nresearcher = GridSearchCV(clf, param_grid, scoring=scorerer, cv=5, n_jobs=4, verbose=1, refit=False)\nresearcher.fit(df, df[\"count\"].values)\n\nresearcher.best_score_\n\nresearcher.best_params_", "11 min!!! Now we also learn FeaureExtractor every time and the pipeline becomes heavier. Why? Can you speed it up?\n\nWhat was the point about Maximum Likelihood\nThe process is described by possion distribution better\nhttps://en.wikipedia.org/wiki/Poisson_distribution\nIn probability theory and statistics, the Poisson distribution (French pronunciation: ​[pwasɔ̃]; in English often rendered /ˈpwɑːsɒn/), named after French mathematician Siméon Denis Poisson, is a discrete probability distribution that expresses the probability of a given number of events occurring in a fixed interval of time or space if these events occur with a known constant rate and independently of the time since the last event.[1] The Poisson distribution can also be used for the number of events in other specified intervals such as distance, area or volume.\nThe other point of view: we have 200 people with 3% probability to pick up the bike.\nWhat about CLT??? It works when $n \\rightarrow \\inf$. For poisson distribution there is a special case called De Moivre–Laplace theorem.\nThe list of different kinds of Generalized Linear Regression methods in sklearn: https://scikit-learn.org/stable/modules/linear_model.html\nAnd there is no Poisson regression there.\nSo, let's write a probabilistic model for poisson distribution and optimize maximum likelihood.\nHausaufgaben: try to do it.\nHint: \nstart from the assumption $\\hat{y} = \\exp{\\langle x, \\theta \\rangle}$ and find the derivative of log-likelihood by $\\theta$. 
It's zero + check the sign of the second derivative.\nThe conclusion: we can simulate poisson regression with simple wrapper.\nPoisson hierarchical regression\nCheck if we have issues with np.log(y == 0)", "df[df[\"count\"] == 0]\n\nnp.log(0)\n\nclass PoissonRegression(linear_model.ElasticNet):\n \n def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,\n normalize=False, precompute=False, max_iter=1000,\n copy_X=True, tol=1e-4, warm_start=False, positive=False,\n random_state=None, selection='cyclic'):\n super().__init__(alpha, l1_ratio, fit_intercept, normalize, precompute, max_iter,\n copy_X, tol, warm_start, positive, random_state, selection)\n \n def fit(self, X, y, *args):\n return super().fit(X, np.log(y), *args)\n \n def predict(self, X):\n return np.exp(super().predict(X))\n\nexctractor = FeatureExtractor()\nexctractor.collect_stats(df)\nclf = Pipeline([\n (\"extractor\", exctractor),\n (\"regression\", PoissonRegression()),\n])\nparam_grid = {\n \"regression__alpha\": np.logspace(-5, 1, 20),\n \"regression__l1_ratio\": np.linspace(0, 1, 10)\n}\npd.options.mode.chained_assignment = None\nscorerer = make_scorer(rmsle, greater_is_better=False)\nresearcher = GridSearchCV(clf, param_grid, scoring=scorerer, cv=5, n_jobs=4, verbose=1, refit=False)\nresearcher.fit(df, df[\"count\"].values)\n\nresearcher.best_params_\n\nresearcher.best_score_", "In terms of MSE the score is worse. But it doesn't mean MSE is the most relevant metric. At least poisson regression never predicts negative values.\n\nWhen you expect poisson regression to have better MSE score?", "scorerer = make_scorer(mean_squared_error, greater_is_better=False)\nscores = cross_val_score(clf, df, df[\"count\"].values, cv=5, n_jobs=4, scoring=scorerer)\nnp.mean((-np.array(scores)) ** .5)", "Skill vs Education\nWhen you need to predict counts, try to use Poisson Regression.\nYou can get good enough results with experience, but you can't handle on just your skills when face a new type of tasks. More complicated tasks you have less your previous experience can help you.\nThe key to success is to have good enough education. With education you can do research.", "df_test = pd.read_csv(\"test.csv\")\ncols = df_test.columns\n\nall_data = pd.concat([df[cols], df_test[cols]])\n\nexctractor = FeatureExtractor()\nexctractor.collect_stats(all_data)\nclf = Pipeline([\n (\"extractor\", exctractor),\n (\"regression\", PoissonRegression(alpha=0.001623776739188721, l1_ratio=0.1111111111111111)),\n])\nclf.fit(df, df[\"count\"].values)\ndf_test[\"count\"] = clf.predict(df_test)\n\ndf_test[[\"datetime\",\"count\"]].set_index(\"datetime\").to_csv(\"linear.csv\")\n\n# !kaggle competitions submit -f linear.csv -m \"linear regression\" bike-sharing-demand\n# score 0.64265", "Further steps: use Random Forest Regressor & Catboost to get into top 10%." ]
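The bike-sharing notebook above poses two questions about the score function: why the +1 (i.e. `log1p`) is needed, and what happens without `np.clip`. A small numeric check with toy values makes both points concrete; the arrays below are made up for illustration.

```python
# Toy check of the two rmsle questions posed above.
import numpy as np
from sklearn.metrics import mean_squared_error

y_true = np.array([0.0, 10.0, 100.0])   # rental counts can legitimately be zero
y_pred = np.array([-5.0, 12.0, 90.0])   # an unconstrained linear model can go negative

# Without the +1 (plain log instead of log1p), a zero count gives -inf:
print(np.log(y_true))      # [-inf, 2.30..., 4.60...]
print(np.log1p(y_true))    # [0.0, 2.39..., 4.61...]  finite

# Without np.clip, log1p of a prediction below -1 is NaN, so the metric breaks:
print(np.log1p(y_pred))    # [nan, ...]

# With clipping to [0, inf) the metric is well defined:
clipped = np.clip(y_pred, 0.0, None)
print(mean_squared_error(np.log1p(y_true), np.log1p(clipped)) ** 0.5)
```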
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
billzhao1990/CS231n-Spring-2017
assignment2/BatchNormalization.ipynb
mit
[ "Batch Normalization\nOne way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam. Another strategy is to change the architecture of the network to make it easier to train. One idea along these lines is batch normalization which was recently proposed by [3].\nThe idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution. However even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training process the distribution of features at each layer of the network will shift as the weights of each layer are updated.\nThe authors of [3] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks more difficult. To overcome this problem, [3] proposes to insert batch normalization layers into the network. At training time, a batch normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and standard deviations are then used to center and normalize the features of the minibatch. A running average of these means and standard deviations is kept during training, and at test time these running averages are used to center and normalize features.\nIt is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be optimal for certain layers to have features that are not zero-mean or unit variance. To this end, the batch normalization layer includes learnable shift and scale parameters for each feature dimension.\n[3] Sergey Ioffe and Christian Szegedy, \"Batch Normalization: Accelerating Deep Network Training by Reducing\nInternal Covariate Shift\", ICML 2015.", "# As usual, a bit of setup\nfrom __future__ import print_function\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cs231n.classifiers.fc_net import *\nfrom cs231n.data_utils import get_CIFAR10_data\nfrom cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array\nfrom cs231n.solver import Solver\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading external modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n\ndef rel_error(x, y):\n \"\"\" returns relative error \"\"\"\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))\n\n# Load the (preprocessed) CIFAR10 data.\n\ndata = get_CIFAR10_data()\nfor k, v in data.items():\n print('%s: ' % k, v.shape)", "Batch normalization: Forward\nIn the file cs231n/layers.py, implement the batch normalization forward pass in the function batchnorm_forward. 
Once you have done so, run the following to test your implementation.", "# Check the training-time forward pass by checking means and variances\n# of features both before and after batch normalization\n\n# Simulate the forward pass for a two-layer network\nnp.random.seed(231)\nN, D1, D2, D3 = 200, 50, 60, 3\nX = np.random.randn(N, D1)\nW1 = np.random.randn(D1, D2)\nW2 = np.random.randn(D2, D3)\na = np.maximum(0, X.dot(W1)).dot(W2)\n\nprint('Before batch normalization:')\nprint(' means: ', a.mean(axis=0))\nprint(' stds: ', a.std(axis=0))\n\n# Means should be close to zero and stds close to one\nprint('After batch normalization (gamma=1, beta=0)')\na_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})\nprint(' mean: ', a_norm.mean(axis=0))\nprint(' std: ', a_norm.std(axis=0))\n\n# Now means should be close to beta and stds close to gamma\ngamma = np.asarray([1.0, 2.0, 3.0])\nbeta = np.asarray([11.0, 12.0, 13.0])\na_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})\nprint('After batch normalization (nontrivial gamma, beta)')\nprint(' means: ', a_norm.mean(axis=0))\nprint(' stds: ', a_norm.std(axis=0))\n\n# Check the test-time forward pass by running the training-time\n# forward pass many times to warm up the running averages, and then\n# checking the means and variances of activations after a test-time\n# forward pass.\nnp.random.seed(231)\nN, D1, D2, D3 = 200, 50, 60, 3\nW1 = np.random.randn(D1, D2)\nW2 = np.random.randn(D2, D3)\n\nbn_param = {'mode': 'train'}\ngamma = np.ones(D3)\nbeta = np.zeros(D3)\nfor t in range(50):\n X = np.random.randn(N, D1)\n a = np.maximum(0, X.dot(W1)).dot(W2)\n batchnorm_forward(a, gamma, beta, bn_param)\nbn_param['mode'] = 'test'\nX = np.random.randn(N, D1)\na = np.maximum(0, X.dot(W1)).dot(W2)\na_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)\n\n# Means should be close to zero and stds close to one, but will be\n# noisier than training-time forward passes.\nprint('After batch normalization (test-time):')\nprint(' means: ', a_norm.mean(axis=0))\nprint(' stds: ', a_norm.std(axis=0))", "Batch Normalization: backward\nNow implement the backward pass for batch normalization in the function batchnorm_backward.\nTo derive the backward pass you should write out the computation graph for batch normalization and backprop through each of the intermediate nodes. 
Some intermediates may have multiple outgoing branches; make sure to sum gradients across these branches in the backward pass.\nOnce you have finished, run the following to numerically check your backward pass.", "# Gradient check batchnorm backward pass\nnp.random.seed(231)\nN, D = 4, 5\nx = 5 * np.random.randn(N, D) + 12\ngamma = np.random.randn(D)\nbeta = np.random.randn(D)\ndout = np.random.randn(N, D)\n\nbn_param = {'mode': 'train'}\nfx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]\nfg = lambda a: batchnorm_forward(x, a, beta, bn_param)[0]\nfb = lambda b: batchnorm_forward(x, gamma, b, bn_param)[0]\n\ndx_num = eval_numerical_gradient_array(fx, x, dout)\nda_num = eval_numerical_gradient_array(fg, gamma.copy(), dout)\ndb_num = eval_numerical_gradient_array(fb, beta.copy(), dout)\n\n_, cache = batchnorm_forward(x, gamma, beta, bn_param)\ndx, dgamma, dbeta = batchnorm_backward(dout, cache)\n\nprint('dx error: ', rel_error(dx_num, dx))\nprint('dgamma error: ', rel_error(da_num, dgamma))\nprint('dbeta error: ', rel_error(db_num, dbeta))", "Batch Normalization: alternative backward (OPTIONAL, +3 points extra credit)\nIn class we talked about two different implementations for the sigmoid backward pass. One strategy is to write out a computation graph composed of simple operations and backprop through all intermediate values. Another strategy is to work out the derivatives on paper. For the sigmoid function, it turns out that you can derive a very simple formula for the backward pass by simplifying gradients on paper.\nSurprisingly, it turns out that you can also derive a simple expression for the batch normalization backward pass if you work out derivatives on paper and simplify. After doing so, implement the simplified batch normalization backward pass in the function batchnorm_backward_alt and compare the two implementations by running the following. Your two implementations should compute nearly identical results, but the alternative implementation should be a bit faster.\nNOTE: This part of the assignment is entirely optional, but we will reward 3 points of extra credit if you can complete it.", "np.random.seed(231)\nN, D = 100, 500\nx = 5 * np.random.randn(N, D) + 12\ngamma = np.random.randn(D)\nbeta = np.random.randn(D)\ndout = np.random.randn(N, D)\n\nbn_param = {'mode': 'train'}\nout, cache = batchnorm_forward(x, gamma, beta, bn_param)\n\nt1 = time.time()\ndx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)\nt2 = time.time()\ndx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)\nt3 = time.time()\n\nprint('dx difference: ', rel_error(dx1, dx2))\nprint('dgamma difference: ', rel_error(dgamma1, dgamma2))\nprint('dbeta difference: ', rel_error(dbeta1, dbeta2))\nprint('speedup: %.2fx' % ((t2 - t1) / (t3 - t2)))", "Fully Connected Nets with Batch Normalization\nNow that you have a working implementation for batch normalization, go back to your FullyConnectedNet in the file cs2312n/classifiers/fc_net.py. Modify your implementation to add batch normalization.\nConcretely, when the flag use_batchnorm is True in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation.\nHINT: You might find it useful to define an additional helper layer similar to those in the file cs231n/layer_utils.py. 
If you decide to do so, do it in the file cs231n/classifiers/fc_net.py.", "np.random.seed(231)\nN, D, H1, H2, C = 2, 15, 20, 30, 10\nX = np.random.randn(N, D)\ny = np.random.randint(C, size=(N,))\n\nfor reg in [0, 3.14]:\n print('Running check with reg = ', reg)\n model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,\n reg=reg, weight_scale=5e-2, dtype=np.float64,\n use_batchnorm=True)\n\n loss, grads = model.loss(X, y)\n print('Initial loss: ', loss)\n\n for name in sorted(grads):\n f = lambda _: model.loss(X, y)[0]\n grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)\n print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))\n if reg == 0: print()", "Batchnorm for deep networks\nRun the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.", "np.random.seed(231)\n# Try training a very deep net with batchnorm\nhidden_dims = [100, 100, 100, 100, 100]\n\nnum_train = 1000\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nweight_scale = 2e-2\nbn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)\nmodel = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)\n\nbn_solver = Solver(bn_model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=True, print_every=200)\nbn_solver.train()\n\nsolver = Solver(model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=True, print_every=200)\nsolver.train()", "Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.", "plt.subplot(3, 1, 1)\nplt.title('Training loss')\nplt.xlabel('Iteration')\n\nplt.subplot(3, 1, 2)\nplt.title('Training accuracy')\nplt.xlabel('Epoch')\n\nplt.subplot(3, 1, 3)\nplt.title('Validation accuracy')\nplt.xlabel('Epoch')\n\nplt.subplot(3, 1, 1)\nplt.plot(solver.loss_history, 'o', label='baseline')\nplt.plot(bn_solver.loss_history, 'o', label='batchnorm')\n\nplt.subplot(3, 1, 2)\nplt.plot(solver.train_acc_history, '-o', label='baseline')\nplt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')\n\nplt.subplot(3, 1, 3)\nplt.plot(solver.val_acc_history, '-o', label='baseline')\nplt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')\n \nfor i in [1, 2, 3]:\n plt.subplot(3, 1, i)\n plt.legend(loc='upper center', ncol=4)\nplt.gcf().set_size_inches(15, 15)\nplt.show()", "Batch normalization and initialization\nWe will now run a small experiment to study the interaction of batch normalization and weight initialization.\nThe first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. 
The second layer will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale.", "np.random.seed(231)\n# Try training a very deep net with batchnorm\nhidden_dims = [50, 50, 50, 50, 50, 50, 50]\n\nnum_train = 1000\nsmall_data = {\n 'X_train': data['X_train'][:num_train],\n 'y_train': data['y_train'][:num_train],\n 'X_val': data['X_val'],\n 'y_val': data['y_val'],\n}\n\nbn_solvers = {}\nsolvers = {}\nweight_scales = np.logspace(-4, 0, num=20)\nfor i, weight_scale in enumerate(weight_scales):\n print('Running weight scale %d / %d' % (i + 1, len(weight_scales)))\n bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)\n model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)\n\n bn_solver = Solver(bn_model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=False, print_every=200)\n bn_solver.train()\n bn_solvers[weight_scale] = bn_solver\n\n solver = Solver(model, small_data,\n num_epochs=10, batch_size=50,\n update_rule='adam',\n optim_config={\n 'learning_rate': 1e-3,\n },\n verbose=False, print_every=200)\n solver.train()\n solvers[weight_scale] = solver\n\n# Plot results of weight scale experiment\nbest_train_accs, bn_best_train_accs = [], []\nbest_val_accs, bn_best_val_accs = [], []\nfinal_train_loss, bn_final_train_loss = [], []\n\nfor ws in weight_scales:\n best_train_accs.append(max(solvers[ws].train_acc_history))\n bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))\n \n best_val_accs.append(max(solvers[ws].val_acc_history))\n bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))\n \n final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))\n bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))\n \nplt.subplot(3, 1, 1)\nplt.title('Best val accuracy vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Best val accuracy')\nplt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')\nplt.legend(ncol=2, loc='lower right')\n\nplt.subplot(3, 1, 2)\nplt.title('Best train accuracy vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Best training accuracy')\nplt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')\nplt.legend()\n\nplt.subplot(3, 1, 3)\nplt.title('Final training loss vs weight initialization scale')\nplt.xlabel('Weight initialization scale')\nplt.ylabel('Final training loss')\nplt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')\nplt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')\nplt.legend()\nplt.gca().set_ylim(1.0, 3.5)\n\nplt.gcf().set_size_inches(10, 15)\nplt.show()", "Question:\nDescribe the results of this experiment, and try to give a reason why the experiment gave the results that it did.\nAnswer:" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
phievo/phievo
Examples/immune/Analyse_pMHC.ipynb
lgpl-3.0
[ "Analyse Run\nThis is a template notebook to browse the results of a evolution simulation.\nWARNING: THIS IS THE IMMUNE ADD-ON. THIS NOTEBOOK SHOULD BE MOVED TO BE RUN IN THE SAME DIRECTORY AS run_evolution\nPlease Restart & Run All to make shure you start with a clean notebook.\nEnter the path of the project here (necessary to load the addons)", "import os,sys\n#sys.path.append(\"immune\")\nsys.path.append(\"example_immune\")", "Import required libraries", "%matplotlib notebook\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom ipywidgets import widgets \nfrom ipywidgets import interact, interactive, fixed\nfrom IPython.display import display,HTML,clear_output\n\nHTML('''<script>code_show=true;function code_toggle() {if (code_show){$('div.input').hide();} else {$('div.input').show();} code_show = !code_show} $( document ).ready(code_toggle);</script><form action=\"javascript:code_toggle()\"><input type=\"submit\" value=\"Click here to toggle on/off the raw code.\"></form>''')\nfrom Immune import Add_ons_pMHC\nimport phievo.AnalysisTools as AT\nfrom phievo.AnalysisTools.Notebook import Notebook\n\nnotebook = Notebook()\nnotebook.run_dynamics_pMHC = Add_ons_pMHC.Run_Dynamics_pMHC(notebook)\nnotebook.plot_pMHC = Add_ons_pMHC.Plot_pMHC(notebook)\nnotebook.plot_layout_immune = Add_ons_pMHC.Plot_Layout_Immune(notebook)\n", "Select the Project", "notebook.select_project.display()", "Select Seed", "notebook.select_seed.display()", "Plot observable\nFor our immune simulations, the fitness is the mutual information between output concentrations (taken as a probability distribution) and binding time. An ideal fitness is $-1$.", "notebook.plot_evolution_observable.display()", "Select Generation", "notebook.select_generation.display()", "PLot Layout\nThe Layout of the network for immune accounts for the new interactions defined. $0$ represents the ligand, $1$ the receptor. They interact to form complex, that can be phosphorylated/dephosphorylated (black arrows, indexed with the corresponding kinase or phosphatase). All other species are either kinases or phosphatastases. Arrows with $1/\\tau$ correspond to kinetic proofreading steps.", "notebook.plot_layout_immune.display()", "Run Dynamics", "notebook.run_dynamics_pMHC.display()", "Plot Response function\nThe response function for Immune displays the concentration of all species at the end of simulation as a function of the number of ligands presented. The output is the solid line. Left column is for binding time $\\tau=3s$, right column for binding time $\\tau=10s$. The ideal case such as ``adaptive sorting\" corresponds to horizontal lines for the Output, at different levels for different the $\\tau$ s", "notebook.plot_pMHC.display()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
JasonSanchez/w261
exams/MIDS-MidTerm.ipynb
mit
[ "MIDS Machine Learning at Scale\nMidTerm Exam\n4:00PM - 6:00PM(CT)\nOctober 19, 2016 \nMidterm\nMIDS Machine Learning at Scale\nPlease insert your contact information here\nInsert you name here : Jason Sanchez\nInsert you email here : [email protected] \nInsert your UC Berkeley ID here: 26989981", "import numpy as np\nfrom __future__ import division\n\n%reload_ext autoreload\n%autoreload 2", "Exam Instructions\n\n: Please insert Name and Email address in the first cell of this notebook\n: Please acknowledge receipt of exam by sending a quick email reply to the instructor\n\n: Review the submission form first to scope it out (it will take a 5-10 minutes to input your \n answers and other information into this form): \n\nExam Submission form \n\n\n\n: Please keep all your work and responses in ONE (1) notebook only (and submit via the submission form)\n\n: Please make sure that the NBViewer link for your Submission notebook works \n\n: Please submit your solutions and notebook via the following form:\n\nExam Submission form\n\n\n\n: For the midterm you will need access to MrJob and Jupyter on your local machines or on AltaScale/AWS to complete some of the questions (like fill in the code to do X).\n\n\n: As for question types:\n\nKnowledge test Programmatic/doodle (take photos; embed the photos in your notebook) \nAll programmatic questions can be run locally on your laptop (using MrJob only) or on the cluster\n\n\n\n: This is an open book exam meaning you can consult webpages and textbooks, class notes, slides etc. but you can not discuss with each other or any other person/group. If any collusion, then this will result in a zero grade and will be grounds for dismissal from the entire program. Please complete this exam by yourself within the time limit. \n\n\nExam questions begins here\n===Map-Reduce===\nMT1. Which of the following statememts about map-reduce are true?\n(I) If you only have 1 computer with 1 computing core, then map-reduce is unlikely to help \n(II) If we run map-reduce using N single-core computers, then it is likely to get at least an N-Fold speedup compared to using 1 computer \n(III) Because of network latency and other overhead associated with map-reduce, if we run map-reduce using N computers, then we will get less than N-Fold speedup compared to using 1 computer \n(IV) When using map-reduce for learning a naive Bayes classifier for SPAM classification, we usually use a single machine that accumulates the partial class and word stats from each of the map machines, in order to compute the final model.\nPlease select one from the following that is most correct:\n\n(a) I, II, III, IV\n(b) I, III, IV\n(c) I, III\n(d) I,II, III\n\nC\n===Order inversion===\nMT2. normalized product co-occurrence\nSuppose you wish to write a MapReduce job that creates normalized product co-occurrence (i.e., pairs of products that have been purchased together) data form a large transaction file of shopping baskets. In addition, we want the relative frequency of coocurring products. Given this scenario, to ensure that all (potentially many) reducers\nreceive appropriate normalization factors (denominators)for a product\nin the most effcient order in their input streams (so as to minimize memory overhead on the reducer side), \nthe mapper should emit/yield records according to which pattern for the product occurence totals: \n(a) emit (*,product) count \n(b) There is no need to use order inversion here \n(c) emit (product,*) count \n(d) None of the above \nA\n===Map-Reduce===\nMT3. 
What is the input to the Reduce function in MRJob? Select the most correct choice.\n(a) An arbitrarily sized list of key/value pairs. \n(b) One key and a list of some values associated with that key\n(c) One key and a list of all values associated with that key. \n(d) None of the above \nC\n(Although it is not a list, but a generator)\n===Bayesian document classification=== \nMT4. When building a Bayesian document classifier, Laplace smoothing serves what purpose?\n(a) It allows you to use your training data as your validation data. \n(b) It prevents zero-products in the posterior distribution.\n(c) It accounts for words that were missed by regular expressions. \n(d) None of the above \nB\nMT5. Big Data\nBig data is defined as the voluminous amount of structured, unstructured or semi-structured data that has huge potential for mining but is so large that it cannot be processed nor stored using traditional (single computer) computing and storage systems. Big data is characterized by its high velocity, volume and variety that requires cost effective and innovative methods for information processing to draw meaningful business insights. More than the volume of the data – it is the nature of the data that defines whether it is considered as Big Data or not. What do the four V’s of Big Data denote? Here is a potential simple explanation for each of the four critical features of big data (some or all of which is correct):\nStatements \n* (I) Volume –Scale of data\n* (II) Velocity – Batch processing of data offline\n* (III)Variety – Different forms of data\n* (IV) Veracity –Uncertainty of data\nWhich combination of the above statements is correct. Select a single correct response from the following :\n\n(a) I, II, III, IV\n(b) I, III, IV\n(c) I, III\n(d) I,II, III\n\nB\nMT6. Combiners can be integral to the successful utilization of the Hadoop shuffle.\nUsing combiners result in what? \n\n(I) minimization of reducer workload \n(II) minimization of disk storage for mapper results \n(III) minimization of network traffic \n(IV) none of the above \n\nSelect most correct option (i.e., select one option only) from the following:\n\n(a) I \n(b) I, II and III \n(c) II and III \n(d) IV \n\nB (uncertain)\nPairwise similarity using K-L divergence\nIn probability theory and information theory, the Kullback–Leibler divergence \n(also information divergence, information gain, relative entropy, KLIC, or KL divergence) \nis a non-symmetric measure of the difference between two probability distributions P and Q. 
\nSpecifically, the Kullback–Leibler divergence of Q from P, denoted DKL(P\\‖Q), \nis a measure of the information lost when Q is used to approximate P:\nFor discrete probability distributions P and Q, \nthe Kullback–Leibler divergence of Q from P is defined to be\n+ KLDistance(P, Q) = Sum_over_item_i (P(i) log (P(i) / Q(i))\n\nIn the extreme cases, the KL Divergence is 1 when P and Q are maximally different\nand is 0 when the two distributions are exactly the same (follow the same distribution).\nFor more information on K-L Divergence see:\n+ [K-L Divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence)\n\nFor the next three question we will use an MRjob class for calculating pairwise similarity \nusing K-L Divergence as the similarity measure:\n\nJob 1: create inverted index (assume just two objects)\nJob 2: calculate/accumulate the similarity of each pair of objects using K-L Divergence\n\nUsing the following cells then fill in the code for the first reducer to calculate \nthe K-L divergence of objects (letter documents) in line1 and line2, i.e., KLD(Line1||line2).\nHere we ignore characters which are not alphabetical. And all alphabetical characters are lower-cased in the first mapper.\nUsing the MRJob Class below calculate the KL divergence of the following two string objects.", "%%writefile kltext.txt\n1.Data Science is an interdisciplinary field about processes and systems to extract knowledge or insights from large volumes of data in various forms (data in various forms, data in various forms, data in various forms), either structured or unstructured,[1][2] which is a continuation of some of the data analysis fields such as statistics, data mining and predictive analytics, as well as Knowledge Discovery in Databases.\n2.Machine learning is a subfield of computer science[1] that evolved from the study of pattern recognition and computational learning theory in artificial intelligence.[1] Machine learning explores the study and construction of algorithms that can learn from and make predictions on data.[2] Such algorithms operate by building a model from example inputs in order to make data-driven predictions or decisions,[3]:2 rather than following strictly static program instructions.", "MRjob class for calculating pairwise similarity using K-L Divergence as the similarity measure\nJob 1: create inverted index (assume just two objects) <P>\nJob 2: calculate the similarity of each pair of objects", "import numpy as np\nnp.log(3)\n\n!cat kltext.txt\n\n%%writefile kldivergence.py\n# coding: utf-8\n\nfrom __future__ import division\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nimport re\nimport numpy as np\n\nclass kldivergence(MRJob):\n # process each string character by character\n # the relative frequency of each character emitting Pr(character|str)\n # for input record 1.abcbe\n # emit \"a\" [1, 0.2]\n # emit \"b\" [1, 0.4] etc...\n def mapper1(self, _, line):\n index = int(line.split('.',1)[0])\n letter_list = re.sub(r\"[^A-Za-z]+\", '', line).lower()\n count = {}\n for l in letter_list:\n if count.has_key(l):\n count[l] += 1\n else:\n count[l] = 1\n for key in count:\n yield key, [index, count[key]*1.0/len(letter_list)]\n\n # on a component i calculate (e.g., \"b\")\n # Kullback–Leibler divergence of Q from P is defined as (P(i) log (P(i) / Q(i))\n def reducer1(self, key, values):\n p = 0\n q = 0\n for v in values:\n if v[0] == 1: #String 1\n p = v[1]\n else: # String 2\n q = v[1]\n \n if p and q:\n yield (None, p*np.log(p/q))\n\n #Aggegate 
components \n def reducer2(self, key, values):\n kl_sum = 0\n for value in values:\n kl_sum = kl_sum + value\n yield \"KLDivergence\", kl_sum\n \n def steps(self):\n mr_steps = [self.mr(mapper=self.mapper1,\n reducer=self.reducer1),\n \n self.mr(reducer=self.reducer2)]\n# mr_steps = [MRStep(mapper=self.mapper1, reducer=self.reducer1)]\n return mr_steps\n\nif __name__ == '__main__':\n kldivergence.run()\n\n%reload_ext autoreload\n%autoreload 2\nfrom mrjob.job import MRJob\nfrom kldivergence import kldivergence\n\n#dont forget to save kltext.txt (see earlier cell)\nmr_job = kldivergence(args=['kltext.txt'])\nwith mr_job.make_runner() as runner: \n runner.run()\n # stream_output: get access of the output \n for line in runner.stream_output():\n print mr_job.parse_output_line(line)", "Questions:\nMT7. Which number below is the closest to the result you get for KLD(Line1||line2)?\n(a) 0.7 \n(b) 0.5 \n(c) 0.2 \n(d) 0.1 \nD\nMT8. Which of the following letters are missing from these character vectors?\n(a) p and t \n(b) k and q \n(c) j and q \n(d) j and f", "words = \"\"\"\n1.Data Science is an interdisciplinary field about processes and systems to extract knowledge or insights from large volumes of data in various forms (data in various forms, data in various forms, data in various forms), either structured or unstructured,[1][2] which is a continuation of some of the data analysis fields such as statistics, data mining and predictive analytics, as well as Knowledge Discovery in Databases.\n2.Machine learning is a subfield of computer science[1] that evolved from the study of pattern recognition and computational learning theory in artificial intelligence.[1] Machine learning explores the study and construction of algorithms that can learn from and make predictions on data.[2] Such algorithms operate by building a model from example inputs in order to make data-driven predictions or decisions,[3]:2 rather than following strictly static program instructions.\"\"\"\n\nfor char in ['p', 'k', 'f', 'q', 'j']:\n if char not in words:\n print char", "C", "%%writefile kldivergence_smooth.py\nfrom __future__ import division\nfrom mrjob.job import MRJob\nimport re\nimport numpy as np\nclass kldivergence_smooth(MRJob):\n \n # process each string character by character\n # the relative frequency of each character emitting Pr(character|str)\n # for input record 1.abcbe\n # emit \"a\" [1, (1+1)/(5+24)]\n # emit \"b\" [1, (2+1)/(5+24) etc...\n def mapper1(self, _, line):\n index = int(line.split('.',1)[0])\n letter_list = re.sub(r\"[^A-Za-z]+\", '', line).lower()\n count = {}\n \n # (ni+1)/(n+24)\n \n for l in letter_list:\n if count.has_key(l):\n count[l] += 1\n else:\n count[l] = 1\n \n for letter in ['q', 'j']:\n if letter not in letter_list:\n count[letter] = 0\n \n for key in count:\n yield key, [index, (1+count[key]*1.0)/(24+len(letter_list))]\n\n \n def reducer1(self, key, values):\n p = 0\n q = 0\n for v in values:\n if v[0] == 1:\n p = v[1]\n else:\n q = v[1]\n\n yield (None, p*np.log(p/q)) \n\n # Aggregate components \n def reducer2(self, key, values):\n kl_sum = 0\n for value in values:\n kl_sum = kl_sum + value\n yield \"KLDivergence\", kl_sum\n \n def steps(self):\n return [self.mr(mapper=self.mapper1,\n reducer=self.reducer1),\n self.mr(reducer=self.reducer2)\n \n ]\n\nif __name__ == '__main__':\n kldivergence_smooth.run()\n\n%reload_ext autoreload\n%autoreload 2\n\nfrom kldivergence_smooth import kldivergence_smooth\nmr_job = kldivergence_smooth(args=['kltext.txt'])\nwith mr_job.make_runner() as 
runner: \n runner.run()\n # stream_output: get access of the output \n for line in runner.stream_output():\n print mr_job.parse_output_line(line)", "MT9. The KL divergence on multinomials is defined only when they have nonzero entries.\nFor zero entries, we have to smooth distributions. Suppose we smooth in this way: \n(ni+1)/(n+24) \nwhere ni is the count for letter i and n is the total count of all letters. \nAfter smoothing, which number below is the closest to the result you get for KLD(Line1||line2)?? \n(a) 0.08 \n(b) 0.71 \n(c) 0.02 \n(d) 0.11 \nA\nMT10. Block size, and mapper tasks\nGiven ten (10) files in the input directory for a Hadoop Streaming job (MRjob or just Hadoop) with the following filesizes (in megabytes): 1, 2,3,4,5,6,7,8,9,10; and a block size of 5M (NOTE: normally we should set the blocksize to 1 GigB using modern computers). How many map tasks will result from processing the data in the input directory? Select the closest number from the following list.\n(a) 1 map task\n (b) 14\n (c) 12 \n (d) None of the above \nB\nMT11. Aggregation\nGiven a purchase transaction log file where each purchase transaction contains the customer identifier, item purchased and much more information about the transaction. Which of the following statements are true about a MapReduce job that performs an “aggregation” such as get the number of transaction per customer.\nStatements\n* (I) A mapper only job will not suffice, as each map tast only gets to see a subset of the data (e.g., one block). As such a mapper only job will only produce intermediate tallys for each customer. \n* (II) A reducer only job will suffice and is most efficient computationally\n* (III) If the developer provides a Mapper and Reducer it can potentially be more efficient than option II\n* (IV) A reducer only job with a custom partitioner will suffice.\nSelect the most correct option from the following:\n\n(a) I, II, III, IV\n(b) II, IV\n(c) III, IV\n(d) III\n\nC\nMT12. Naive Bayes\nWhich of the following statements are true regarding Naive Bayes?\nStatements\n* (I) Naive Bayes is a machine learning algorithm that can be used for classifcation problems only\n* (II) Multinomial Naive Bayes is a flavour of Naive Bayes for discrete input variables and can be combined with Laplace smoothing to avoid zero predictions for class posterior probabilities when attribute value combinations show up during classification but were not present during training. \n* (III) Naive Bayes can be used for continous valued input variables. In this case, one can use Gaussian distributions to model the class conditional probability distributions Pr(X|Class).\n* (IV) Naive Bayes can model continous target variables directly.\nPlease select the single most correct combination from the following:\n\n(a) I, II, III, IV\n(b) I, II, III\n(c) I, III, IV\n(d) I, II\n\nB\nMT13. Naive Bayes SPAM model\nGiven the following document dataset for a Two-Class problem: ham and spam. Use MRJob (please include your code) to build a muiltnomial Naive Bayes classifier. Please use Laplace Smoothing with a hyperparameter of 1. Please use words only (a-z) as features. Please lowercase all words.", "%%writefile spam.txt\n0002.2001-05-25.SA_and_HP\t0\t0\tgood\n0002.2001-05-25.SA_and_HP\t0\t0\tvery good\n0002.2001-05-25.SA_and_HP\t1\t0\tbad\n0002.2001-05-25.SA_and_HP\t1\t0\tvery bad\n0002.2001-05-25.SA_and_HP\t1\t0\tvery bad, very BAD\n\n%%writefile spam_test.txt\n0002.2001-05-25.SA_and_HP\t1\t0\tgood? bad! very Bad! 
\n\n%%writefile NaiveBayes.py\n\nimport sys\nimport re\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nfrom mrjob.protocol import TextProtocol, TextValueProtocol\n\n# Prevents broken pipe errors from using ... | head\nfrom signal import signal, SIGPIPE, SIG_DFL\nsignal(SIGPIPE,SIG_DFL) \n\ndef sum_hs(counts):\n h_total, s_total = 0, 0\n for h, s in counts:\n h_total += h\n s_total += s\n return (h_total, s_total)\n\n\nclass NaiveBayes(MRJob):\n MRJob.OUTPUT_PROTOCOL = TextValueProtocol\n\n def mapper(self, _, lines):\n _, spam, subject, email = lines.split(\"\\t\")\n words = re.findall(r'[a-z]+', (email.lower()+\" \"+subject.lower()))\n \n if spam == \"1\":\n h, s = 0, 1\n else:\n h, s = 1, 0 \n yield \"***Total Emails\", (h, s)\n \n for word in words:\n yield word, (h, s)\n yield \"***Total Words\", (h, s)\n \n def combiner(self, key, count):\n yield key, sum_hs(count)\n \n def reducer_init(self):\n self.total_ham = 0\n self.total_spam = 0\n \n def reducer(self, key, count):\n ham, spam = sum_hs(count)\n if key.startswith(\"***\"):\n if \"Words\" in key:\n self.total_ham, self.total_spam = ham, spam\n elif \"Emails\" in key:\n total = ham + spam\n yield \"_\", \"***Priors\\t%.10f\\t%.10f\" % (ham/total, spam/total)\n else:\n pg_ham, pg_spam = ham/self.total_ham, spam/self.total_spam\n yield \"_\", \"%s\\t%.10f\\t%.10f\" % (key, pg_ham, pg_spam)\n \nif __name__ == \"__main__\":\n NaiveBayes.run()\n\n!cat spam.txt | python NaiveBayes.py --jobconf mapred.reduce.tasks=1 -q | head", "QUESTION\nHaving learnt the Naive Bayes text classification model for this problem using the training data and classified the test data (d6) please indicate which of the following is true:\nStatements\n* (I) P(very|ham) = 0.33\n* (II) P(good|ham) = 0.50\n* (I) Posterior Probability P(ham| d6) is approximately 24%\n* (IV) Class of d6 is ham\nPlease select the single most correct combination of these statements from the following:\n\n(a) I, II, III, IV\n(b) I, II, III\n(c) I, III, IV\n(d) I, II\n\nC (wild guess)\nMT14. Is there a map input format (for Hadoop or MRJob)?\n(a) Yes, but only in Hadoop 0.22+. \n(b) Yes, in Hadoop there is a default expectation that each record is delimited by an end of line charcacter and that key is the first token delimited by a tab character and that the value-part is everything after the tab character. \n(c) No, when MRJob INPUT_PROTOCOL = RawValueProtocol. In this case input is processed in format agnostic way thereby avoiding any type of parsing errors. The value is treated as a str, the key is read in as None. \n(d) Both b and c are correct answers. \nD\nMT15. What happens if mapper output does not match reducer input?\n(a) Hadoop API will convert the data to the type that is needed by the reducer. \n(b) Data input/output inconsistency cannot occur. A preliminary validation check is executed prior to the full execution of the job to ensure there is consistency. \n(c) The java compiler will report an error during compilation but the job will complete with exceptions. \n(d) A real-time exception will be thrown and map-reduce job will fail.\nD\nMT16. Why would a developer create a map-reduce without the reduce step?\n(a) Developers should design Map-Reduce jobs without reducers only if no reduce slots are available on the cluster. \n(b) Developers should never design Map-Reduce jobs without reducers. An error will occur upon compile. \n(c) There is a CPU intensive step that occurs between the map and reduce steps. 
Disabling the reduce step speeds up data processing.\n(d) It is not possible to create a map-reduce job without at least one reduce step. A developer may decide to limit to one reducer for debugging purposes. \nC\n===Gradient descent===\nMT17. Which of the following are true statements with respect to gradient descent for machine learning, where alpha is the learning rate. Select all that apply\n\n(I) To make gradient descent converge, we must slowly decrease alpha over time and use a combiner in the context of Hadoop.\n(II) Gradient descent is guaranteed to find the global minimum for any unconstrained convex objective function J() regardless of using a combiner or not in the context of Hadoop\n(III) Gradient descent can converge even if alpha is kept fixed. (But alpha cannot be too large, or else it may fail to converge.) Combiners will help speed up the process.\n(IV) For the specific choice of cost function J() used in linear regression, there is no local optima (other than the global optimum).\n\nSelect a single correct response from the following:\n* (a) I, II, III, IV\n* (b) I, III, IV\n* (c) II, III\n* (d) II,III, IV\nD\n===Weighted K-means===\nWrite a MapReduce job in MRJob to do the training at scale of a weighted K-means algorithm.\nYou can write your own code or you can use most of the code from the following notebook:\n\nhttp://nbviewer.jupyter.org/urls/dl.dropbox.com/s/oppgyfqxphlh69g/MrJobKmeans_Corrected.ipynb\n\nWeight each example as follows using the inverse vector length (Euclidean norm): \nweight(X)= 1/||X||, \nwhere ||X|| = SQRT(X.X)= SQRT(X1^2 + X2^2)\nHere X is vector made up of two component X1 and X2.\nUsing the following data to answer the following TWO questions:\n\nhttps://www.dropbox.com/s/ai1uc3q2ucverly/Kmeandata.csv?dl=0", "def inverse_vector_length(x1, x2):\n norm = (x1**2 + x2**2)**.5\n return 1.0/norm\n\ninverse_vector_length(1, 5)\n\n0 --> .2\n\n%matplotlib inline\nimport numpy as np\nimport pylab \nimport pandas as pd\n\ndata = pd.read_csv(\"Kmeandata.csv\", header=None)\n\npylab.plot(data[0], data[1], 'o', linewidth=0, alpha=.5);\n\n%%writefile Kmeans.py\nfrom numpy import argmin, array, random\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nfrom itertools import chain\nimport os\n\n#Calculate find the nearest centroid for data point \ndef MinDist(datapoint, centroid_points):\n datapoint = array(datapoint)\n centroid_points = array(centroid_points)\n diff = datapoint - centroid_points \n diffsq = diff*diff\n # Get the nearest centroid for each instance\n minidx = argmin(list(diffsq.sum(axis = 1)))\n return minidx\n\n#Check whether centroids converge\ndef stop_criterion(centroid_points_old, centroid_points_new,T):\n oldvalue = list(chain(*centroid_points_old))\n newvalue = list(chain(*centroid_points_new))\n Diff = [abs(x-y) for x, y in zip(oldvalue, newvalue)]\n Flag = True\n for i in Diff:\n if(i>T):\n Flag = False\n break\n return Flag\n\nclass MRKmeans(MRJob):\n centroid_points=[]\n k=3 \n def steps(self):\n return [\n MRStep(mapper_init = self.mapper_init, mapper=self.mapper,combiner = self.combiner,reducer=self.reducer)\n ]\n #load centroids info from file\n def mapper_init(self):\n# print \"Current path:\", os.path.dirname(os.path.realpath(__file__))\n \n self.centroid_points = [map(float,s.split('\\n')[0].split(',')) for s in open(\"Centroids.txt\").readlines()]\n #open('Centroids.txt', 'w').close()\n# print \"Centroids: \", self.centroid_points\n \n #load data and output the nearest centroid index and data point \n def 
mapper(self, _, line):\n D = (map(float,line.split(',')))\n yield int(MinDist(D, self.centroid_points)), (D[0],D[1],1)\n \n #Combine sum of data points locally\n def combiner(self, idx, inputdata):\n sumx = sumy = num = 0\n for x,y,n in inputdata:\n num = num + n\n sumx = sumx + x\n sumy = sumy + y\n yield idx,(sumx,sumy,num)\n \n #Aggregate sum for each cluster and then calculate the new centroids\n def reducer(self, idx, inputdata): \n centroids = []\n num = [0]*self.k \n for i in range(self.k):\n centroids.append([0,0])\n for x, y, n in inputdata:\n num[idx] = num[idx] + n\n centroids[idx][0] = centroids[idx][0] + x\n centroids[idx][1] = centroids[idx][1] + y\n centroids[idx][0] = centroids[idx][0]/num[idx]\n centroids[idx][1] = centroids[idx][1]/num[idx]\n\n yield idx,(centroids[idx][0],centroids[idx][1])\n \nif __name__ == '__main__':\n MRKmeans.run()\n\n%reload_ext autoreload\n%autoreload 2\nfrom numpy import random\nfrom Kmeans import MRKmeans, stop_criterion\nmr_job = MRKmeans(args=['Kmeandata.csv', '--file=Centroids.txt'])\n\n#Geneate initial centroids\ncentroid_points = []\nk = 3\nfor i in range(k):\n centroid_points.append([random.uniform(-3,3),random.uniform(-3,3)])\nwith open('Centroids.txt', 'w+') as f:\n f.writelines(','.join(str(j) for j in i) + '\\n' for i in centroid_points)\n\n# Update centroids iteratively\ni = 0\nwhile(1):\n # save previous centoids to check convergency\n centroid_points_old = centroid_points[:]\n print \"iteration\"+str(i)+\":\"\n with mr_job.make_runner() as runner: \n runner.run()\n # stream_output: get access of the output \n for line in runner.stream_output():\n key,value = mr_job.parse_output_line(line)\n print key, value\n centroid_points[key] = value\n \n # Update the centroids for the next iteration\n with open('Centroids.txt', 'w') as f:\n f.writelines(','.join(str(j) for j in i) + '\\n' for i in centroid_points)\n \n print \"\\n\"\n i = i + 1\n if(stop_criterion(centroid_points_old,centroid_points,0.01)):\n break\nprint \"Centroids\\n\"\nprint centroid_points\n\npylab.plot(data[0], data[1], 'o', linewidth=0, alpha=.5);\nfor point in centroid_points:\n pylab.plot(point[0], point[1], '*',color='pink',markersize=20)\n \nfor point in [(-4.5,0.0), (4.5,0.0), (0.0,4.5)]:\n pylab.plot(point[0], point[1], '*',color='red',markersize=20)\npylab.show()", "MT18. Which result below is the closest to the centroids you got after running your weighted K-means code for K=3 for 10 iterations?\n(old11-12)\n\n(a) (-4.0,0.0), (4.0,0.0), (6.0,6.0) \n(b) (-4.5,0.0), (4.5,0.0), (0.0,4.5) \n(c) (-5.5,0.0), (0.0,0.0), (3.0,3.0) \n(d) (-4.5,0.0), (-4.0,0.0), (0.0,4.5) \n\nB\nMT19. Using the result of the previous question, which number below is the closest to the average weighted distance between each example and its assigned (closest) centroid?\nThe average weighted distance is defined as \nsum over i (weighted_distance_i) / sum over i (weight_i)\n\n(a) 2.5 \n(b) 1.5 \n(c) 0.5 \n(d) 4.0 \n\nC\nEND of Exam" ]
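As a worked companion to MT9 in the exam record above, the following is a minimal NumPy sketch of the smoothed KL-divergence computation; it is not part of the original exam. The two letter-count vectors are invented stand-ins (the actual Line1/Line2 counts are not reproduced in this record), and the log base is taken as 2, which is an assumption.

```python
import numpy as np

def smoothed_kld(counts_p, counts_q, alphabet_size=24):
    """KL(P||Q) in bits after smoothing each letter count as (n_i + 1) / (n + alphabet_size)."""
    counts_p = np.asarray(counts_p, dtype=float)
    counts_q = np.asarray(counts_q, dtype=float)
    p = (counts_p + 1.0) / (counts_p.sum() + alphabet_size)
    q = (counts_q + 1.0) / (counts_q.sum() + alphabet_size)
    return float(np.sum(p * np.log2(p / q)))

# Invented letter counts over a 24-letter alphabet, only to exercise the function
line1 = [3, 0, 1, 2, 0, 0, 4, 1, 0, 0, 2, 1, 0, 0, 3, 0, 1, 0, 0, 2, 0, 1, 0, 0]
line2 = [1, 1, 0, 0, 2, 0, 1, 3, 0, 1, 0, 0, 2, 0, 0, 1, 0, 2, 0, 0, 3, 0, 1, 0]
print(smoothed_kld(line1, line2))
```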
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
arne-cl/alt-mulig
python/rstdt-batch-tokenization.ipynb
gpl-3.0
[ "Tokenization of RST-DT files using off-the-shelf tokenizers\n\nCoreNLP: failed\nnltk's TreebankWordTokenizer: failed, but might be adaptable\nlet's try the preprocessing provided by the Educational Testing Service's RST discourse parser,\n cf. rstdt-fixing-tokenization.ipynb", "import os\n\nfrom stanford_corenlp_pywrapper import sockwrap\n\nCORENLP_PYWRAPPER_DIR = os.path.expanduser('~/repos/stanford_corenlp_pywrapper')\njars = (\"stanford-corenlp-full-2014-08-27/stanford-corenlp-3.4.1.jar\",\n \"stanford-corenlp-full-2014-08-27/stanford-corenlp-3.4.1-models.jar\")\n\np=sockwrap.SockWrap(\"pos\",\n corenlp_jars=[os.path.join(CORENLP_PYWRAPPER_DIR, jar) for jar in jars])\n\nimport re\nimport discoursegraphs as dg\n\n# a string enclosed in '_!', possibly with '<P>' before the closing '_!' \nRST_DIS_TEXT_REGEX = re.compile(\"_!(.*?)(\\<P\\>)?_!\", re.DOTALL)\n\ncorenlp_result = p.parse_doc(\"\"\"that its money would be better spent \"in areas such as research\" and development.\"\"\")\n\nprint ' '.join(tok for sent in corenlp_result['sentences'] for tok in sent['tokens'])\n\nimport sys\nimport glob\nimport os\nimport codecs\n\nRSTDT_MAIN_ROOT = os.path.expanduser('~/repos/rst_discourse_treebank/data/RSTtrees-WSJ-main-1.0')\nRSTDT_TOKENIZED_ROOT = os.path.expanduser('~/repos/rst_discourse_treebank/data/RSTtrees-WSJ-main-1.0-tokenized')\n\nRSTDT_TEST_FILE = os.path.join(RSTDT_MAIN_ROOT, 'TEST', 'wsj_1306.out.dis')\n\ndef tokenize_rst_file(rst_input_path, rst_output_path):\n# edus = {}\n with open(rst_input_path, 'r') as rstfile, codecs.open(rst_output_path, 'w', encoding='utf-8') as outfile:\n rstfile_str = rstfile.read()\n input_file_onset = 0\n edu_matches = RST_DIS_TEXT_REGEX.finditer(rstfile_str)\n\n for edu in edu_matches:\n doc_onset = edu.start()\n doc_offset = edu.end()\n doc_untokenized_str = edu.groups()[0]\n corenlp_result = p.parse_doc(doc_untokenized_str)\n corenlp_tokenized_str = u' '.join(tok for sent in corenlp_result['sentences'] for tok in sent['tokens'])\n outfile.write(rstfile_str[input_file_onset:doc_onset])\n outfile.write(u'\"{}\"'.format(corenlp_tokenized_str))\n input_file_onset = doc_offset\n outfile.write(rstfile_str[input_file_onset:])\n\n# with open(RSTDT_TEST_FILE, 'r') as f:\n# print f.read()[325]\n\ntokenize_rst_file(RSTDT_TEST_FILE, '/tmp/1306.dis')\n\n%%time\nfor folder in ('TEST', 'TRAINING'):\n for rst_fpath in glob.glob(os.path.join(RSTDT_MAIN_ROOT, folder, '*.dis')):\n out_fpath = os.path.join(RSTDT_TOKENIZED_ROOT, folder, os.path.basename(rst_fpath))\n out_dir, _fname = os.path.split(out_fpath)\n dg.util.create_dir(out_dir)\n tokenize_rst_file(rst_fpath, out_fpath)", "tokenize using nltk.tokenize.treebank.TreebankWordTokenizer", "from nltk.tokenize.treebank import TreebankWordTokenizer\n\ntokenizer = TreebankWordTokenizer()\ntokenizer.tokenize(\"\"\"that its money would be better spent \"in areas such as research\" and development.\"\"\")\n\nimport re\n\nENDS_WITH_COMMA = re.compile('(.*),$')\nENDS_WITH_PUNCTUATION = re.compile('(.*)(,|.|!|:|;)$')\n\nfoo = \"Cummins Engine Co. , Columbus , Ind.,\"\nbar = ENDS_WITH_COMMA.sub(r'\\1 ,', foo)\n\nBRACKETS = {\n '(': '-LRB-', # round brackets\n ')': '-RRB-',\n '[': '-LSB-', # square brackets\n ']': '-RSB-',\n '{': '-LCB-', # curly brackets\n '}': '-RCB-'\n}\n\ndef fix_tokenized_sentence(tokenized_sentence):\n # If an EDU ends with a comma, we'll have to tokenize it,\n # e.g. 
\"when it ends,\" -> \"when it ends ,\"\n tokenized_sentence[-1] = ENDS_WITH_PUNCTUATION.sub(r'\\1 \\2', tokenized_sentence[-1])\n for i, token in enumerate(tokenized_sentence):\n if token in BRACKETS:\n tokenized_sentence[i] = BRACKETS[token]\n return tokenized_sentence\n\nENDS_WITH_PUNCTUATION = re.compile('(.*)(,|\\.|!|:|;)$')\n\nENDS_WITH_PUNCTUATION.match(foo).groups()\n\nfrom nltk.tokenize.treebank import TreebankWordTokenizer\nfrom nltk.tokenize import sent_tokenize\n\nTOKENIZER = TreebankWordTokenizer()\n\ndef tokenize_rst_file_with_nltk(rst_input_path, rst_output_path, tokenizer):\n# edus = {}\n with open(rst_input_path, 'r') as rstfile, codecs.open(rst_output_path, 'w', encoding='utf-8') as outfile:\n rstfile_str = rstfile.read()\n input_file_onset = 0\n edu_matches = RST_DIS_TEXT_REGEX.finditer(rstfile_str)\n\n for edu in edu_matches:\n doc_onset = edu.start()\n doc_offset = edu.end()\n doc_untokenized_str = edu.groups()[0]\n untokenized_sents = sent_tokenize(doc_untokenized_str)\n tokenized_sents = tokenizer.tokenize_sents(untokenized_sents)\n fixed_tokenized_sents = [fix_tokenized_sentence(sent) for sent in tokenized_sents]\n tokenized_str = u' '.join(tok for sent in fixed_tokenized_sents for tok in sent)\n\n outfile.write(rstfile_str[input_file_onset:doc_onset])\n outfile.write(u'\"{}\"'.format(tokenized_str))\n input_file_onset = doc_offset\n outfile.write(rstfile_str[input_file_onset:])\n\n%%time\n\nRSTDT_NLTK_TOKENIZED_ROOT = os.path.expanduser('~/repos/rst_discourse_treebank/data/RSTtrees-WSJ-main-1.0-nltk-tokenized')\n\nfor folder in ('TEST', 'TRAINING'):\n for rst_fpath in glob.glob(os.path.join(RSTDT_MAIN_ROOT, folder, '*.dis')):\n out_fpath = os.path.join(RSTDT_NLTK_TOKENIZED_ROOT, folder, os.path.basename(rst_fpath))\n out_dir, _fname = os.path.split(out_fpath)\n dg.util.create_dir(out_dir)\n tokenize_rst_file_with_nltk(rst_fpath, out_fpath, TOKENIZER)\n\nTOKENIZER.tokenize(\"on Monday the small ( investors ) are going to panic and sell\")", "The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank.\nThis is the method that is invoked by word_tokenize().\nIt assumes that the text has already been segmented into sentences, e.g. using sent_tokenize().", "from nltk.tokenize import sent_tokenize\n\nsents = sent_tokenize(\"a tree. You are a ball.\")\n\ntokenized_sents = TOKENIZER.tokenize_sents(sents)\nu' '.join(tok for sent in tokenized_sents for tok in sent)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
GoogleCloudPlatform/practical-ml-vision-book
09_deploying/09e_tflite.ipynb
apache-2.0
[ "from IPython.display import Markdown as md\n\n### change to reflect your notebook\n_nb_loc = \"09_deploying/09e_tflite.ipynb\"\n_nb_title = \"Edge ML with TensorFlow Lite\"\n\n### no need to change any of this\n_nb_safeloc = _nb_loc.replace('/', '%2F')\nmd(\"\"\"\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://console.cloud.google.com/ai-platform/notebooks/deploy-notebook?name={1}&url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fpractical-ml-vision-book%2Fblob%2Fmaster%2F{2}&download_url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fpractical-ml-vision-book%2Fraw%2Fmaster%2F{2}\">\n <img src=\"https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/logo-cloud.png\"/> Run in AI Platform Notebook</a>\n </td>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/GoogleCloudPlatform/practical-ml-vision-book/blob/master/{0}\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/GoogleCloudPlatform/practical-ml-vision-book/blob/master/{0}\">\n <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/{0}\">\n <img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\"\"\".format(_nb_loc, _nb_title, _nb_safeloc))", "Edge ML with TensorFlow Lite\nIn this notebook, we convert the saved model into a TensorFlow Lite model\nso that we can run it on Edge devices.\nIn order to do edge inference, we need to handle raw image data from the camera\nand process a single image (not a batch of images).", "import tensorflow as tf\nimport os, shutil\n\nMODEL_LOCATION='export/flowers_model3' # will be created\n# load from checkpoint and export a model that has desired signature\nCHECK_POINT_DIR='gs://practical-ml-vision-book/flowers_5_trained/chkpts'\nmodel = tf.keras.models.load_model(CHECK_POINT_DIR)\n\nIMG_HEIGHT = 345\nIMG_WIDTH = 345\nIMG_CHANNELS = 3\nCLASS_NAMES = 'daisy dandelion roses sunflowers tulips'.split()\n \n# a single image of any size\[email protected](input_signature=[tf.TensorSpec([None, None, 3], dtype=tf.float32)])\ndef predict_flower_type(img):\n img = tf.image.resize_with_pad(img, IMG_HEIGHT, IMG_WIDTH)\n batch_pred = model(tf.expand_dims(img, axis=0))\n top_prob = tf.math.reduce_max(batch_pred, axis=[1])\n pred_label_index = tf.math.argmax(batch_pred, axis=1)\n pred_label = tf.gather(tf.convert_to_tensor(CLASS_NAMES), pred_label_index)\n return {\n 'probability': tf.squeeze(top_prob, axis=0),\n 'flower_type': tf.squeeze(pred_label, axis=0)\n }\n\nshutil.rmtree('export', ignore_errors=True)\nos.mkdir('export')\n\n\nmodel.save(MODEL_LOCATION,\n signatures={\n 'serving_default': predict_flower_type\n })", "Convert to TFLite\nThis will take a while to do the conversion", "import tensorflow as tf\nconverter = tf.lite.TFLiteConverter.from_saved_model(MODEL_LOCATION)\ntflite_model = converter.convert()\n\nwith open('export/model.tflite', 'wb') as ofp:\n ofp.write(tflite_model)\n\n!ls -lh export/model.tflite", "License\nCopyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License." ]
[ "code", "markdown", "code", "markdown", "code", "markdown" ]
AllenDowney/ThinkBayes2
examples/shuttle_soln.ipynb
mit
[ "Think Bayes\nCopyright 2018 Allen B. Downey\nMIT License: https://opensource.org/licenses/MIT", "# Configure Jupyter so figures appear in the notebook\n%matplotlib inline\n\n# Configure Jupyter to display the assigned value after an assignment\n%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'\n\nimport numpy as np\nimport pandas as pd\n\n# import classes from thinkbayes2\nfrom thinkbayes2 import Pmf, Cdf, Suite, Joint\n\nimport thinkplot", "The Space Shuttle problem\nHere's a problem from Bayesian Methods for Hackers\n\nOn January 28, 1986, the twenty-fifth flight of the U.S. space shuttle program ended in disaster when one of the rocket boosters of the Shuttle Challenger exploded shortly after lift-off, killing all seven crew members. The presidential commission on the accident concluded that it was caused by the failure of an O-ring in a field joint on the rocket booster, and that this failure was due to a faulty design that made the O-ring unacceptably sensitive to a number of factors including outside temperature. Of the previous 24 flights, data were available on failures of O-rings on 23, (one was lost at sea), and these data were discussed on the evening preceding the Challenger launch, but unfortunately only the data corresponding to the 7 flights on which there was a damage incident were considered important and these were thought to show no obvious trend. The data are shown below (see 1):", "# !wget https://raw.githubusercontent.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/master/Chapter2_MorePyMC/data/challenger_data.csv\n\ncolumns = ['Date', 'Temperature', 'Incident']\ndf = pd.read_csv('challenger_data.csv', parse_dates=[0])\ndf.drop(labels=[3, 24], inplace=True)\ndf\n\ndf['Incident'] = df['Damage Incident'].astype(float)\ndf\n\nimport matplotlib.pyplot as plt\n\nplt.scatter(df.Temperature, df.Incident, s=75, color=\"k\",\n alpha=0.5)\nplt.yticks([0, 1])\nplt.ylabel(\"Damage Incident?\")\nplt.xlabel(\"Outside temperature (Fahrenheit)\")\nplt.title(\"Defects of the Space Shuttle O-Rings vs temperature\");", "Grid algorithm\nWe can solve the problem first using a grid algorithm, with parameters b0 and b1, and\n$\\mathrm{logit}(p) = b0 + b1 * T$\nand each datum being a temperature T and a boolean outcome fail, which is true is there was damage and false otherwise.\nHint: the expit function from scipy.special computes the inverse of the logit function.", "from scipy.special import expit\n\nclass Logistic(Suite, Joint):\n \n def Likelihood(self, data, hypo):\n \"\"\"\n \n data: T, fail\n hypo: b0, b1\n \"\"\"\n return 1\n\n# Solution\n\nfrom scipy.special import expit\n\nclass Logistic(Suite, Joint):\n \n def Likelihood(self, data, hypo):\n \"\"\"\n \n data: T, fail\n hypo: b0, b1\n \"\"\"\n temp, fail = data\n b0, b1 = hypo\n \n log_odds = b0 + b1 * temp\n p_fail = expit(log_odds)\n if fail == 1:\n return p_fail\n elif fail == 0:\n return 1-p_fail\n else:\n # NaN\n return 1\n\nb0 = np.linspace(0, 50, 101);\n\nb1 = np.linspace(-1, 1, 101);\n\nfrom itertools import product\nhypos = product(b0, b1)\n\nsuite = Logistic(hypos);\n\nfor data in zip(df.Temperature, df.Incident):\n print(data)\n suite.Update(data)\n\nthinkplot.Pdf(suite.Marginal(0))\nthinkplot.decorate(xlabel='Intercept',\n ylabel='PMF',\n title='Posterior marginal distribution')\n\nthinkplot.Pdf(suite.Marginal(1))\nthinkplot.decorate(xlabel='Log odds ratio',\n ylabel='PMF',\n title='Posterior marginal distribution')", "According to the posterior distribution, what 
was the probability of damage when the shuttle launched at 31 degF?", "# Solution\n\nT = 31\ntotal = 0\n\nfor hypo, p in suite.Items():\n b0, b1 = hypo\n log_odds = b0 + b1 * T\n p_fail = expit(log_odds)\n total += p * p_fail\n \ntotal\n\n# Solution\n\npred = suite.Copy()\npred.Update((31, True))", "MCMC\nImplement this model using MCMC. As a starting place, you can use this example from the PyMC3 docs.\nAs a challege, try writing the model more explicitly, rather than using the GLM module.", "from warnings import simplefilter\nsimplefilter('ignore', FutureWarning)\n\nimport pymc3 as pm\n\n# Solution\n\nwith pm.Model() as model:\n pm.glm.GLM.from_formula('Incident ~ Temperature', df, \n family=pm.glm.families.Binomial())\n \n start = pm.find_MAP()\n trace = pm.sample(1000, start=start, tune=1000)\n\npm.traceplot(trace);\n\n# Solution\n\nwith pm.Model() as model:\n pm.glm.GLM.from_formula('Incident ~ Temperature', df, \n family=pm.glm.families.Binomial())\n \n trace = pm.sample(1000, tune=1000)", "The posterior distributions for these parameters should be similar to what we got with the grid algorithm." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
xiaodongpang23/anomaly_detection
anomaly_detection.ipynb
mit
[ "import numpy as np\nimport pandas as pd\nimport networkx as nx\nimport json\nimport sys", "Step1: build the initial state of the entire user network, as well as the purchae history of the users\nInput: sample_dataset/batch_log.json", "batchlogfile = 'sample_dataset/batch_log.json'\ndf_batch = pd.read_json(batchlogfile, lines=True)\n\nindex_purchase = ['event_type','id','timestamp','amount']\nindex_friend = ['event_type','id1','id2','timestamp']\n\n#df_batch.head()\n\n#df_batch.describe()\n\n# Read D and T\ndf_DT=df_batch[df_batch['D'].notnull()]\ndf_DT=df_DT[['D','T']]\nD = df_DT.values[0][0]\nT = df_DT.values[0][1]\n#print(D)\n#print(T)\n#df_DT.head()\n\n# check D and T values\nif D < 1:\n print('Program terminated because of D < 1')\n sys.exit()\nif T < 2:\n print('Program terminated because of T < 2')\n sys.exit()\n\n#for possible_value in set(df['event_type'].tolist()):\n# print(possible_value)\n\ndf_purchase = df_batch[df_batch['event_type']=='purchase']\ndf_purchase = df_purchase[index_purchase]\ndf_purchase = df_purchase.dropna(how='any')\n# If sort on the timestamp is needed, commentout the following line\n# df_purchase = df_purchase.sort_values('timestamp')\n#df_purchase.shape\n\ndf_friend=df_batch[(df_batch['event_type']=='befriend') | (df_batch['event_type']=='unfriend')]\ndf_friend=df_friend[index_friend]\ndf_friend=df_friend.dropna(how='any')\n# If sort on the timestamp is needed, commentout the following line\n#df_friend=df_friend.sort_values('timestamp')\n#df_friend.shape\n\nG = nx.Graph()\n\nidlist = set(df_purchase.id.tolist())\nG.add_nodes_from(idlist)\n#len(list(G.nodes()))\n\ndef Add_edges(data):\n for row in data.itertuples():\n id10 = row.id1\n id20 = row.id2\n event_type0 = row.event_type\n if event_type0 == 'befriend':\n G.add_edge(id10,id20)\n if event_type0 == 'unfriend':\n if G.has_edge(id10,id20):\n G.remove_edge(id10,id20) \n\nAdd_edges(df_friend)\n\n#len(list(G.edges()))\n\n#G[10.0]\n\n#G.number_of_nodes()\n\n#G.number_of_edges()\n\n# define a function to calcualte the mean and sd for userid's network\ndef Get_Mean_SD(userid):\n Nodes = list(nx.ego_graph(G, userid, D, center=False))\n df_Nodes = df_purchase.loc[df_purchase['id'].isin(Nodes)]\n if len(df_Nodes) >= 2: \n if len(df_Nodes) > T:\n df_Nodes = df_Nodes.sort_values('timestamp').iloc[-int(T):]\n #df_Nodes.shape\n #the std from pd is different from np; np is correct\n #mean = df_Nodes.amount.mean()\n #sd = df_Nodes.amount.std()\n mean = np.mean(df_Nodes['amount'])\n sd = np.std(df_Nodes['amount'])\n mean = float(\"{0:.2f}\".format(mean))\n sd = float(\"{0:.2f}\".format(sd))\n else:\n mean=np.nan\n sd=np.nan\n \n return mean, sd\n\n#Get_Mean_SD(0.0)\n\n#df_purchase.head()\n\n#df_purchase.tail()\n\n#df_purchase.shape", "Step2: Determine whether a purchase is anomalous\ninput file: sample_dataset/stream_log.json", "# read in the stream_log.json\nstreamlogfile = 'sample_dataset/stream_log.json'\ndf_stream = pd.read_json(streamlogfile, lines=True)\n# If sort on the timestamp is needed, commentout the following line\n#df_stream = df_stream.sort_values('timestamp')\n\n# open output file flagged_purchases.json\nflaggedfile = 'log_output/flagged_purchases.json'\nf = open(flaggedfile, 'w')\n\n# Determine whether a purchase is anomalous; update purchase history; update social network\nfor i in range(0, len(df_stream)):\n datai = df_stream.iloc[i]\n event_type = datai['event_type']\n if (event_type == 'purchase') & (not datai[index_purchase].isnull().any()):\n # update purchase history\n df_purchase = 
df_purchase.append(datai[index_purchase])\n timestamp = datai['timestamp']\n timestamp = str(timestamp)\n userid = datai['id']\n if (not G.has_node(userid)):\n G.add_node(userid)\n amount = datai['amount']\n mean, sd = Get_Mean_SD(userid)\n if not np.isnan(mean): # Get_Mean_SD returns NaN when the user's network has fewer than 2 purchases\n mean_3sd = mean + (3*sd)\n if amount > mean_3sd:\n f.write('{{\"event_type\":\"{0:s}\", \"timestamp\":\"{1:s}\", \"id\": \"{2:.0f}\", \"amount\": \"{3:.2f}\", \"mean\": \"{4:.2f}\", \"sd\": \"{5:.2f}\"}}\\n'.format(event_type, timestamp, userid, amount, mean, sd))\n # update social network\n if (event_type == 'befriend') & (not datai[index_friend].isnull().any()):\n df_friend=df_friend.append(datai[index_friend])\n id1 = datai['id1']\n id2 = datai['id2']\n G.add_edge(id1,id2)\n if (event_type == 'unfriend') & (not datai[index_friend].isnull().any()):\n df_friend=df_friend.append(datai[index_friend])\n id1 = datai['id1']\n id2 = datai['id2']\n if G.has_edge(id1,id2):\n G.remove_edge(id1,id2) \n \n\n\nf.close() " ]
[ "code", "markdown", "code", "markdown", "code" ]
GAMPTeam/vampyre
demos/sparse/sparse_lin_inverse.ipynb
mit
[ "Sparse Linear Inverse Demo\nIn this demo, we illustrate how to use the vampyre package for a simple sparse linear inverse problem. The problem is to estimate a sparse vector $z$ from linear measurements of the form $y=Az+w$ where $w$ is Gaussian noise and $A$ is a known linear transform -- a basic problem in compressed sensing. By sparse, we mean that the vector $z$ has few non-zero values. Knowing that the vector is sparse can be used for improved reconstruction if an appropriate sparse reconstruction algorithm is used.\nThere are a large number of algorithms for sparse linear inverse problems. This demo uses the Vector Approximate Message Passing (VAMP) method, one of several methods that will be included in the vampyre package. In going through this demo, you will learn to:\n* Load the vampyre package\n* Create synthetic data for a sparse linear inverse problem\n* Set up the VAMP method in the vampyre package to perform the estimation for the linear inverse problem\n* Measure the mean squared error (MSE) and compare the value to the predicted value from the VAMP method.\n* Using the hist_list feature to track variables per iteration of the algorithm.\nImporting the Package\nFirst we need to import the vampyre package. Since python does not have relative imports, you need to add the path location for the vampyre package to the system path. In this case, we have specified the path use a relative path location, but you can change this depending on where vampyre is located.", "import os\nimport sys\nvp_path = os.path.abspath('../../')\nif not vp_path in sys.path:\n sys.path.append(vp_path)\nimport vampyre as vp", "We will also load the other packages we will use in this demo. This could be done before the above import.", "import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n%matplotlib inline", "Generating Synthetic Data\nWe begin by generating synthetic data $z$ and measurements $y$ that we will use to test the algorithms. First, we set the dimensions and the shapes of the vectors we will use.", "# Parameters\nnz = 1000 # number of components of z\nny = 500 # number of measurements y \n\n# Compute the shapes\nzshape = (nz,) # Shape of z matrix\nyshape = (ny,) # Shape of y matrix\nAshape = (ny,nz) # Shape of A matrix", "To generate the synthetic data for this demo, we use the following simple probabilistic model. For the input $z$, we will use Bernouli-Gaussian (BG) distribution, a simple model in sparse signal processing. In the BG model, the components $z_i$ are i.i.d. where each component $z_i=0$ with probability $1-\\rho$ and $z_i \\sim {\\mathcal N}(0,1)$ with probability $\\rho$. The parameter $\\rho$ is called the sparsity ratio and represents the average number of non-zero components. When $\\rho$ is small, the vector $z$ is sparse. The components on which $z_i$ are non-zero are called the active components. We set the parameters below. We also set the SNR for the measurements.", "sparse_rat = 0.1 # sparsity ratio\nzmean1 = 0 # mean for the active components\nzvar1 = 1 # variance for the active components\nsnr = 30 # SNR in dB", "Using these parameters, we can generate random sparse z following this distribution with the following simple code.", "# Generate the random input\nz1 = np.random.normal(zmean1, np.sqrt(zvar1), zshape)\nu = np.random.uniform(0, 1, zshape) < sparse_rat\nz = z1*u", "To illustrate the sparsity, we plot the vector z. 
We can see from this plot that the majority of the components of z are zero.", "ind = np.array(range(nz))\nplt.plot(ind,z)", "Now, we create a random transform $A$ and output $y_0 = Az$.", "A = np.random.normal(0, 1/np.sqrt(nz), Ashape)\ny0 = A.dot(z)", "Finally, we add noise at the desired SNR", "yvar = np.mean(np.abs(y0)**2)\nwvar = yvar*np.power(10, -0.1*snr)\ny = y0 + np.random.normal(0,np.sqrt(wvar), yshape)", "Creating the Vampyre estimators\nNow that we have created the sparse data, we will use the vampyre package to recover z from y. In vampyre the methods to perform this estimation are called solvers. For this demo, we will use a simple solver called VAMP described in the paper:\n\nRangan, Sundeep, Philip Schniter, and Alyson Fletcher. \"Vector approximate message passing.\" arXiv preprint arXiv:1610.03082 (2016).\n\nSimilar to most of the solvers in the vampyre package, the VAMP solver needs precise specifications of the probability distributions of z and y. The simplest way to use VAMP is to specify two densities:\n* The prior $p(z)$; and\n* The likelihood $p(y|z)$.\nEach of the densities are described by estimators. \nWe first describe the estimator for the prior $p(z)$. The vampyre package will eventually have a large number of estimators to describe various densities. In this simple demo, $p(z)$ is what is called a mixture distribution since $z$ is one distribution with probability $1-\\rho$ and a second distribution with probability $\\rho$. To describe this mixture distribution in the vampyre package, we need to first create estimator classes for each component distribution. The following code creates an estimator, est0, for a discrete distribution with a probability of 1 at a 0 and a second estimator, est1, for the Gaussian distribution with the active components.", "est0 = vp.estim.DiscreteEst(0,1,zshape)\nest1 = vp.estim.GaussEst(zmean1,zvar1,zshape)", "We next use the vampyre class, MixEst, to describe a mixture of the two distributions. This is done by creating a list, est_list, of the estimators and an array pz with the probability of each component. The resulting estimator, est_in, is the estimator for the prior $z$, which is also the input to the transform $A$. We give this a name Input since it corresponds to the input. But, any naming is fine. Or, you can let vampyre give it a generic name.", "est_list = [est0, est1]\npz = np.array([1-sparse_rat, sparse_rat])\nest_in = vp.estim.MixEst(est_list, w=pz, name='Input')", "Next, we describe the likelihood function, $p(y|z)$. Since $y=Az+w$, we can first use the MatrixLT class to define a linear transform operator Aop corresponding to the matrix A. Then, we use the LinEstim class to describe the likelihood $y=Az+w$.", "Aop = vp.trans.MatrixLT(A,zshape)\nest_out = vp.estim.LinEst(Aop,y,wvar,map_est=False, name='Output')", "Finally, the VAMP method needs a message handler to describe how to perform the Gaussian message passing. This is a more advanced feature. For most applications, you can just use the simple message handler as follows.", "msg_hdl = vp.estim.MsgHdlSimp(map_est=False, shape=zshape)", "Running the VAMP Solver\nHaving described the input and output estimators and the variance handler, we can now construct a VAMP solver. The construtor takes the input and output estimators, the variance handler and other parameters. The paramter nit is the number of iterations. This is fixed for now. Later, we will add auto-termination. 
The other parameter, hist_list is optional, and will be described momentarily.", "nit = 20 # number of iterations\nsolver = vp.solver.Vamp(est_in,est_out,msg_hdl,\\\n hist_list=['zhat', 'zhatvar'],nit=nit)", "We can print a summary of the model which indicates the dimensions and the estimators.", "solver.summary()", "We now run the solver by calling the solve() method. For a small problem like this, this should be close to instantaneous.", "solver.solve()", "The VAMP solver estimate is the field zhat. We plot one column of this (icol=0) and compare it to the corresponding column of the true matrix z. You should see a very good match.", "zhat = solver.zhat\nind = np.array(range(nz))\nplt.plot(ind,z)\nplt.plot(ind,zhat)\nplt.legend(['True', 'Estimate'])", "We can measure the normalized mean squared error as follows. The VAMP solver also produces an estimate of the MSE in the variable zhatvar. We can extract this variable to compute the predicted MSE. We see that the normalized MSE is indeed low and closely matches the predicted value from VAMP.", "zerr = np.mean(np.abs(zhat-z)**2)\nzhatvar = solver.zhatvar\nzpow = np.mean(np.abs(z)**2)\nmse_act = 10*np.log10(zerr/zpow)\nmse_pred = 10*np.log10(zhatvar/zpow)\nprint(\"Normalized MSE (dB): actual {0:f} pred {1:f}\".format(mse_act, mse_pred))", "Finally, we can plot the actual and predicted MSE as a function of the iteration number. When solver was contructed, we passed an argument hist_list=['zhat', 'zhatvar']. This indicated to store the value of the estimate zhat and predicted error variance zhatvar with each iteration. We can recover these values from solver.hist_dict, the history dictionary. Using the values we can compute and plot the normalized MSE on each iteartion. We see that VAMP gets a low MSE in very few iterations, about 10.", "# Compute the MSE as a function of the iteration\nzhat_hist = solver.hist_dict['zhat']\nzhatvar_hist = solver.hist_dict['zhatvar']\nnit = len(zhat_hist)\nmse_act = np.zeros(nit)\nmse_pred = np.zeros(nit)\nfor it in range(nit):\n zerr = np.mean(np.abs(zhat_hist[it]-z)**2)\n mse_act[it] = 10*np.log10(zerr/zpow)\n mse_pred[it] = 10*np.log10(zhatvar_hist[it]/zpow)\n \nplt.plot(range(nit), mse_act, 'o-', linewidth=2)\nplt.plot(range(nit), mse_pred, 's', linewidth=1)\nplt.xlabel('Iteration')\nplt.ylabel('Normalized MSE (dB)')\nplt.legend(['Actual', 'Predicted'])\nplt.grid()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
buckleylab/Buckley_Lab_SIP_project_protocols
sequence_analysis_walkthrough/QIIME2_Processing_Pipeline.ipynb
mit
[ "Pipeling to Process Raw Sequences into Phyloseq Object with DADA2\n\nPrep for Import to QIIME2 (Combine two index files)\nImport to QIIME2\nDemultiplex\nDenoise and Merge\nPrepare OTU Tables and Rep Sequences (Note: sample names starting with a digit will break this step)\nClassify Seqs\n\n\n100% Appropriated from the \"Atacama Desert Tutorial\" for QIIME2\nPipeline can handle both 16S rRNA gene and ITS sequences (in theory)\n\nTested on 515f and 806r\nTested on ITS1\n\nCommands to Install Dependencies\n|| QIIME2 ||\n\nconda create -n qiime2-pipeline --file https://data.qiime2.org/distro/core/qiime2-2017.11-conda-linux-64.txt\nsource activate qiime2-pipeline\n\n** Note: QIIME2 is still actively in development, and I've noticed frequent new releases. Check for the most up-to-date conda install file https://docs.qiime2.org/2017.11/install/native/#install-qiime-2-within-a-conda-environment\n|| Copyrighter rrn Database ||\n\nThe script will automatically install the curated GreenGenes rrn attribute database\nhttps://github.com/fangly/AmpliCopyrighter\n\n|| rpy2 (don't use conda version) ||\n\npip install rpy2 \n\n|| phyloseq ||\n\nconda install -c r r-igraph \nRscript -e \"source('http://bioconductor.org/biocLite.R');biocLite('phyloseq')\" \n\n|| R packages ||\n\nape (natively installed with in conda environment)\n\nCitations\n\n\nCaporaso, J. G., Kuczynski, J., Stombaugh, J., Bittinger, K., Bushman, F. D., Costello, E. K., et al. (2010). QIIME allows analysis of high-throughput community sequencing data. Nature methods, 7(5), 335-336.\n\n\nMcMurdie and Holmes (2013) phyloseq: An R Package for Reproducible Interactive Analysis and Graphics of Microbiome Census Data. PLoS ONE. 8(4):e61217\n\n\nParadis E., Claude J. & Strimmer K. 2004. APE: analyses of phylogenetics and evolution in R language. Bioinformatics 20: 289-290.\n\n\nAngly, F. E., Dennis, P. G., Skarshewski, A., Vanwonterghem, I., Hugenholtz, P., & Tyson, G. W. (2014). CopyRighter: a rapid tool for improving the accuracy of microbial community profiles through lineage-specific gene copy number correction. Microbiome, 2(1), 11.\n\n\nLast Modified by R. Wilhelm on October 12th, 2017\nStep 1: User Input", "import os, re\n\n# Provide the directory for your index and read files (you can do multiple independently in one go)\nbioblitz = '/home/roli/BioBlitz.2017/SV_based/'\n\n# Prepare an object with the name of the library, the name of the directory object (created above), and the metadatafile name\n#datasets = [['name',directory1,'metadata1','domain of life'],['name',directory2,'metadata2','domain of life']]\ndatasets = [['bioblitz',bioblitz,'metadata.tsv','bacteria']]\n\n# Ensure your reads files are named accordingly (or modify to suit your needs)\nreadFile1 = 'read1.fq.gz'\nreadFile2 = 'read2.fq.gz'\nindexFile1 = 'index_read1.fq.gz'\nindexFile2 = 'index_read2.fq.gz'\n\n## Enter Minimum Support for Keeping QIIME Classification\n# Note: Classifications that do not meet this criteria will simply be retained, but labeled 'putative'\nmin_support = 0.8", "Step 2: Concatenate Barcodes for QIIME2 Pipeline", "## Note: QIIME takes a single barcode file. 
The command 'extract_barcodes.py' concatenates the forward and reverse read barcode and attributes it to a single read.\n\n# See http://qiime.org/tutorials/processing_illumina_data.html\n\nfor dataset in datasets:\n directory = dataset[1]\n index1 = directory+indexFile1\n index2 = directory+indexFile2\n \n # Run extract_barcodes to merge the two index files\n !python2 /opt/anaconda2/bin/extract_barcodes.py --input_type barcode_paired_end -f $index1 -r $index2 --bc1_len 8 --bc2_len 8 -o $directory/output\n\n # QIIME2 import requires a directory containing files names: forward.fastq.gz, reverse.fastq.gz and barcodes.fastq.gz \n !ln -s $directory$readFile1 $directory/output/forward.fastq.gz\n !ln -s $directory$readFile2 $directory/output/reverse.fastq.gz\n \n # Gzip the barcodes files (apparently necessary)\n !pigz -p 5 $directory/output/barcodes.fastq\n\n # Removed orphaned reads files (not needed)\n !rm $directory/output/reads?.fastq\n", "Step 3: Import into QIIME2", "for dataset in datasets:\n name = dataset[0]\n directory = dataset[1]\n \n os.system(' '.join([\n \"qiime tools import\",\n \"--type EMPPairedEndSequences\",\n \"--input-path \"+directory+\"output/\",\n \"--output-path \"+directory+\"output/\"+name+\".qza\"\n ]))\n \n # This more direct command is broken by the fact QIIME uses multiple dashes in their arguments (is my theory)\n #!qiime tools import --type EMPPairedEndSequences --input-path $directory/output --output-path $directory/output/$name.qza\n ", "Step 4: Demultiplex", "########\n## Note: The barcode you supply to QIIME is now a concatenation of your forward and reverse barcode.\n# Your 'forward' barcode is actually the reverse complement of your reverse barcode and the 'reverse' is your forward barcode. The file 'primers.complete.csv' provides this information corresponding to the Buckley Lab 'primer number'\n# This quirk could be corrected in how different sequencing facilities pre-process the output from the sequencer\n\n##\n## SLOW STEP (~ 2 - 4 hrs)\n##\n\nfor dataset in datasets:\n name = dataset[0]\n directory = dataset[1]\n metadata = dataset[2]\n \n os.system(' '.join([\n \"qiime demux emp-paired\",\n \"--m-barcodes-file \"+directory+metadata,\n \"--m-barcodes-category BarcodeSequence\",\n \"--i-seqs \"+directory+\"output/\"+name+\".qza\",\n \"--o-per-sample-sequences \"+directory+\"output/\"+name+\".demux\"\n ]))\n \n # This more direct command is broken by the fact QIIME uses multiple dashes in their arguments (is my theory)\n #!qiime demux emp-paired --m-barcodes-file $directory/$metadata --m-barcodes-category BarcodeSequence --i-seqs $directory/output/$name.qza --o-per-sample-sequences $directory/output/$name.demux\n ", "Step 5: Visualize Quality Scores and Determine Trimming Parameters", "## Based on the Graph Produced using the Following Command enter the trim and truncate values. Trim refers to the start of a sequence and truncate the total length (i.e. 
number of bases to remove from end)\n\n# The example in the Atacam Desert Tutorial trims 13 bp from the start of each read and does not remove any bases from the end of the 150 bp reads:\n# --p-trim-left-f 13 \\ \n# --p-trim-left-r 13 \\\n# --p-trunc-len-f 150 \\\n# --p-trunc-len-r 150\n\nfor dataset in datasets:\n name = dataset[0]\n directory = dataset[1]\n \n os.system(' '.join([\n \"qiime demux summarize\",\n \"--i-data \"+directory+\"/output/\"+name+\".demux.qza\",\n \"--o-visualization \"+directory+\"/output/\"+name+\".demux.QC.summary.qzv\"\n ]))\n \n ## Take the output from this command and drop it into:\n #https://view.qiime2.org\n\nwait_for_user = input(\"The script will now wait for you to input trimming parameters in the next cell. You will need to take the .qzv files for each library and visualize them at <https://view.qiime2.org>. This is hopefully temporary, while QIIME2 developers improve on q2view.\\n\\n[ENTER ANYTHING. THIS IS ONLY MEANT TO PAUSE THE PIPELING]\")\nprint(\"\\nThe script is now proceeding. Stay tuned to make sure trimming works.\")", "Step 6: Trimming Parameters | USER INPUT REQUIRED", "## User Input Required\ntrim_dict = {}\n\n## Input your trimming parameters into a python dictionary for all libraries\n#trim_dict[\"LibraryName1\"] = [trim_forward, truncate_forward, trim_reverse, truncate_reverse]\n#trim_dict[\"LibraryName2\"] = [trim_forward, truncate_forward, trim_reverse, truncate_reverse]\n\n## Example\ntrim_dict[\"bioblitz\"] = [1, 240, 1, 190]", "Step 7: Trim, Denoise and Join (aka 'Merge') Reads Using DADA2", "## Hack for Multithreading\n# I hardcoded 'nthreads' in both versions of 'run_dada_paired.R' (find your versions by running 'locate run_dada_paired.R' from your home directory)\n# I used ~ 20 threads and the processing finished in ~ 7 - 8hrs\n\n##\n## SLOW STEP (~ 6 - 8 hrs, IF multithreading is used)\n##\n\n\nfor dataset in datasets:\n name = dataset[0]\n directory = dataset[1]\n \n os.system(' '.join([\n \"qiime dada2 denoise-paired\",\n \"--i-demultiplexed-seqs \"+directory+\"/output/\"+name+\".demux.qza\",\n \"--o-table \"+directory+\"/output/\"+name+\".table\",\n \"--o-representative-sequences \"+directory+\"/output/\"+name+\".rep.seqs.final\",\n \"--p-trim-left-f \"+str(trim_dict[name][0]),\n \"--p-trim-left-r \"+str(trim_dict[name][2]),\n \"--p-trunc-len-f \"+str(trim_dict[name][1]),\n \"--p-trunc-len-r \"+str(trim_dict[name][3])\n ]))\n \n", "Step 8: Create Summary of OTUs", "for dataset in datasets:\n name = dataset[0]\n directory = dataset[1]\n metadata = dataset[2]\n \n os.system(' '.join([\n \"qiime feature-table summarize\",\n \"--i-table \"+directory+\"/output/\"+name+\".table.qza\",\n \"--o-visualization \"+directory+\"/output/\"+name+\".table.qzv\",\n \"--m-sample-metadata-file \"+directory+metadata\n ]))\n\n os.system(' '.join([\n \"qiime feature-table tabulate-seqs\",\n \"--i-data \"+directory+\"/output/\"+name+\".rep.seqs.final.qza\",\n \"--o-visualization \"+directory+\"/output/\"+name+\".rep.seqs.final.qzv\"\n ])) ", "Step 9: Make Phylogenetic Tree", "## Hack for Multithreading\n# I hardcoded 'n_threads' in '_mafft.py' in the directory ~/anaconda3/envs/qiime2-2017.9/lib/python3.5/site-packages/q2_alignment\n# I used ~ 20 threads and the processing finished in ~ 15 min\n\nfor dataset in datasets:\n name = dataset[0]\n directory = dataset[1]\n metadata = dataset[2]\n domain = dataset[3]\n\n if domain != \"fungi\":\n # Generate Alignment with MAFFT\n os.system(' '.join([\n \"qiime alignment mafft\",\n \"--i-sequences 
\"+directory+\"/output/\"+name+\".rep.seqs.final.qza\",\n \"--o-alignment \"+directory+\"/output/\"+name+\".rep.seqs.aligned.qza\"\n ]))\n\n # Mask Hypervariable parts of Alignment\n os.system(' '.join([\n \"qiime alignment mask\",\n \"--i-alignment \"+directory+\"/output/\"+name+\".rep.seqs.aligned.qza\",\n \"--o-masked-alignment \"+directory+\"/output/\"+name+\".rep.seqs.aligned.masked.qza\"\n ])) \n\n # Generate Tree with FastTree\n os.system(' '.join([\n \"qiime phylogeny fasttree\",\n \"--i-alignment \"+directory+\"/output/\"+name+\".rep.seqs.aligned.masked.qza\",\n \"--o-tree \"+directory+\"/output/\"+name+\".rep.seqs.tree.unrooted.qza\"\n ])) \n\n # Root Tree\n os.system(' '.join([\n \"qiime phylogeny midpoint-root\",\n \"--i-tree \"+directory+\"/output/\"+name+\".rep.seqs.tree.unrooted.qza\",\n \"--o-rooted-tree \"+directory+\"/output/\"+name+\".rep.seqs.tree.final.qza\"\n ])) \n", "Step 10: Classify Seqs", "for dataset in datasets:\n name = dataset[0]\n directory = dataset[1]\n metadata = dataset[2]\n domain = dataset[3]\n\n # Classify\n if domain == 'bacteria':\n os.system(' '.join([\n \"qiime feature-classifier classify-sklearn\",\n \"--i-classifier /home/db/GreenGenes/qiime2_13.8.99_515.806_nb.classifier.qza\",\n \"--i-reads \"+directory+\"/output/\"+name+\".rep.seqs.final.qza\",\n \"--o-classification \"+directory+\"/output/\"+name+\".taxonomy.final.qza\"\n ]))\n\n if domain == 'fungi':\n os.system(' '.join([\n \"qiime feature-classifier classify-sklearn\",\n \"--i-classifier /home/db/UNITE/qiime2_unite_ver7.99_20.11.2016_classifier.qza\",\n \"--i-reads \"+directory+\"/output/\"+name+\".rep.seqs.final.qza\",\n \"--o-classification \"+directory+\"/output/\"+name+\".taxonomy.final.qza\"\n ]))\n\n # Output Summary\n os.system(' '.join([\n \"qiime metadata tabulate\",\n \"--m-input-file \"+directory+\"/output/\"+name+\".taxonomy.final.qza\",\n \"--o-visualization \"+directory+\"/output/\"+name+\".taxonomy.final.summary.qzv\"\n ])) ", "Step 11: Prepare Data for Import to Phyloseq", "## Make Function to Re-Format Taxonomy File to Contain Full Column Information \n# and factor in the certain of the taxonomic assignment\n\ndef format_taxonomy(tax_file, min_support):\n output = open(re.sub(\".tsv\",\".fixed.tsv\",tax_file), \"w\")\n output.write(\"\\t\".join([\"OTU\",\"Domain\",\"Phylum\",\"Class\",\"Order\",\"Family\",\"Genus\",\"Species\"])+\"\\n\")\n \n with open(tax_file, \"r\") as f:\n next(f) #skip header\n\n for line in f:\n line = line.strip()\n line = line.split(\"\\t\")\n\n read_id = line[0]\n tax_string = line[1]\n\n # Annotate those strings which do not meet minimum support\n if float(line[2]) < float(min_support):\n tax_string = re.sub(\"__\",\"__putative \",tax_string)\n\n # Remove All Underscore Garbage (gimmie aesthetics)\n tax_string = re.sub(\"k__|p__|c__|o__|f__|g__|s__\",\"\",tax_string) \n\n # Add in columns containing unclassified taxonomic information\n # Predicated on maximum 7 ranks (Domain -> Species)\n full_rank = tax_string.split(\";\")\n last_classified = full_rank[len(full_rank)-1]\n\n count = 1\n while last_classified == \" \":\n last_classified = full_rank[len(full_rank)-count]\n count = count + 1\n\n\n for n in range(full_rank.index(last_classified)+1, 7, 1):\n try:\n full_rank[n] = \"unclassifed \"+last_classified\n except:\n full_rank.append(\"unclassifed \"+last_classified)\n\n output.write(read_id+\"\\t\"+'\\t'.join(full_rank)+\"\\n\")\n \n return()\n\n#####################\n## Export from QIIME2\n\nfor dataset in datasets:\n name = dataset[0]\n 
directory = dataset[1]\n metadata = dataset[2]\n domain = dataset[3]\n\n ## Final Output Names\n fasta_file = directory+\"/output/\"+name+\".rep.seqs.final.fasta\"\n tree_file = directory+\"/output/\"+name+\".tree.final.nwk\"\n tax_file = directory+\"/output/\"+name+\".taxonomy.final.tsv\"\n count_table = directory+\"/output/\"+name+\".counts.final.biom\"\n\n # Export Classifications\n os.system(' '.join([\n \"qiime tools export\",\n directory+\"/output/\"+name+\".taxonomy.final.qza\",\n \"--output-dir \"+directory+\"/output/\"\n ]))\n \n # Reformat Classifications to meet phyloseq format\n format_taxonomy(directory+\"/output/taxonomy.tsv\", min_support)\n\n # Export SV Table\n os.system(' '.join([\n \"qiime tools export\",\n directory+\"/output/\"+name+\".table.qza\",\n \"--output-dir \"+directory+\"/output/\"\n ]))\n\n # Export SV Sequences\n os.system(' '.join([\n \"qiime tools export\",\n directory+\"/output/\"+name+\".rep.seqs.final.qza\",\n \"--output-dir \"+directory+\"/output/\"\n ]))\n \n # Export Tree\n os.system(' '.join([\n \"qiime tools export\",\n directory+\"/output/\"+name+\".rep.seqs.tree.final.qza\",\n \"--output-dir \"+directory+\"/output/\"\n ]))\n \n # Rename Exported Files\n %mv $directory/output/dna-sequences.fasta $fasta_file\n %mv $directory/output/feature-table.biom $count_table\n %mv $directory/output/taxonomy.fixed.tsv $tax_file\n \n if domain == \"bacteria\":\n %mv $directory/output/tree.nwk $tree_file\n \n", "Step 13: Get 16S rRNA Gene Copy Number (rrn)", "## This step is based on the database contructed for the software 'copyrighter'\n## The software itself lacked information about datastructure (and, the import of a biom from QIIME2 failed, likely because there are multiple versions of the biom format)\ndownloaded = \"N\"\nfor dataset in datasets:\n name = dataset[0]\n directory = dataset[1]\n domain = dataset[3]\n\n if domain == 'bacteria':\n if downloaded == \"N\":\n ## Download copyrighter database\n !git clone https://github.com/fangly/AmpliCopyrighter $directory/temp/\n\n ## There are multiple GreenGenes ID numbers for a given taxonomic string.\n ## However, the copyrighter database uses the same average rrn copy number.\n ## We will therefore just use the taxonomic strings, since QIIME2 does not output the ID numbers\n\n !sed -e '1,1075178d; 1078115d' $directory/temp/data/201210/ssu_img40_gg201210.txt > $directory/output/copyrighter.tax.strings.tsv\n\n ## Create Dictionary of rrnDB\n rrnDB = {}\n\n with open(directory+\"/output/copyrighter.tax.strings.tsv\", \"r\") as f:\n for line in f:\n line = line.strip()\n line = line.split(\"\\t\")\n\n try:\n rrnDB[line[0]] = line[1]\n\n except:\n pass\n\n downloaded = \"Y\"\n \n ## Attribute rrn to readID from taxonomy.tsv\n output = open(directory+\"/output/\"+name+\".seqID.to.rrn.final.tsv\",\"w\")\n output.write(\"Feature ID\\trrn\\n\")\n\n with open(directory+\"/output/taxonomy.tsv\", \"r\") as f:\n missing = 0\n total = 0\n next(f) # Skip Header\n\n for line in f:\n line = line.strip()\n line = line.split(\"\\t\")\n\n seqID = line[0]\n\n try:\n rrn = rrnDB[line[1]]\n\n except:\n rrn = \"NA\"\n missing = missing + 1\n\n total = total + 1\n output.write(seqID+\"\\t\"+rrn+\"\\n\")\n\n print(\"\\nPercent of OTUs Missing {:.1%}\".format(float(missing)/total))\n print(\"Don't Panic! 
The majority of missing OTUs could be low abundance.\")", "Step 14: Import into Phyloseq", "## Setup R-Magic for Jupyter Notebooks\nimport rpy2\n%load_ext rpy2.ipython\n\ndef fix_biom_conversion(file):\n with open(file, 'r') as fin:\n data = fin.read().splitlines(True)\n with open(file, 'w') as fout:\n fout.writelines(data[1:])\n\nimport pandas as pd\n%R library(phyloseq)\n%R library(ape)\n\n\nfor dataset in datasets:\n name = dataset[0]\n directory = dataset[1]\n metadata = dataset[2]\n domain = dataset[3]\n \n #### IMPORT DATA to R\n ## For '.tsv' files, use Pandas to create a dataframe and then pipe that to R\n ## For '.biom' files, first convert using 'biom convert' on the command-line\n ## Had problems importing the count table with pandas, opted for using read.table in R\n \n # Import Taxonomy File\n tax_file = pd.read_csv(directory+\"/output/\"+name+\".taxonomy.final.tsv\", sep=\"\\t\")\n %R -i tax_file\n %R rownames(tax_file) = tax_file$OTU\n %R tax_file$OTU <- NULL\n %R tax_file <- tax_file[sort(row.names(tax_file)),] #read names must match the count_table\n\n # Import Sample Data\n #sample_file = pd.read_csv(directory+\"/\"+metadata, sep=\"\\t\")\n sample_file = pd.read_table(directory+metadata, keep_default_na=False)\n %R -i sample_file\n %R rownames(sample_file) = sample_file$X.SampleID \n %R sample_file$X.SampleID <- NULL\n %R sample_file$LinkerPrimerSequence <- NULL ## Clean-up some other stuff\n \n # Import Count Data\n os.system(' '.join([\n \"biom convert\",\n \"-i\",\n directory+\"/output/\"+name+\".counts.final.biom\",\n \"-o\",\n directory+\"/output/\"+name+\".counts.final.tsv\",\n \"--to-tsv\"\n ]))\n \n # The biom converter adds a stupid line that messes with the table formatting\n fix_biom_conversion(directory+\"/output/\"+name+\".counts.final.tsv\")\n\n # Finally import\n count_table = pd.read_csv(directory+\"/output/\"+name+\".counts.final.tsv\", sep=\"\\t\")\n %R -i count_table\n %R rownames(count_table) = count_table$X.OTU.ID \n %R count_table$X.OTU.ID <- NULL \n %R count_table <- count_table[sort(row.names(count_table)),] #read names must match the tax_table\n \n # Convert to Phyloseq Objects\n %R p_counts = otu_table(count_table, taxa_are_rows = TRUE) \n %R p_samples = sample_data(sample_file) \n %R p_tax = tax_table(tax_file)\n %R taxa_names(p_tax) <- rownames(tax_file) # phyloseq throws out rownames\n %R colnames(p_tax) <- colnames(tax_file) # phyloseq throws out colnames\n \n # Merge Phyloseq Objects\n %R p = phyloseq(p_counts, p_tax)\n\n # Import Phylogenetic Tree\n if domain == \"bacteria\":\n tree_file = directory+\"/output/\"+name+\".tree.final.nwk\"\n %R -i tree_file \n %R p_tree <- read.tree(tree_file)\n \n # Combine All Objects into One Phyloseq\n %R p_final <- merge_phyloseq(p, p_samples, p_tree)\n \n else:\n # Combine All Objects into One Phyloseq\n %R p_final <- merge_phyloseq(p, p_samples)\n \n # Save Phyloseq Object as '.rds'\n output = directory+\"/output/p_\"+name+\".final.rds\"\n %R -i output\n %R saveRDS(p_final, file = output)\n \n # Confirm Output\n %R print(p_final)", "Step 15: Clean-up Intermediate Files and Final Outputs", "for dataset in datasets:\n directory = dataset[1]\n metadata = dataset[2]\n \n # Remove Files\n if domain == \"bacteria\":\n %rm -r $directory/output/*tree.unrooted.qza \n %rm -r $directory/output/*aligned.masked.qza \n \n %rm $directory/output/*.biom \n %rm -r $directory/temp/\n %rm $directory/output/*barcodes.fastq.gz \n %rm $directory/output/taxonomy.tsv\n %rm $directory/output/forward.fastq.gz # Just the symlink\n 
%rm $directory/output/reverse.fastq.gz # Just the symlink\n %rm $directory/output/copyrighter.tax.strings.tsv\n \n # Separate Final Files\n %mkdir $directory/final/ \n %mv $directory/output/*.final.rds $directory/final/\n %mv $directory/output/*.taxonomy.final.tsv $directory/final/ \n %mv $directory/output/*.counts.final.tsv $directory/final/\n %mv $directory/output/*.final.fasta $directory/final/\n %cp $directory$metadata $directory/final/\n %mv $directory/output/*.seqID.to.rrn.final.tsv $directory/final/ \n %mv $directory/output/*.nwk $directory/final/ \n \n # Gzip and Move Intermediate Files\n !pigz -p 10 $directory/output/*.qza\n !pigz -p 10 $directory/output/*.qzv\n \n %mv $directory/output/ $directory/intermediate_files\n\nprint(\"Your sequences have been successfully saved to 'final' and 'intermediate_files'\")" ]
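One possible continuation of the rrn lookup in the pipeline above is to correct the SV counts by 16S copy number before computing relative abundances. The pandas sketch below is hedged: the file names reuse the 'final' outputs written by the pipeline, but the exact column layout of those files is assumed here, so treat it as a template and adjust the index and column handling to the real files.

```python
import pandas as pd

# File names follow the pipeline's 'final' folder; the table layout is assumed
counts = pd.read_csv('final/bioblitz.counts.final.tsv', sep='\t', index_col=0)
rrn = pd.read_csv('final/bioblitz.seqID.to.rrn.final.tsv', sep='\t', index_col=0)['rrn']

# SVs without an rrn estimate fall back to the median copy number of the matched SVs
rrn = pd.to_numeric(rrn, errors='coerce').reindex(counts.index)
rrn = rrn.fillna(rrn.median())

# Divide each SV's counts by its copy number, then renormalize every sample
corrected = counts.div(rrn, axis=0)
rel_abund = corrected.div(corrected.sum(axis=0), axis=1)
rel_abund.to_csv('final/bioblitz.counts.copy_corrected.tsv', sep='\t')
```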
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
maxis42/ML-DA-Coursera-Yandex-MIPT
4 Stats for data analysis/Lectures notebooks/14 regression/stat.regression.ipynb
mit
[ "Линейная регрессия", "import statsmodels\nimport scipy as sc\nimport numpy as np\nimport pandas as pd\nimport statsmodels.formula.api as smf\nimport statsmodels.stats.api as sms\nfrom statsmodels.graphics.regressionplots import plot_leverage_resid2\nimport matplotlib.pyplot as plt\n\n%pylab inline", "Постановка\nПо 1260 опрошенным имеются следующие данные:\n\nзаработная плата за час работы, $;\nопыт работы, лет;\nобразование, лет;\nвнешняя привлекательность, в баллах от 1 до 5;\nбинарные признаки: пол, семейное положение, состояние здоровья (хорошее/плохое), членство в профсоюзе, цвет кожи (белый/чёрный), занятость в сфере обслуживания (да/нет).\n\nТребуется оценить влияние внешней привлекательности на уровень заработка с учётом всех остальных факторов.\nHamermesh D.S., Biddle J.E. (1994) Beauty and the Labor Market, American Economic Review, 84, 1174–1194.\nДанные:", "raw = pd.read_csv(\"beauty.csv\", sep=\";\", index_col=False) \nraw.head()", "Посмотрим на матрицу диаграмм рассеяния по количественным признакам:", "pd.tools.plotting.scatter_matrix(raw[['wage', 'exper', 'educ', 'looks']], alpha=0.2, \n figsize=(15, 15), diagonal='hist')\npylab.show()", "Оценим сбалансированность выборки по категориальным признакам:", "print raw.union.value_counts()\nprint raw.goodhlth.value_counts()\nprint raw.black.value_counts()\nprint raw.female.value_counts()\nprint raw.married.value_counts()\nprint raw.service.value_counts()", "У каждого признака все значения встречаются достаточно много раз, так что всё в порядке.\nПредобработка", "data = raw", "Посмотрим на распределение целевого признака — уровня заработной платы:", "plt.figure(figsize(16,7))\nplt.subplot(121)\ndata['wage'].plot.hist()\nplt.xlabel('Wage', fontsize=14)\n\nplt.subplot(122)\nnp.log(data['wage']).plot.hist()\nplt.xlabel('Log wage', fontsize=14)\npylab.show()", "Один человек в выборке получает 77.72\\$ в час, остальные — меньше 45\\$; удалим этого человека, чтобы регрессия на него не перенастроилась.", "data = data[data['wage'] < 77]", "Посмотрим на распределение оценок привлекательности:", "plt.figure(figsize(8,7))\ndata.groupby('looks')['looks'].agg(lambda x: len(x)).plot(kind='bar', width=0.9)\nplt.xticks(rotation=0)\nplt.xlabel('Looks', fontsize=14)\npylab.show()", "В группах looks=1 и looks=5 слишком мало наблюдений. Превратим признак looks в категориальный и закодируем с помощью фиктивных переменных:", "data['belowavg'] = data['looks'].apply(lambda x : 1 if x < 3 else 0)\ndata['aboveavg'] = data['looks'].apply(lambda x : 1 if x > 3 else 0)\ndata.drop('looks', axis=1, inplace=True)", "Данные теперь:", "data.head()", "Построение модели\nПростейшая модель\nПостроим линейную модель по всем признакам.", "m1 = smf.ols('wage ~ exper + union + goodhlth + black + female + married +'\\\n 'service + educ + belowavg + aboveavg', \n data=data)\nfitted = m1.fit()\nprint fitted.summary()", "Посмотрим на распределение остатков:", "plt.figure(figsize(16,7))\nplt.subplot(121)\nsc.stats.probplot(fitted.resid, dist=\"norm\", plot=pylab)\nplt.subplot(122)\nnp.log(fitted.resid).plot.hist()\nplt.xlabel('Residuals', fontsize=14)\npylab.show()", "Оно скошенное, как и исходный признак. 
В таких ситуациях часто помогает перейти от регрессии исходного признака к регрессии его логарифма.\nЛогарифмируем отклик", "m2 = smf.ols('np.log(wage) ~ exper + union + goodhlth + black + female + married +'\\\n 'service + educ + belowavg + aboveavg', data=data)\nfitted = m2.fit()\nprint fitted.summary()\n\nplt.figure(figsize(16,7))\nplt.subplot(121)\nsc.stats.probplot(fitted.resid, dist=\"norm\", plot=pylab)\nplt.subplot(122)\nnp.log(fitted.resid).plot.hist()\nplt.xlabel('Residuals', fontsize=14)\npylab.show()", "Теперь стало лучше. Посмотрим теперь на зависимость остатков от непрерывных признаков:", "plt.figure(figsize(16,7))\nplt.subplot(121)\nscatter(data['educ'],fitted.resid)\nplt.xlabel('Education', fontsize=14)\nplt.ylabel('Residuals', fontsize=14)\nplt.subplot(122)\nscatter(data['exper'],fitted.resid)\nplt.xlabel('Experience', fontsize=14)\nplt.ylabel('Residuals', fontsize=14)\npylab.show()", "На втором графике видна квадратичная зависимость остатков от опыта работы. Попробуем добавить к признакам квадрат опыта работы, чтобы учесть этот эффект.\nДобавляем квадрат опыта работы", "m3 = smf.ols('np.log(wage) ~ exper + np.power(exper,2) + union + goodhlth + black + female +'\\\n 'married + service + educ + belowavg + aboveavg', data=data)\nfitted = m3.fit()\nprint fitted.summary()\n\nplt.figure(figsize(16,7))\nplt.subplot(121)\nsc.stats.probplot(fitted.resid, dist=\"norm\", plot=pylab)\nplt.subplot(122)\nnp.log(fitted.resid).plot.hist()\nplt.xlabel('Residuals', fontsize=14)\nplt.figure(figsize(16,5))\nplt.subplot(131)\nscatter(data['educ'],fitted.resid)\nplt.xlabel('Education', fontsize=14)\nplt.ylabel('Residuals', fontsize=14)\nplt.subplot(132)\nscatter(data['exper'],fitted.resid)\nplt.xlabel('Experience', fontsize=14)\nplt.ylabel('Residuals', fontsize=14)\nplt.subplot(133)\nscatter(data['exper']**2,fitted.resid)\nplt.xlabel('Experience^2', fontsize=14)\nplt.ylabel('Residuals', fontsize=14)\npylab.show()", "Используем критерий Бройша-Пагана для проверки гомоскедастичности ошибок:", "print 'Breusch-Pagan test: p=%f' % sms.het_breushpagan(fitted.resid, fitted.model.exog)[1]", "Ошибки гетероскедастичны, значит, значимость признаков может определяться неверно. Сделаем поправку Уайта:", "m4 = smf.ols('np.log(wage) ~ exper + np.power(exper,2) + union + goodhlth + black + female +'\\\n 'married + service + educ + belowavg + aboveavg', data=data)\nfitted = m4.fit(cov_type='HC1')\nprint fitted.summary()\n\nplt.figure(figsize(16,7))\nplt.subplot(121)\nsc.stats.probplot(fitted.resid, dist=\"norm\", plot=pylab)\nplt.subplot(122)\nnp.log(fitted.resid).plot.hist()\nplt.xlabel('Residuals', fontsize=14)\npylab.show()", "Удаляем незначимые признаки\nВ предыдущей модели незначимы: цвет кожи, здоровье, семейное положение. Удалим их. 
Индикатор привлекательности выше среднего тоже незначим, но удалять его не будем, потому что это одна из переменных, по которым на нужно в конце ответить на вопрос.", "m5 = smf.ols('np.log(wage) ~ exper + np.power(exper,2) + union + female + service + educ +'\\\n 'belowavg + aboveavg', data=data)\nfitted = m5.fit(cov_type='HC1')\nprint fitted.summary()\n\nplt.figure(figsize(16,7))\nplt.subplot(121)\nsc.stats.probplot(fitted.resid, dist=\"norm\", plot=pylab)\nplt.subplot(122)\nnp.log(fitted.resid).plot.hist()\nplt.xlabel('Residuals', fontsize=14)\npylab.show()", "Посмотрим, не стала ли модель от удаления трёх признаков значимо хуже, с помощью критерия Фишера:", "print \"F=%f, p=%f, k1=%f\" % m4.fit().compare_f_test(m5.fit())", "Не стала.\nПроверим, нет ли наблюдений, которые слишком сильно влияют на регрессионное уравнение:", "plt.figure(figsize(8,7))\nplot_leverage_resid2(fitted)\npylab.show()\n\ndata.loc[[1122]]\n\ndata.loc[[269]]", "Выводы\nИтоговая модель объясняет 40% вариации логарифма отклика.", "plt.figure(figsize(16,7))\nplt.subplot(121)\nscatter(data['wage'],np.exp(fitted.fittedvalues))\nplt.xlabel('Wage', fontsize=14)\nplt.ylabel('Exponentiated predictions', fontsize=14)\nplt.xlim([0,50])\n\nplt.subplot(122)\nscatter(np.log(data['wage']),fitted.fittedvalues)\nplt.xlabel('Log wage', fontsize=14)\nplt.ylabel('Predictions', fontsize=14)\nplt.xlim([0,4])\npylab.show()", "При интересующих нас факторах привлекательности стоят коэффициенты -0.1307 (ниже среднего) и -0.0010 (выше среднего). \nПоскольку регрессия делалась на логарифм отклика, интерпретировать их можно как прирост в процентах. С учётом дополнительных факторов представители генеральной совокупности, из которой взята выборка, получают в среднем:\n\nна 13% меньше, если их привлекательность ниже среднего (p=0.001, 95% доверительный интервал — [5,21]%);\nстолько же, если их привлекательность выше среднего (p=0.972, 95% доверительный интервал — [-6,6]%)." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tpin3694/tpin3694.github.io
machine-learning/accuracy.ipynb
mit
[ "Title: Accuracy\nSlug: accuracy\nSummary: How to evaluate a Python machine learning using accuracy. \nDate: 2017-09-15 12:00\nCategory: Machine Learning\nTags: Model Evaluation\nAuthors: Chris Albon\n<a alt=\"Accuracy\" href=\"https://machinelearningflashcards.com\">\n <img src=\"accuracy/Accuracy_print.png\" class=\"flashcard center-block\">\n</a>\nPreliminaries", "# Load libraries\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.datasets import make_classification", "Generate Features And Target Data", "# Generate features matrix and target vector\nX, y = make_classification(n_samples = 10000,\n n_features = 3,\n n_informative = 3,\n n_redundant = 0,\n n_classes = 2,\n random_state = 1)", "Create Logistic Regression", "# Create logistic regression\nlogit = LogisticRegression()", "Cross-Validate Model Using Accuracy", "# Cross-validate model using accuracy\ncross_val_score(logit, X, y, scoring=\"accuracy\")" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
AEW2015/PYNQ_PR_Overlay
Pynq-Z1/notebooks/examples/pmod_dac_adc.ipynb
bsd-3-clause
[ "DAC-ADC Pmod Examples using Matplotlib and Widget\n\nContents\nPmod DAC-ADC Feedback\nTracking the IO Error\nError plot with Matplotlib\nXKCD Plot\nWidget controlled plot\n\nPmod DAC-ADC Feedback\nThis example shows how to use the PmodDA4 DAC and the PmodAD2 ADC on the PYNQ-Z1 board, using the baord's two Pmod interfaces. The notebook then compares the DAC output to the ADC input and tracks the errors.\nThe errors are plotted using Matplotlib and an XKCD version of the plot is produced (for fun). Finally a slider widget is introduced to control the number of samples diaplayed in the error plot.\nNote: The output of the DAC (pin A) must be connected with a wire to the input of the ADC (V1 input).\n1. Import hardware libraries and classes", "from pynq import Overlay\nfrom pynq.iop import Pmod_ADC, Pmod_DAC", "2. Program the ZYNQ PL", "ol = Overlay('base.bit')\nol.download()", "3. Instantiate the Pmod peripherals as Python objects", "adc = Pmod_ADC(1)\ndac = Pmod_DAC(2)", "4. Write to DAC, read from ADC, print result", "dac.write(0.35)\nsample = adc.read()\nprint(sample)", "Contents\n\nTracking the IO Error\nReport DAC-ADC Pmod Loopback Measurement Error.", "from math import ceil\nfrom time import sleep\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom pynq import Overlay\nfrom pynq.iop import Pmod_ADC, Pmod_DAC\n\nol = Overlay('base.bit')\nol.download()\n\nadc = Pmod_ADC(1)\ndac = Pmod_DAC(2)\n\ndelay = 0.0\nvalues = np.linspace(0, 2, 20)\nsamples = []\nfor value in values:\n dac.write(value)\n sleep(delay)\n sample = adc.read()\n samples.append(sample[0])\n print('Value written: {:4.2f}\\tSample read: {:4.2f}\\tError: {:+4.4f}'.\n format(value, sample[0], sample[0]-value))", "Error plot with Matplotlib\nThis example shows plots in notebook (rather than in separate window).", "%matplotlib inline\n \nX = np.arange(len(values))\nplt.bar(X + 0.0, values, facecolor='blue', \n edgecolor='white', width=0.5, label=\"Written_to_DAC\")\nplt.bar(X + 0.25, samples, facecolor='red', \n edgecolor='white', width=0.5, label=\"Read_from_ADC\")\n\nplt.title('DAC-ADC Linearity')\nplt.xlabel('Sample_number')\nplt.ylabel('Volts')\nplt.legend(loc='upper left', frameon=False)\n\nplt.show()", "Contents\n\nXKCD Plot\nSame data plotted in XKCD format ...\n(http://xkcd.com)", "%matplotlib inline\n \n# xkcd comic book style plots\nwith plt.xkcd():\n X = np.arange(len(values))\n plt.bar(X + 0.0, values, facecolor='blue', \n edgecolor='white', width=0.5, label=\"Written_to_DAC\")\n plt.bar(X + 0.25, samples, facecolor='red', \n edgecolor='white', width=0.5, label=\"Read_from_ADC\")\n\n plt.title('DAC-ADC Linearity')\n plt.xlabel('Sample_number')\n plt.ylabel('Volts')\n plt.legend(loc='upper left', frameon=False)\n\nplt.show()", "Contents\n\nWidget controlled plot\nIn this example, we extend the IO plot with a slider widget to control the number of samples appearing in the output plot.\nWe use the ipwidgets library and the simple interact() method to launch a slider bar.\n\nThe interact function (ipywidgets.interact) automatically creates user interface (UI) controls for exploring code and data interactively. 
It is the easiest way to get started using IPython’s widgets.\n\nFor more details see Using ipwidgets interact()", "from math import ceil\nfrom time import sleep\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nfrom ipywidgets import interact\nimport ipywidgets as widgets\n\nfrom pynq import Overlay\nfrom pynq.iop import Pmod_ADC, Pmod_DAC\n\nol = Overlay('base.bit')\nol.download()\n\ndac = Pmod_DAC(2)\nadc = Pmod_ADC(1)\n\ndef capture_samples(nmbr_of_samples): \n # Write to DAC, read from ADC, write to OLED\n delay = 0.0\n values = np.linspace(0, 2, nmbr_of_samples)\n samples = []\n for value in values:\n dac.write(value)\n sleep(delay)\n sample = adc.read()\n samples.append(sample[0])\n\n X = np.arange(nmbr_of_samples)\n plt.bar(X + 0.0, values[:nmbr_of_samples+1], \n facecolor='blue', edgecolor='white', \n width=0.5, label=\"Written_to_DAC\")\n plt.bar(X + 0.25, samples[:nmbr_of_samples+1], \n facecolor='red', edgecolor='white', \n width=0.5, label=\"Read_from_ADC\")\n\n plt.title('DAC-ADC Linearity')\n plt.xlabel('Sample_number')\n plt.ylabel('Volts')\n plt.legend(loc='upper left', frameon=False)\n \ninteract(capture_samples, \n nmbr_of_samples=widgets.IntSlider(\n min=5, max=30, step=5,\n value=10, continuous_update=False));\n\nplt.show()", "Contents" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
csaladenes/csaladenes.github.io
present/mcc2/PythonDataScienceHandbook/05.09-Principal-Component-Analysis.ipynb
mit
[ "<!--BOOK_INFORMATION-->\n<img align=\"left\" style=\"padding-right:10px;\" src=\"figures/PDSH-cover-small.png\">\nThis notebook contains an excerpt from the Python Data Science Handbook by Jake VanderPlas; the content is available on GitHub.\nThe text is released under the CC-BY-NC-ND license, and code is released under the MIT license. If you find this content useful, please consider supporting the work by buying the book!\n<!--NAVIGATION-->\n< In-Depth: Decision Trees and Random Forests | Contents | In-Depth: Manifold Learning >\n<a href=\"https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.09-Principal-Component-Analysis.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open and Execute in Google Colaboratory\"></a>\nIn Depth: Principal Component Analysis\nUp until now, we have been looking in depth at supervised learning estimators: those estimators that predict labels based on labeled training data.\nHere we begin looking at several unsupervised estimators, which can highlight interesting aspects of the data without reference to any known labels.\nIn this section, we explore what is perhaps one of the most broadly used of unsupervised algorithms, principal component analysis (PCA).\nPCA is fundamentally a dimensionality reduction algorithm, but it can also be useful as a tool for visualization, for noise filtering, for feature extraction and engineering, and much more.\nAfter a brief conceptual discussion of the PCA algorithm, we will see a couple examples of these further applications.\nWe begin with the standard imports:", "%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()", "Introducing Principal Component Analysis\nPrincipal component analysis is a fast and flexible unsupervised method for dimensionality reduction in data, which we saw briefly in Introducing Scikit-Learn.\nIts behavior is easiest to visualize by looking at a two-dimensional dataset.\nConsider the following 200 points:", "rng = np.random.RandomState(1)\nX = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T\nplt.scatter(X[:, 0], X[:, 1])\nplt.axis('equal');", "By eye, it is clear that there is a nearly linear relationship between the x and y variables.\nThis is reminiscent of the linear regression data we explored in In Depth: Linear Regression, but the problem setting here is slightly different: rather than attempting to predict the y values from the x values, the unsupervised learning problem attempts to learn about the relationship between the x and y values.\nIn principal component analysis, this relationship is quantified by finding a list of the principal axes in the data, and using those axes to describe the dataset.\nUsing Scikit-Learn's PCA estimator, we can compute this as follows:", "from sklearn.decomposition import PCA\npca = PCA(n_components=2)\npca.fit(X)", "The fit learns some quantities from the data, most importantly the \"components\" and \"explained variance\":", "print(pca.components_)\n\nprint(pca.explained_variance_)", "To see what these numbers mean, let's visualize them as vectors over the input data, using the \"components\" to define the direction of the vector, and the \"explained variance\" to define the squared-length of the vector:", "def draw_vector(v0, v1, ax=None):\n ax = ax or plt.gca()\n arrowprops=dict(arrowstyle='->',\n linewidth=2,\n shrinkA=0, shrinkB=0)\n ax.annotate('', v1, v0, 
arrowprops=arrowprops)\n\n# plot data\nplt.scatter(X[:, 0], X[:, 1], alpha=0.2)\nfor length, vector in zip(pca.explained_variance_, pca.components_):\n v = vector * 3 * np.sqrt(length)\n draw_vector(pca.mean_, pca.mean_ + v)\nplt.axis('equal');", "These vectors represent the principal axes of the data, and the length of the vector is an indication of how \"important\" that axis is in describing the distribution of the data—more precisely, it is a measure of the variance of the data when projected onto that axis.\nThe projection of each data point onto the principal axes are the \"principal components\" of the data.\nIf we plot these principal components beside the original data, we see the plots shown here:\n\nfigure source in Appendix\nThis transformation from data axes to principal axes is an affine transformation, which basically means it is composed of a translation, rotation, and uniform scaling.\nWhile this algorithm to find principal components may seem like just a mathematical curiosity, it turns out to have very far-reaching applications in the world of machine learning and data exploration.\nPCA as dimensionality reduction\nUsing PCA for dimensionality reduction involves zeroing out one or more of the smallest principal components, resulting in a lower-dimensional projection of the data that preserves the maximal data variance.\nHere is an example of using PCA as a dimensionality reduction transform:", "pca = PCA(n_components=1)\npca.fit(X)\nX_pca = pca.transform(X)\nprint(\"original shape: \", X.shape)\nprint(\"transformed shape:\", X_pca.shape)", "The transformed data has been reduced to a single dimension.\nTo understand the effect of this dimensionality reduction, we can perform the inverse transform of this reduced data and plot it along with the original data:", "X_new = pca.inverse_transform(X_pca)\nplt.scatter(X[:, 0], X[:, 1], alpha=0.2)\nplt.scatter(X_new[:, 0], X_new[:, 1], alpha=0.8)\nplt.axis('equal');", "The light points are the original data, while the dark points are the projected version.\nThis makes clear what a PCA dimensionality reduction means: the information along the least important principal axis or axes is removed, leaving only the component(s) of the data with the highest variance.\nThe fraction of variance that is cut out (proportional to the spread of points about the line formed in this figure) is roughly a measure of how much \"information\" is discarded in this reduction of dimensionality.\nThis reduced-dimension dataset is in some senses \"good enough\" to encode the most important relationships between the points: despite reducing the dimension of the data by 50%, the overall relationship between the data points are mostly preserved.\nPCA for visualization: Hand-written digits\nThe usefulness of the dimensionality reduction may not be entirely apparent in only two dimensions, but becomes much more clear when looking at high-dimensional data.\nTo see this, let's take a quick look at the application of PCA to the digits data we saw in In-Depth: Decision Trees and Random Forests.\nWe start by loading the data:", "from sklearn.datasets import load_digits\ndigits = load_digits()\ndigits.data.shape", "Recall that the data consists of 8×8 pixel images, meaning that they are 64-dimensional.\nTo gain some intuition into the relationships between these points, we can use PCA to project them to a more manageable number of dimensions, say two:", "pca = PCA(2) # project from 64 to 2 dimensions\nprojected = 
pca.fit_transform(digits.data)\nprint(digits.data.shape)\nprint(projected.shape)\n\ndigits.target\n\ni=int(np.random.random()*1797)\nplt.imshow(digits.data[i].reshape(8,8),cmap='Blues')\ndigits.target[i]\n\ndigits.data[i].reshape(8,8)", "We can now plot the first two principal components of each point to learn about the data:", "plt.scatter(projected[:, 0], projected[:, 1],\n c=digits.target, edgecolor='none', alpha=0.5,\n cmap=plt.cm.get_cmap('Spectral', 10))\nplt.xlabel('component 1')\nplt.ylabel('component 2')\nplt.colorbar();", "Recall what these components mean: the full data is a 64-dimensional point cloud, and these points are the projection of each data point along the directions with the largest variance.\nEssentially, we have found the optimal stretch and rotation in 64-dimensional space that allows us to see the layout of the digits in two dimensions, and have done this in an unsupervised manner—that is, without reference to the labels.\nWhat do the components mean?\nWe can go a bit further here, and begin to ask what the reduced dimensions mean.\nThis meaning can be understood in terms of combinations of basis vectors.\nFor example, each image in the training set is defined by a collection of 64 pixel values, which we will call the vector $x$:\n$$\nx = [x_1, x_2, x_3 \\cdots x_{64}]\n$$\nOne way we can think about this is in terms of a pixel basis.\nThat is, to construct the image, we multiply each element of the vector by the pixel it describes, and then add the results together to build the image:\n$$\n{\\rm image}(x) = x_1 \\cdot{\\rm (pixel~1)} + x_2 \\cdot{\\rm (pixel~2)} + x_3 \\cdot{\\rm (pixel~3)} \\cdots x_{64} \\cdot{\\rm (pixel~64)}\n$$\nOne way we might imagine reducing the dimension of this data is to zero out all but a few of these basis vectors.\nFor example, if we use only the first eight pixels, we get an eight-dimensional projection of the data, but it is not very reflective of the whole image: we've thrown out nearly 90% of the pixels!\n\nfigure source in Appendix\nThe upper row of panels shows the individual pixels, and the lower row shows the cumulative contribution of these pixels to the construction of the image.\nUsing only eight of the pixel-basis components, we can only construct a small portion of the 64-pixel image.\nWere we to continue this sequence and use all 64 pixels, we would recover the original image.\nBut the pixel-wise representation is not the only choice of basis. 
We can also use other basis functions, which each contain some pre-defined contribution from each pixel, and write something like\n$$\nimage(x) = {\\rm mean} + x_1 \\cdot{\\rm (basis~1)} + x_2 \\cdot{\\rm (basis~2)} + x_3 \\cdot{\\rm (basis~3)} \\cdots\n$$\nPCA can be thought of as a process of choosing optimal basis functions, such that adding together just the first few of them is enough to suitably reconstruct the bulk of the elements in the dataset.\nThe principal components, which act as the low-dimensional representation of our data, are simply the coefficients that multiply each of the elements in this series.\nThis figure shows a similar depiction of reconstructing this digit using the mean plus the first eight PCA basis functions:\n\nfigure source in Appendix\nUnlike the pixel basis, the PCA basis allows us to recover the salient features of the input image with just a mean plus eight components!\nThe amount of each pixel in each component is the corollary of the orientation of the vector in our two-dimensional example.\nThis is the sense in which PCA provides a low-dimensional representation of the data: it discovers a set of basis functions that are more efficient than the native pixel-basis of the input data.\nChoosing the number of components\nA vital part of using PCA in practice is the ability to estimate how many components are needed to describe the data.\nThis can be determined by looking at the cumulative explained variance ratio as a function of the number of components:", "pca = PCA().fit(digits.data)\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.xlabel('number of components')\nplt.ylabel('cumulative explained variance');", "This curve quantifies how much of the total, 64-dimensional variance is contained within the first $N$ components.\nFor example, we see that with the digits the first 10 components contain approximately 75% of the variance, while you need around 50 components to describe close to 100% of the variance.\nHere we see that our two-dimensional projection loses a lot of information (as measured by the explained variance) and that we'd need about 20 components to retain 90% of the variance. 
Looking at this plot for a high-dimensional dataset can help you understand the level of redundancy present in multiple observations.\nPCA as Noise Filtering\nPCA can also be used as a filtering approach for noisy data.\nThe idea is this: any components with variance much larger than the effect of the noise should be relatively unaffected by the noise.\nSo if you reconstruct the data using just the largest subset of principal components, you should be preferentially keeping the signal and throwing out the noise.\nLet's see how this looks with the digits data.\nFirst we will plot several of the input noise-free data:", "def plot_digits(data):\n fig, axes = plt.subplots(4, 10, figsize=(10, 4),\n subplot_kw={'xticks':[], 'yticks':[]},\n gridspec_kw=dict(hspace=0.1, wspace=0.1))\n for i, ax in enumerate(axes.flat):\n ax.imshow(data[i].reshape(8, 8),\n cmap='binary', interpolation='nearest',\n clim=(0, 16))\nplot_digits(digits.data)", "Now lets add some random noise to create a noisy dataset, and re-plot it:", "np.random.seed(42)\nnoisy = np.random.normal(digits.data, 4)\nplot_digits(noisy)", "It's clear by eye that the images are noisy, and contain spurious pixels.\nLet's train a PCA on the noisy data, requesting that the projection preserve 50% of the variance:", "pca = PCA(0.50).fit(noisy)\npca.n_components_", "Here 50% of the variance amounts to 12 principal components.\nNow we compute these components, and then use the inverse of the transform to reconstruct the filtered digits:", "components = pca.transform(noisy)\nfiltered = pca.inverse_transform(components)\nplot_digits(filtered)", "This signal preserving/noise filtering property makes PCA a very useful feature selection routine—for example, rather than training a classifier on very high-dimensional data, you might instead train the classifier on the lower-dimensional representation, which will automatically serve to filter out random noise in the inputs.\nExample: Eigenfaces\nEarlier we explored an example of using a PCA projection as a feature selector for facial recognition with a support vector machine (see In-Depth: Support Vector Machines).\nHere we will take a look back and explore a bit more of what went into that.\nRecall that we were using the Labeled Faces in the Wild dataset made available through Scikit-Learn:", "from sklearn.datasets import fetch_lfw_people\nfaces = fetch_lfw_people(min_faces_per_person=60)\nprint(faces.target_names)\nprint(faces.images.shape)", "Let's take a look at the principal axes that span this dataset.\nBecause this is a large dataset, we will use RandomizedPCA—it contains a randomized method to approximate the first $N$ principal components much more quickly than the standard PCA estimator, and thus is very useful for high-dimensional data (here, a dimensionality of nearly 3,000).\nWe will take a look at the first 150 components:", "# from sklearn.decomposition import RandomizedPCA\nfrom sklearn.decomposition import PCA as RandomizedPCA\npca = RandomizedPCA(150)\npca.fit(faces.data)", "In this case, it can be interesting to visualize the images associated with the first several principal components (these components are technically known as \"eigenvectors,\"\nso these types of images are often called \"eigenfaces\").\nAs you can see in this figure, they are as creepy as they sound:", "fig, axes = plt.subplots(3, 8, figsize=(9, 4),\n subplot_kw={'xticks':[], 'yticks':[]},\n gridspec_kw=dict(hspace=0.1, wspace=0.1))\nfor i, ax in enumerate(axes.flat):\n ax.imshow(pca.components_[i].reshape(62, 47), 
cmap='bone')", "The results are very interesting, and give us insight into how the images vary: for example, the first few eigenfaces (from the top left) seem to be associated with the angle of lighting on the face, and later principal vectors seem to be picking out certain features, such as eyes, noses, and lips.\nLet's take a look at the cumulative variance of these components to see how much of the data information the projection is preserving:", "plt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.xlabel('number of components')\nplt.ylabel('cumulative explained variance');", "We see that these 150 components account for just over 90% of the variance.\nThat would lead us to believe that using these 150 components, we would recover most of the essential characteristics of the data.\nTo make this more concrete, we can compare the input images with the images reconstructed from these 150 components:", "# Compute the components and projected faces\npca = RandomizedPCA(150).fit(faces.data)\ncomponents = pca.transform(faces.data)\nprojected = pca.inverse_transform(components)\n\n# Plot the results\nfig, ax = plt.subplots(2, 10, figsize=(10, 2.5),\n subplot_kw={'xticks':[], 'yticks':[]},\n gridspec_kw=dict(hspace=0.1, wspace=0.1))\nfor i in range(10):\n ax[0, i].imshow(faces.data[i].reshape(62, 47), cmap='binary_r')\n ax[1, i].imshow(projected[i].reshape(62, 47), cmap='binary_r')\n \nax[0, 0].set_ylabel('full-dim\\ninput')\nax[1, 0].set_ylabel('150-dim\\nreconstruction');", "The top row here shows the input images, while the bottom row shows the reconstruction of the images from just 150 of the ~3,000 initial features.\nThis visualization makes clear why the PCA feature selection used in In-Depth: Support Vector Machines was so successful: although it reduces the dimensionality of the data by nearly a factor of 20, the projected images contain enough information that we might, by eye, recognize the individuals in the image.\nWhat this means is that our classification algorithm needs to be trained on 150-dimensional data rather than 3,000-dimensional data, which depending on the particular algorithm we choose, can lead to a much more efficient classification.\nPrincipal Component Analysis Summary\nIn this section we have discussed the use of principal component analysis for dimensionality reduction, for visualization of high-dimensional data, for noise filtering, and for feature selection within high-dimensional data.\nBecause of the versatility and interpretability of PCA, it has been shown to be effective in a wide variety of contexts and disciplines.\nGiven any high-dimensional dataset, I tend to start with PCA in order to visualize the relationship between points (as we did with the digits), to understand the main variance in the data (as we did with the eigenfaces), and to understand the intrinsic dimensionality (by plotting the explained variance ratio).\nCertainly PCA is not useful for every high-dimensional dataset, but it offers a straightforward and efficient path to gaining insight into high-dimensional data.\nPCA's main weakness is that it tends to be highly affected by outliers in the data.\nFor this reason, many robust variants of PCA have been developed, many of which act to iteratively discard data points that are poorly described by the initial components.\nScikit-Learn contains a couple interesting variants on PCA, including RandomizedPCA and SparsePCA, both also in the sklearn.decomposition submodule.\nRandomizedPCA, which we saw earlier, uses a non-deterministic method 
to quickly approximate the first few principal components in very high-dimensional data, while SparsePCA introduces a regularization term (see In Depth: Linear Regression) that serves to enforce sparsity of the components.\nIn the following sections, we will look at other unsupervised learning methods that build on some of the ideas of PCA.\n<!--NAVIGATION-->\n< In-Depth: Decision Trees and Random Forests | Contents | In-Depth: Manifold Learning >\n<a href=\"https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.09-Principal-Component-Analysis.ipynb\"><img align=\"left\" src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\" title=\"Open and Execute in Google Colaboratory\"></a>" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive2/recommendation_systems/solutions/content_based_preproc.ipynb
apache-2.0
[ "Create Datasets for the Content-based Filter\nThis notebook builds the data you will use for creating our content based model. You'll collect the data via a collection of SQL queries from the publicly available Kurier.at dataset in BigQuery.\nKurier.at is an Austrian newsite. The goal of these labs is to recommend an article for a visitor to the site. In this notebook, you collect the data for training, in the subsequent notebook you train the recommender model. \nThis notebook illustrates:\n* How to pull data from BigQuery table and write to local files.\n* How to make reproducible train and test splits.", "import os\nimport tensorflow as tf\nimport numpy as np\nfrom google.cloud import bigquery \n\nPROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID\nBUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME\nREGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1\n\n# do not change these\nos.environ['PROJECT'] = PROJECT\nos.environ['BUCKET'] = BUCKET\nos.environ['REGION'] = REGION\nos.environ['TFVERSION'] = '2.1'\n\n%%bash\ngcloud config set project $PROJECT\ngcloud config set compute/region $REGION", "You will use this helper function to write lists containing article ids, categories, and authors for each article in our database to local file.", "def write_list_to_disk(my_list, filename):\n with open(filename, 'w') as f:\n for item in my_list:\n line = \"%s\\n\" % item\n f.write(line)", "Pull data from BigQuery\nThe cell below creates a local text file containing all the article ids (i.e. 'content ids') in the dataset. \nHave a look at the original dataset in BigQuery. Then read through the query below and make sure you understand what it is doing.", "sql=\"\"\"\n#standardSQL\n\nSELECT \n (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id \nFROM `cloud-training-demos.GA360_test.ga_sessions_sample`, \n UNNEST(hits) AS hits\nWHERE \n # only include hits on pages\n hits.type = \"PAGE\"\n AND (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL\nGROUP BY\n content_id\n \n\"\"\"\n\ncontent_ids_list = bigquery.Client().query(sql).to_dataframe()['content_id'].tolist()\nwrite_list_to_disk(content_ids_list, \"content_ids.txt\")\nprint(\"Some sample content IDs {}\".format(content_ids_list[:3]))\nprint(\"The total number of articles is {}\".format(len(content_ids_list)))", "There should be 15,634 articles in the database.\nNext, you'll create a local file which contains a list of article categories and a list of article authors.\nNote the change in the index when pulling the article category or author information. 
Also, you are using the first author of the article to create our author list.\nRefer back to the original dataset, use the hits.customDimensions.index field to verify the correct index.", "sql=\"\"\"\n#standardSQL\nSELECT \n (SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category \nFROM `cloud-training-demos.GA360_test.ga_sessions_sample`, \n UNNEST(hits) AS hits\nWHERE \n # only include hits on pages\n hits.type = \"PAGE\"\n AND (SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL\nGROUP BY \n category\n\"\"\"\ncategories_list = bigquery.Client().query(sql).to_dataframe()['category'].tolist()\nwrite_list_to_disk(categories_list, \"categories.txt\")\nprint(categories_list)", "The categories are 'News', 'Stars & Kultur', and 'Lifestyle'.\nWhen creating the author list, you'll only use the first author information for each article.", "sql=\"\"\"\n#standardSQL\nSELECT\n REGEXP_EXTRACT((SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)), r\"^[^,]+\") AS first_author \nFROM `cloud-training-demos.GA360_test.ga_sessions_sample`, \n UNNEST(hits) AS hits\nWHERE \n # only include hits on pages\n hits.type = \"PAGE\"\n AND (SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL\nGROUP BY \n first_author\n\"\"\"\nauthors_list = bigquery.Client().query(sql).to_dataframe()['first_author'].tolist()\nwrite_list_to_disk(authors_list, \"authors.txt\")\nprint(\"Some sample authors {}\".format(authors_list[:10]))\nprint(\"The total number of authors is {}\".format(len(authors_list)))", "There should be 385 authors in the database. \nCreate train and test sets\nIn this section, you will create the train/test split of our data for training our model. You use the concatenated values for visitor id and content id to create a farm fingerprint, taking approximately 90% of the data for the training set and 10% for the test set.", "sql=\"\"\"\nWITH site_history as (\n SELECT\n fullVisitorId as visitor_id,\n (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id,\n (SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category, \n (SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title,\n (SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) AS author_list,\n SPLIT(RPAD((SELECT MAX(IF(index=4, value, NULL)) FROM UNNEST(hits.customDimensions)), 7), '.') as year_month_array,\n LEAD(hits.customDimensions, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) as nextCustomDimensions\n FROM \n `cloud-training-demos.GA360_test.ga_sessions_sample`, \n UNNEST(hits) AS hits\n WHERE \n # only include hits on pages\n hits.type = \"PAGE\"\n AND\n fullVisitorId IS NOT NULL\n AND\n hits.time != 0\n AND\n hits.time IS NOT NULL\n AND\n (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL\n)\nSELECT\n visitor_id,\n content_id,\n category,\n REGEXP_REPLACE(title, r\",\", \"\") as title,\n REGEXP_EXTRACT(author_list, r\"^[^,]+\") as author,\n DATE_DIFF(DATE(CAST(year_month_array[OFFSET(0)] AS INT64), CAST(year_month_array[OFFSET(1)] AS INT64), 1), DATE(1970,1,1), MONTH) as months_since_epoch,\n (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) as next_content_id\nFROM\n site_history\nWHERE (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) IS NOT NULL\n AND ABS(MOD(FARM_FINGERPRINT(CONCAT(visitor_id, content_id)), 10)) < 
9\n\"\"\"\ntraining_set_df = bigquery.Client().query(sql).to_dataframe()\ntraining_set_df.to_csv('training_set.csv', header=False, index=False, encoding='utf-8')\ntraining_set_df.head()\n\nsql=\"\"\"\nWITH site_history as (\n SELECT\n fullVisitorId as visitor_id,\n (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id,\n (SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category, \n (SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title,\n (SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) AS author_list,\n SPLIT(RPAD((SELECT MAX(IF(index=4, value, NULL)) FROM UNNEST(hits.customDimensions)), 7), '.') as year_month_array,\n LEAD(hits.customDimensions, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) as nextCustomDimensions\n FROM \n `cloud-training-demos.GA360_test.ga_sessions_sample`, \n UNNEST(hits) AS hits\n WHERE \n # only include hits on pages\n hits.type = \"PAGE\"\n AND\n fullVisitorId IS NOT NULL\n AND\n hits.time != 0\n AND\n hits.time IS NOT NULL\n AND\n (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL\n)\nSELECT\n visitor_id,\n content_id,\n category,\n REGEXP_REPLACE(title, r\",\", \"\") as title,\n REGEXP_EXTRACT(author_list, r\"^[^,]+\") as author,\n DATE_DIFF(DATE(CAST(year_month_array[OFFSET(0)] AS INT64), CAST(year_month_array[OFFSET(1)] AS INT64), 1), DATE(1970,1,1), MONTH) as months_since_epoch,\n (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) as next_content_id\nFROM\n site_history\nWHERE (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) IS NOT NULL\n AND ABS(MOD(FARM_FINGERPRINT(CONCAT(visitor_id, content_id)), 10)) >= 9\n\"\"\"\ntest_set_df = bigquery.Client().query(sql).to_dataframe()\ntest_set_df.to_csv('test_set.csv', header=False, index=False, encoding='utf-8')\ntest_set_df.head()", "Let's have a look at the two csv files you just created containing the training and test set. You'll also do a line count of both files to confirm that you have achieved an approximate 90/10 train/test split.\nIn the next notebook, Content Based Filtering you will build a model to recommend an article given information about the current article being read, such as the category, title, author, and publish date.", "%%bash\nwc -l *_set.csv\n\n!head *_set.csv" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
wuafeing/Python3-Tutorial
01 data structures and algorithms/01.07 keep dict in order.ipynb
gpl-3.0
[ "Previous\n1.7 字典排序\n问题\n你想创建一个字典,并且在迭代或序列化这个字典的时候能够控制元素的顺序。\n解决方案\n为了能控制一个字典中元素的顺序,你可以使用 collections 模块中的 OrderedDict 类。 在迭代操作的时候它会保持元素被插入时的顺序,示例如下:", "from collections import OrderedDict\n\nd = OrderedDict()\nd[\"foo\"] = 1\nd[\"bar\"] = 2\nd[\"spam\"] = 3\nd[\"grok\"] = 4\n# Outputs \"foo 1\", \"bar 2\", \"spam 3\", \"grok 4\"\nfor key in d:\n print(key, d[key])", "当你想要构建一个将来需要序列化或编码成其他格式的映射的时候, OrderedDict 是非常有用的。 比如,你想精确控制以 JSON 编码后字段的顺序,你可以先使用 OrderedDict 来构建这样的数据:", "import json\njson.dumps(d)", "讨论\nOrderedDict 内部维护着一个根据键插入顺序排序的双向链表。每次当一个新的元素插入进来的时候, 它会被放到链表的尾部。对于一个已经存在的键的重复赋值不会改变键的顺序。\n需要注意的是,一个 OrderedDict 的大小是一个普通字典的两倍,因为它内部维护着另外一个链表。 所以如果你要构建一个需要大量 OrderedDict 实例的数据结构的时候(比如读取 100,000 行 CSV 数据到一个 OrderedDict 列表中去), 那么你就得仔细权衡一下是否使用 OrderedDict 带来的好处要大过额外内存消耗的影响。\nNext" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
diegocavalca/Studies
phd-thesis/Benchmarking 2 - Identificação de Cargas através de Representação Visual de Séries Temporais-Copy1.ipynb
cc0-1.0
[ "Identificação de Cargas através de Representação Visual de Séries Temporais\n\nArtigo: Imaging NILM Time-series\nURL: https://link.springer.com/chapter/10.1007/978-3-030-20257-6_16\nSource-code: https://github.com/LampriniKyrk/Imaging-NILM-time-series\nEstratégia proposta: converter série-temporal em imagens, extrair características com DNN (VG16) e classificação supervisionada.\n\nCarregando ambiente e parâmetros", "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nplt.style.use('ggplot')\nplt.rc('text', usetex=False)\nfrom matplotlib.image import imsave\nimport pandas as pd\nimport pickle as cPickle\nimport os, sys\nfrom math import *\nfrom pprint import pprint\nfrom tqdm import tqdm_notebook\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom PIL import Image\nfrom glob import glob\nfrom IPython.display import display\n\nfrom tensorflow.keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.preprocessing import image as keras_image\nfrom tensorflow.keras.applications.vgg16 import preprocess_input\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score\n\nREDD_RESOURCES_PATH = 'datasets/REDD'\nBENCHMARKING_RESOURCES_PATH = 'benchmarkings/Imaging-NILM-time-series/'\n\nsys.path.append(os.path.join(BENCHMARKING_RESOURCES_PATH, ''))\n\nfrom serie2QMlib import *", "Pré-processamento dos dados", "# Define sliding window\ndef window_time_series(series, n, step=1):\n # print \"in window_time_series\",series\n if step < 1.0:\n step = max(int(step * n), 1)\n return [series[i:i + n] for i in range(0, len(series) - n + 1, step)]\n\n# PAA function\ndef paa(series, now, opw):\n if now == None:\n now = len(series) / opw\n if opw == None:\n opw = ceil(len(series) / now)\n return [sum(series[i * opw: (i + 1) * opw]) / float(opw) for i in range(now)]\n\n\ndef standardize(serie):\n dev = np.sqrt(np.var(serie))\n mean = np.mean(serie)\n return [(each - mean) / dev for each in serie]\n\n# Rescale data into [0,1]\ndef rescale(serie):\n maxval = max(serie)\n minval = min(serie)\n gap = float(maxval - minval)\n return [(each - minval) / gap for each in serie]\n\n# Rescale data into [-1,1]\ndef rescaleminus(serie):\n maxval = max(serie)\n minval = min(serie)\n gap = float(maxval - minval)\n return [(each - minval) / gap * 2 - 1 for each in serie]\n\n\n# Generate quantile bins\ndef QMeq(series, Q):\n q = pd.qcut(list(set(series)), Q)\n dic = dict(zip(set(series), q.labels))\n MSM = np.zeros([Q, Q])\n label = []\n for each in series:\n label.append(dic[each])\n for i in range(0, len(label) - 1):\n MSM[label[i]][label[i + 1]] += 1\n for i in xrange(Q):\n if sum(MSM[i][:]) == 0:\n continue\n MSM[i][:] = MSM[i][:] / sum(MSM[i][:])\n return np.array(MSM), label, q.levels\n\n\n# Generate quantile bins when equal values exist in the array (slower than QMeq)\ndef QVeq(series, Q):\n q = pd.qcut(list(set(series)), Q)\n dic = dict(zip(set(series), q.labels))\n qv = np.zeros([1, Q])\n label = []\n for each in series:\n label.append(dic[each])\n for i in range(0, len(label)):\n qv[0][label[i]] += 1.0\n return np.array(qv[0][:] / sum(qv[0][:])), label\n\n\n# Generate Markov Matrix given a spesicif number of quantile bins\ndef paaMarkovMatrix(paalist, level):\n paaindex = []\n for each in paalist:\n for k in range(len(level)):\n lower = float(level[k][1:-1].split(',')[0])\n upper = float(level[k][1:-1].split(',')[-1])\n if each >= lower and each <= 
upper:\n paaindex.append(k)\n return paaindex\n\n\n# Generate Image (.png) files of generated images\ndef gengramImgs(image, paaimages, label, name, path):\n import operator\n index = zip(range(len(label)), label)\n index.sort(key=operator.itemgetter(1))\n count = 0\n for p, q in index:\n count += 1\n #print 'generate fig of pdfs:', p\n plt.ioff();\n fig = plt.figure();\n fig.set_size_inches((1,1))\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n plt.imshow(paaimages[p], aspect='equal');\n plt.savefig(path+\"/fig-\"+name+\".png\")\n plt.close(fig)\n if count > 30:\n break\n\n# Generate pdf files of trainsisted array in porlar coordinates\ndef genpolarpdfs(raw, label, name):\n import matplotlib.backends.backend_pdf as bpdf\n import operator\n index = zip(range(len(label)), label)\n index.sort(key=operator.itemgetter(1))\n with bpdf.PdfPages(name) as pdf:\n for p, q in index:\n #print 'generate fig of pdfs:', p\n plt.ioff();\n r = np.array(range(1, length + 1));\n r = r / 100.0;\n theta = np.arccos(np.array(rescaleminus(standardize(raw[p][1:])))) * 2;\n fig = plt.figure();\n plt.suptitle(datafile + '_' + str(label[p]));\n ax = plt.subplot(111, polar=True);\n ax.plot(theta, r, color='r', linewidth=3);\n pdf.savefig(fig)\n plt.close(fig)\n pdf.close\n\n\n# return the max value instead of mean value in PAAs\ndef maxsample(mat, s):\n retval = []\n x, y, z = mat.shape\n l = np.int(np.floor(y / float(s)))\n for each in mat:\n block = []\n for i in range(s):\n block.append([np.max(each[i * l:(i + 1) * l, j * l:(j + 1) * l]) for j in xrange(s)])\n retval.append(np.asarray(block))\n return np.asarray(retval)\n\n\n# Pickle the data and save in the pkl file\ndef pickledata(mat, label, train, name):\n #print '..pickling data:', name\n traintp = (mat[:train], label[:train])\n testtp = (mat[train:], label[train:])\n f = file('fridge/' + name + '.pkl', 'wb')\n pickletp = [traintp, testtp]\n cPickle.dump(pickletp, f, protocol=cPickle.HIGHEST_PROTOCOL)\n\n\ndef pickle3data(mat, label, train, name):\n #print '..pickling data:', name\n traintp = (mat[:train], label[:train])\n validtp = (mat[:train], label[:train])\n testtp = (mat[train:], label[train:])\n f = file(name + '.pkl', 'wb')\n pickletp = [traintp, validtp, testtp]\n cPickle.dump(pickletp, f, protocol=cPickle.HIGHEST_PROTOCOL)\n \n\n", "Parâmetros gerais dos dados utilizados na modelagem (treino e teste)", "#################################\n###Define the parameters here####\n#################################\n\ndatafiles = ['dish washer1-1'] # Data file name (TODO: alterar aqui)\ntrains = [250] # Number of training instances (because we assume training and test data are mixed in one file)\nsize = [32] # PAA size\nGAF_type = 'GADF' # GAF type: GASF, GADF\nsave_PAA = True # Save the GAF with or without dimension reduction by PAA: True, False\nrescale_type = 'Zero' # Rescale the data into [0,1] or [-1,1]: Zero, Minusone\n\ndirectory = os.path.join(BENCHMARKING_RESOURCES_PATH, 'GeneratedImages') #the directory will be created if it does not already exist. 
Here the images will be stored\nif not os.path.exists(directory):\n os.makedirs(directory)", "Gerando dados\nA fim de normalizar os benchmarkings, serão utilizados os dados das séries do bechmarking 1 para o processo de Extração de Características (conversão serie2image - benchmarking 2).\nExtração de Características", "def serie2image(serie, GAF_type = 'GADF', scaling = False, s = 32):\n \"\"\"\n Customized function to perform Series to Image conversion.\n \n Args:\n serie : original input data (time-serie chunk of appliance/main data - REDD - benchmarking 1)\n GAF_type : GADF / GASF (Benchmarking 2 process)\n s : Size of output paaimage originated from serie [ INFO: PAA = (32, 32) / noPAA = (50, 50) ]\n \"\"\"\n image = None\n paaimage = None\n patchimage = None\n matmatrix = None\n fullmatrix = None\n\n std_data = serie\n if scaling:\n std_data = rescale(std_data)\n paalistcos = paa(std_data, s, None)\n # paalistcos = rescale(paa(each[1:],s,None))\n \n # paalistcos = rescaleminus(paa(each[1:],s,None))\n\n ################raw###################\n datacos = np.array(std_data)\n #print(datacos)\n datasin = np.sqrt(1 - np.array(std_data) ** 2)\n #print(datasin)\n\n paalistcos = np.array(paalistcos)\n paalistsin = np.sqrt(1 - paalistcos ** 2)\n\n datacos = np.matrix(datacos)\n datasin = np.matrix(datasin)\n\n paalistcos = np.matrix(paalistcos)\n paalistsin = np.matrix(paalistsin)\n if GAF_type == 'GASF':\n paamatrix = paalistcos.T * paalistcos - paalistsin.T * paalistsin\n matrix = np.array(datacos.T * datacos - datasin.T * datasin)\n elif GAF_type == 'GADF':\n paamatrix = paalistsin.T * paalistcos - paalistcos.T * paalistsin\n matrix = np.array(datasin.T * datacos - datacos.T * datasin)\n else:\n sys.exit('Unknown GAF type!')\n \n #label = np.asarray(label)\n image = matrix\n paaimage = np.array(paamatrix)\n matmatrix = np.asarray(matmatrix)\n fullmatrix = np.asarray(fullmatrix)\n #\n # maximage = maxsample(image, s)\n # maxmatrix = np.asarray(np.asarray([each.flatten() for each in maximage]))\n \n if save_PAA == False:\n finalmatrix = matmatrix\n else:\n finalmatrix = fullmatrix\n\n # uncomment below if needed data in pickled form\n # pickledata(finalmatrix, label, train, datafilename)\n\n #gengramImgs(image, paaimage, label, directory)\n \n return image, paaimage, matmatrix, fullmatrix, finalmatrix\n\n# Reading power dataset (benchmark 1)\nBENCHMARKING1_RESOURCES_PATH = \"benchmarkings/cs446 project-electric-load-identification-using-machine-learning/\"\n\nsize_paa = 32\nsize_without_paa = 30\n\n# devices to be used in training and testing\nuse_idx = np.array([3,4,6,7,10,11,13,17,19])\n\nlabel_columns_idx = [\"APLIANCE_{}\".format(i) for i in use_idx]", "Conjunto de Treino", "print(\"Processing train dataset (Series to Images)...\")\n\n# Train...\ntrain_power_chunks = np.load( os.path.join(BENCHMARKING1_RESOURCES_PATH, 'datasets/train_power_chunks.npy') )\ntrain_labels_binary = np.load( os.path.join(BENCHMARKING1_RESOURCES_PATH, 'datasets/train_labels_binary.npy') )\n\ndata_paa_train = []\ndata_without_paa_train = []\n\n#for idx, row in tqdm_notebook(df_power_chunks.iterrows(), total = df_power_chunks.shape[0]):\nfor idx, power_chunk in tqdm_notebook(enumerate(train_power_chunks), total = train_power_chunks.shape[0]):\n\n #serie = row[attr_columns_idx].tolist() \n #print(serie)\n #labels = row[label_columns_idx].astype('int').astype('str').tolist()\n serie = power_chunk\n labels = train_labels_binary[idx, :].astype('str').tolist()\n labels_str = ''.join(labels)\n \n for g_Type in ['GASF', 
'GADF']:\n\n #image, paaimage, matmatrix, fullmatrix, finalmatrix = serie2image(serie, g_Type)\n image, paaimage, _, _, _ = serie2image(serie, g_Type, scaling=True)\n \n # Persist image data files (PAA - noPAA)\n np.save(\n os.path.join( \n BENCHMARKING_RESOURCES_PATH, \n \"GeneratedMatrixImages\", \n \"{}_WITHOUTPAA_{}_train_{}.npy\".format(idx, g_Type, labels_str) \n ), \n image\n )\n # x is the array you want to save \n imsave(\n os.path.join( \n BENCHMARKING_RESOURCES_PATH, \n \"GeneratedImages\", \n \"{}_WITHOUTPAA_{}_train_{}.png\".format(idx, g_Type, labels_str) \n ), \n image\n )\n data_without_paa_train.append( list([idx, g_Type]) + list(image.flatten()) + list(labels) )\n \n np.save(\n os.path.join( \n BENCHMARKING_RESOURCES_PATH, \n \"GeneratedMatrixImages\", \n \"{}_PAA_{}_train_{}.npy\".format(idx, g_Type, labels_str) \n ), \n paaimage\n )\n imsave(\n os.path.join( \n BENCHMARKING_RESOURCES_PATH, \n \"GeneratedImages\", \n \"{}_PAA_{}_train_{}.png\".format(idx, g_Type, labels_str) \n ),\n paaimage\n )\n data_paa_train.append( list([idx, g_Type]) + list(paaimage.flatten()) + list(labels) )\n\n# VIsualizgin some results...\nplt.figure(figsize=(8,6));\n\nplt.suptitle(g_Type + ' series');\nax1 = plt.subplot(121);\nplt.title(g_Type + ' without PAA');\nplt.imshow(image);\ndivider = make_axes_locatable(ax1);\ncax = divider.append_axes(\"right\", size=\"2.5%\", pad=0.2);\nplt.colorbar(cax=cax);\n\nax2 = plt.subplot(122);\nplt.title(g_Type + ' with PAA');\nplt.imshow(paaimage);\n\nprint('Saving processed data...')\ndf_without_paa_train = pd.DataFrame(\n data = data_without_paa_train,\n columns = list([\"IDX\", \"TYPE\"]) + [\"DIMESION_{}\".format(d) for d in range(size_without_paa*size_without_paa)] + list(label_columns_idx)\n)\ndf_without_paa_train.to_csv(os.path.join( BENCHMARKING_RESOURCES_PATH, \"datasets\", \"df_without_paa_train.csv\"))\n\ndf_paa_train = pd.DataFrame(\n data = data_paa_train,\n columns = list([\"IDX\", \"TYPE\"]) + [\"DIMESION_{}\".format(d) for d in range(size_paa*size_paa)] + list(label_columns_idx)\n)\ndf_paa_train.to_csv(os.path.join( BENCHMARKING_RESOURCES_PATH, \"datasets\", \"df_paa_train.csv\"))", "Conjunto de teste", "print(\"Processing test dataset (Series to Images)...\")\n\n# Test...\ntest_power_chunks = np.load( os.path.join(BENCHMARKING1_RESOURCES_PATH, 'datasets/test_power_chunks.npy') )\ntest_labels_binary = np.load( os.path.join(BENCHMARKING1_RESOURCES_PATH, 'datasets/test_labels_binary.npy') )\n\ndata_paa_test = []\ndata_without_paa_test = []\n\n#for idx, row in tqdm_notebook(df_power_chunks.iterrows(), total = df_power_chunks.shape[0]):\nfor idx, power_chunk in tqdm_notebook(enumerate(test_power_chunks), total = test_power_chunks.shape[0]):\n\n #serie = row[attr_columns_idx].tolist() \n #print(serie)\n #labels = row[label_columns_idx].astype('int').astype('str').tolist()\n serie = power_chunk\n labels = test_labels_binary[idx, :].astype('str').tolist()\n labels_str = ''.join(labels)\n \n for g_Type in ['GASF', 'GADF']:\n\n #image, paaimage, matmatrix, fullmatrix, finalmatrix = serie2image(serie, g_Type)\n image, paaimage, _, _, _ = serie2image(serie, g_Type, scaling=True)\n \n # Persist image data files (PAA - noPAA)\n np.save(\n os.path.join( \n BENCHMARKING_RESOURCES_PATH, \n \"GeneratedMatrixImages\", \n \"{}_WITHOUTPAA_{}_test_{}.npy\".format(idx, g_Type, labels_str) \n ), \n image\n )\n # x is the array you want to save \n imsave(\n os.path.join( \n BENCHMARKING_RESOURCES_PATH, \n \"GeneratedImages\", \n 
\"{}_WITHOUTPAA_{}_test_{}.png\".format(idx, g_Type, labels_str) \n ), \n image\n )\n data_without_paa_test.append( list([idx, g_Type]) + list(image.flatten()) + list(labels) )\n \n np.save(\n os.path.join( \n BENCHMARKING_RESOURCES_PATH, \n \"GeneratedMatrixImages\", \n \"{}_PAA_{}_test_{}.npy\".format(idx, g_Type, labels_str) \n ), \n paaimage\n )\n imsave(\n os.path.join( \n BENCHMARKING_RESOURCES_PATH, \n \"GeneratedImages\", \n \"{}_PAA_{}_test_{}.png\".format(idx, g_Type, labels_str) \n ),\n paaimage\n )\n data_paa_test.append( list([idx, g_Type]) + list(paaimage.flatten()) + list(labels) )\n\n# VIsualizgin some results...\nplt.figure(figsize=(8,6));\n\nplt.suptitle(g_Type + ' series');\nax1 = plt.subplot(121);\nplt.title(g_Type + ' without PAA');\nplt.imshow(image);\ndivider = make_axes_locatable(ax1);\ncax = divider.append_axes(\"right\", size=\"2.5%\", pad=0.2);\nplt.colorbar(cax=cax);\n\nax2 = plt.subplot(122);\nplt.title(g_Type + ' with PAA');\nplt.imshow(paaimage);\n\nprint('Saving processed data...')\ndf_without_paa_test = pd.DataFrame(\n data = data_without_paa_test,\n columns = list([\"IDX\", \"TYPE\"]) + [\"DIMESION_{}\".format(d) for d in range(size_without_paa*size_without_paa)] + list(label_columns_idx)\n)\ndf_without_paa_test.to_csv(os.path.join( BENCHMARKING_RESOURCES_PATH, \"datasets\", \"df_without_paa_test.csv\"))\n\ndf_paa_test = pd.DataFrame(\n data = data_paa_test,\n columns = list([\"IDX\", \"TYPE\"]) + [\"DIMESION_{}\".format(d) for d in range(size_paa*size_paa)] + list(label_columns_idx)\n)\ndf_paa_test.to_csv(os.path.join( BENCHMARKING_RESOURCES_PATH, \"datasets\", \"df_paa_test.csv\"))", "Modelagem", "def metrics(test, predicted):\n ##CLASSIFICATION METRICS\n\n acc = accuracy_score(test, predicted)\n prec = precision_score(test, predicted)\n rec = recall_score(test, predicted) \n f1 = f1_score(test, predicted)\n f1m = f1_score(test, predicted, average='macro')\n \n\n # print('f1:',f1)\n # print('acc: ',acc)\n # print('recall: ',rec)\n # print('precision: ',prec)\n\n # # to copy paste print\n #print(\"{:.4}\\t{:.4}\\t{:.4}\\t{:.4}\\t{:.4}\".format(acc, prec, rec, f1, f1m))\n\n # ##REGRESSION METRICS\n # mae = mean_absolute_error(test_Y,pred)\n # print('mae: ',mae)\n # E_pred = sum(pred)\n # E_ground = sum(test_Y)\n # rete = abs(E_pred-E_ground)/float(max(E_ground,E_pred))\n # print('relative error total energy: ',rete)\n return acc, prec, rec, f1, f1m\n\n\ndef plot_predicted_and_ground_truth(test, predicted):\n #import matplotlib.pyplot as plt\n plt.plot(predicted.flatten(), label = 'pred')\n plt.plot(test.flatten(), label= 'Y')\n plt.show()\n return\n\ndef embedding_images(images, model):\n \n # Feature extraction process with VGG16\n vgg16_feature_list = [] # Attributes array (vgg16 embedding)\n y = [] # Extract labels from name of image path[]\n\n for path in tqdm_notebook(images):\n\n img = keras_image.load_img(path, target_size=(100, 100))\n x = keras_image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n # \"Extracting\" features...\n vgg16_feature = vgg16_model.predict(x)\n vgg16_feature_np = np.array(vgg16_feature)\n vgg16_feature_list.append(vgg16_feature_np.flatten())\n\n # Image (chuncked serie) \n file_name = path.split(\"\\\\\")[-1].split(\".\")[0]\n image_labels = [int(l) for l in list(file_name.split(\"_\")[-1])]\n y.append(image_labels)\n\n X = np.array(vgg16_feature_list)\n \n return X, y", "Benchmarking (replicando estudo)", "# Building dnn model (feature extraction)\nvgg16_model = VGG16(\n include_top=False, 
\n weights='imagenet', \n input_tensor=None, \n input_shape=(100, 100, 3), \n pooling='avg',\n classes=1000\n)", "Embedding das imagens de Treino", "# GAFD Images with PAA (Train)\nimages = sorted(glob( \n os.path.join(\n BENCHMARKING_RESOURCES_PATH, \n \"GeneratedImages\",\n \"*_PAA_GADF_train_*.png\"\n ) \n))\nX_train, y_train = embedding_images(images, vgg16_model)\n\n# Data persistence\nnp.save( os.path.join(BENCHMARKING_RESOURCES_PATH, 'datasets/X_train.npy'), X_train)\nnp.save( os.path.join(BENCHMARKING_RESOURCES_PATH, 'datasets/y_train.npy'), y_train)", "Embedding das imagens de Teste", "# GAFD Images with PAA (Train)\nimages = sorted(glob( \n os.path.join(\n BENCHMARKING_RESOURCES_PATH, \n \"GeneratedImages\",\n \"*_PAA_GADF_test_*.png\"\n ) \n))\nX_test, y_test = embedding_images(images, vgg16_model)\n\n# Data persistence\nnp.save( os.path.join(BENCHMARKING_RESOURCES_PATH, 'datasets/X_test.npy'), X_test)\nnp.save( os.path.join(BENCHMARKING_RESOURCES_PATH, 'datasets/y_test.npy'), y_test)", "Treinando Classificador Supervisionado", "# Training supervised classifier\nclf = DecisionTreeClassifier(max_depth=15)\n\n# Train classifier\nclf.fit(X_train, y_train)\n\n# Save classifier for future use\n#joblib.dump(clf, 'Tree'+'-'+device+'-redd-all.joblib')", "Avaliando Classificador", "# Predict test data\ny_pred = clf.predict(X_test)\n\n# Print metrics\nfinal_performance = []\ny_test = np.array(y_test)\ny_pred = np.array(y_pred)\n\nprint(\"\")\nprint(\"RESULT ANALYSIS\\n\\n\")\nprint(\"ON/OFF State Charts\")\nprint(\"-\" * 115)\nfor i in range(y_test.shape[1]):\n \n fig = plt.figure(figsize=(15, 2))\n plt.title(\"Appliance #{}\".format( label_columns_idx[i]))\n plt.plot(y_test[:, i].flatten(), label = \"True Y\")\n plt.plot( y_pred[:, i].flatten(), label = \"Predicted Y\")\n plt.xlabel('Sample')\n plt.xticks(range(0, y_test.shape[0], 50))\n plt.xlim(0, y_test.shape[0])\n plt.ylabel('Status')\n plt.yticks([0, 1])\n plt.ylim(0,1)\n plt.legend()\n plt.show()\n \n acc, prec, rec, f1, f1m = metrics(y_test[:, i], y_pred[:, i])\n final_performance.append([\n label_columns_idx[i], \n round(acc*100, 2), \n round(prec*100, 2), \n round(rec*100, 2), \n round(f1*100, 2), \n round(f1m*100, 2)\n ])\n\nprint(\"-\" * 115)\nprint(\"\")\nprint(\"FINAL PERFORMANCE BY APPLIANCE (LABEL):\")\ndf_metrics = pd.DataFrame(\n data = final_performance,\n columns = [\"Appliance\", \"Accuracy\", \"Precision\", \"Recall\", \"F1-score\", \"F1-macro\"]\n)\ndisplay(df_metrics)\n\nprint(\"\")\nprint(\"OVERALL AVERAGE PERFORMANCE:\")\nfinal_performance = np.mean(np.array(final_performance)[:, 1:].astype(float), axis = 0)\ndisplay(pd.DataFrame(\n data = {\n \"Metric\": [\"Accuracy\", \"Precision\", \"Recall\", \"F1-score\", \"F1-macro\"],\n \"Result (%)\": [round(p, 2) for p in final_performance]\n }\n))\n# print(\"-----------------\")\n# print(\"Accuracy : {0:.2f}%\".format( final_performance[0] ))\n# print(\"Precision : {0:.2f}%\".format( final_performance[1] ))\n# print(\"Recall : {0:.2f}%\".format( final_performance[2] ))\n# print(\"F1-score : {0:.2f}%\".format( final_performance[3] ))\n# print(\"F1-macro : {0:.2f}%\".format( final_performance[4] ))\n# print(\"-----------------\")", "Conclusões\nAssim como no benchmarking 1, foi possível reproduzir a abordagem proposta no trabalho Imaging NILM Time-Series. Todavia, como esperado, alguns dados utilizados no estudo não foram disponibilizados, estando acessível apenas o código-fonte da abordagem, o que tomamos como base para realizar esse experimento. 
Com os códigos em mãos, implementei as mesmas rotinas dentro de um pipeline similar ao benchmarking 1, a fim de que o processos de Geração de dados e Extração de Características fosse <u>exatamente</u> os mesmos propostos nos trabalhos originais.\nA partir desta perspectiva, visando inclusive potencializar um estudo comparativo entre Benchmarkinks e a Proposta do doutorado, utilizei os mesmos dados do Benchmarking anterior para a Geração de Imagens GAF e Extração de características com a rede VGG16 (abordagem proposta pelos autores). Por fim, a partir do pipeline implementado, foi treinado um classificador baseado em Árvore de Decisão (multilabel) e avaliado a performance do mesmo, considerando métricas tradicionais de abordagens supervisionadas de ML.\nPortanto, a partir deste ponto, temos implementado e avaliado duas abordagens de referências, os quais permitiram exportar os dados, realizar o pré-processamento dos mesmos e extrair as características - cada um dentro de sua estratégia - para os dados da base REDD, permitindo estabelecer um cenário razoável de comparação para a classificação de séries temporais no contexto de NILM. Como próximos passos, está a definição dos cenários de testes (janela temporal, SEED, algoritmos de classificação, métricas de desempenho, etc.) e o consequente desenvolvimento da abordagem baseada em Gráfico de Recorrência para o problema de TSC, considerando os dados da residência 1 da base REDD." ]
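The serie2image routine in the notebook above encodes a rescaled series as a Gramian Angular Field image. As a compact reference, here is a minimal NumPy sketch of just that GASF/GADF step, assuming the input is already scaled to [-1, 1]; the notebook's own rescale and paa helpers (defined outside this excerpt) are deliberately not reproduced.

```python
import numpy as np

def gramian_angular_field(series, kind='GADF'):
    """Minimal GASF/GADF matrix for a series already rescaled to [-1, 1]."""
    cos_phi = np.asarray(series, dtype=float)                  # cos(phi) = scaled value
    sin_phi = np.sqrt(np.clip(1.0 - cos_phi ** 2, 0.0, 1.0))   # sin(phi) taken non-negative
    if kind == 'GASF':
        # GASF[i, j] = cos(phi_i + phi_j)
        return np.outer(cos_phi, cos_phi) - np.outer(sin_phi, sin_phi)
    if kind == 'GADF':
        # GADF[i, j] = sin(phi_i - phi_j)
        return np.outer(sin_phi, cos_phi) - np.outer(cos_phi, sin_phi)
    raise ValueError('Unknown GAF type!')

# Toy usage: an 8-point scaled series gives an 8x8 image matrix.
print(gramian_angular_field(np.linspace(-1.0, 1.0, 8), 'GASF').shape)
```

The two branches mirror the cos/sin outer-product formulas used inside serie2image, just written with np.outer instead of np.matrix products.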
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
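Still on the NILM notebook above: the paa helper called inside serie2image is defined earlier in the original notebook and is not visible in this excerpt. For context, a generic Piecewise Aggregate Approximation does segment-wise averaging as sketched below; this is an assumption about what such a helper typically does, not a copy of the notebook's own implementation.

```python
import numpy as np

def paa_reduce(series, s):
    """Generic PAA: average a 1-D series into s roughly equal segments."""
    segments = np.array_split(np.asarray(series, dtype=float), s)
    return np.array([seg.mean() for seg in segments])

print(paa_reduce(np.arange(10.0), 5))   # [0.5 2.5 4.5 6.5 8.5]
```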
tensorflow/text
docs/tutorials/text_similarity.ipynb
apache-2.0
[ "Copyright 2020 The TensorFlow Authors.", "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "TF.Text Metrics\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/text/tutorials/text_similarity\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/text/blob/master/docs/tutorials/text_similarity.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/text/blob/master/docs/tutorials/text_similarity.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/text/docs/tutorials/text_similarity.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nOverview\nTensorFlow Text provides a collection of text-metrics-related classes and ops ready to use with TensorFlow 2.0. The library contains implementations of text-similarity metrics such as ROUGE-L, required for automatic evaluation of text generation models.\nThe benefit of using these ops in evaluating your models is that they are compatible with TPU evaluation and work nicely with TF streaming metric APIs.\nSetup", "!pip install -q \"tensorflow-text==2.8.*\"\n\nimport tensorflow as tf\nimport tensorflow_text as text", "ROUGE-L\nThe Rouge-L metric is a score from 0 to 1 indicating how similar two sequences are, based on the length of the longest common subsequence (LCS). In particular, Rouge-L is the weighted harmonic mean (or f-measure) combining the LCS precision (the percentage of the hypothesis sequence covered by the LCS) and the LCS recall (the percentage of the reference sequence covered by the LCS).\nSource: https://www.microsoft.com/en-us/research/publication/rouge-a-package-for-automatic-evaluation-of-summaries/\nThe TF.Text implementation returns the F-measure, Precision, and Recall for each (hypothesis, reference) pair.\nConsider the following hypothesis/reference pair:", "hypotheses = tf.ragged.constant([['captain', 'of', 'the', 'delta', 'flight'],\n ['the', '1990', 'transcript']])\nreferences = tf.ragged.constant([['delta', 'air', 'lines', 'flight'],\n ['this', 'concludes', 'the', 'transcript']])", "The hypotheses and references are expected to be tf.RaggedTensors of tokens. 
Tokens are required instead of raw sentences because no single tokenization strategy fits all tasks.\nNow we can call text.metrics.rouge_l and get our result back:", "result = text.metrics.rouge_l(hypotheses, references)\nprint('F-Measure: %s' % result.f_measure)\nprint('P-Measure: %s' % result.p_measure)\nprint('R-Measure: %s' % result.r_measure)", "ROUGE-L has an additional hyperparameter, alpha, which determines the weight of the harmonic mean used for computing the F-Measure. Values closer to 0 treat Recall as more important and values closer to 1 treat Precision as more important. alpha defaults to .5, which corresponds to equal weight for Precision and Recall.", "# Compute ROUGE-L with alpha=0\nresult = text.metrics.rouge_l(hypotheses, references, alpha=0)\nprint('F-Measure (alpha=0): %s' % result.f_measure)\nprint('P-Measure (alpha=0): %s' % result.p_measure)\nprint('R-Measure (alpha=0): %s' % result.r_measure)\n\n# Compute ROUGE-L with alpha=1\nresult = text.metrics.rouge_l(hypotheses, references, alpha=1)\nprint('F-Measure (alpha=1): %s' % result.f_measure)\nprint('P-Measure (alpha=1): %s' % result.p_measure)\nprint('R-Measure (alpha=1): %s' % result.r_measure)" ]
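Because ROUGE-L is defined above purely in terms of the longest common subsequence, a plain-Python sketch of that definition can serve as a sanity check against text.metrics.rouge_l. This only illustrates the formula; it is not the TensorFlow implementation.

```python
def lcs_length(a, b):
    """Classic dynamic-programming longest-common-subsequence length."""
    table = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i, x in enumerate(a, 1):
        for j, y in enumerate(b, 1):
            if x == y:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[-1][-1]

def rouge_l(hypothesis, reference, alpha=0.5):
    """F-measure, precision and recall based on the LCS, per the definition above."""
    lcs = lcs_length(hypothesis, reference)
    if lcs == 0:
        return 0.0, 0.0, 0.0
    p = lcs / len(hypothesis)   # share of the hypothesis covered by the LCS
    r = lcs / len(reference)    # share of the reference covered by the LCS
    f = p * r / ((1 - alpha) * p + alpha * r)
    return f, p, r

print(rouge_l(['captain', 'of', 'the', 'delta', 'flight'],
              ['delta', 'air', 'lines', 'flight']))
```

Under this definition, the first hypothesis/reference pair has LCS ['delta', 'flight'], so precision 0.4, recall 0.5 and an F-measure of about 0.44, and alpha=1 reduces to precision while alpha=0 reduces to recall, matching the behaviour described above.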
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Weenkus/Machine-Learning-University-of-Washington
Regression/examples/week-2-multiple-regression-assignment-1-blank.ipynb
mit
[ "Regression Week 2: Multiple Regression (Interpretation)\nThe goal of this first notebook is to explore multiple regression and feature engineering with existing graphlab functions.\nIn this notebook you will use data on house sales in King County to predict prices using multiple regression. You will:\n* Use SFrames to do some feature engineering\n* Use built-in graphlab functions to compute the regression weights (coefficients/parameters)\n* Given the regression weights, predictors and outcome write a function to compute the Residual Sum of Squares\n* Look at coefficients and interpret their meanings\n* Evaluate multiple models via RSS\nFire up graphlab create", "import graphlab", "Load in house sales data\nDataset is from house sales in King County, the region where the city of Seattle, WA is located.", "sales = graphlab.SFrame('kc_house_data.gl/')", "Split data into training and testing.\nWe use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you).", "train_data,test_data = sales.random_split(.8,seed=0)", "Learning a multiple regression model\nRecall we can use the following code to learn a multiple regression model predicting 'price' based on the following features:\nexample_features = ['sqft_living', 'bedrooms', 'bathrooms'] on training data with the following code:\n(Aside: We set validation_set = None to ensure that the results are always the same)", "example_features = ['sqft_living', 'bedrooms', 'bathrooms']\nexample_model = graphlab.linear_regression.create(train_data, target = 'price', features = example_features, \n validation_set = None)", "Now that we have fitted the model we can extract the regression weights (coefficients) as an SFrame as follows:", "example_weight_summary = example_model.get(\"coefficients\")\nprint example_weight_summary", "Making Predictions\nIn the gradient descent notebook we use numpy to do our regression. In this book we will use existing graphlab create functions to analyze multiple regressions. \nRecall that once a model is built we can use the .predict() function to find the predicted values for data we pass. For example using the example model above:", "example_predictions = example_model.predict(train_data)\nprint example_predictions[0] # should be 271789.505878", "Compute RSS\nNow that we can make predictions given the model, let's write a function to compute the RSS of the model. Complete the function below to calculate RSS given the model, data, and the outcome.", "def get_residual_sum_of_squares(model, data, outcome):\n # First get the predictions\n\n # Then compute the residuals/errors\n\n # Then square and add them up\n\n return(RSS) ", "Test your function by computing the RSS on TEST data for the example model:", "rss_example_train = get_residual_sum_of_squares(example_model, test_data, test_data['price'])\nprint rss_example_train # should be 2.7376153833e+14", "Create some new features\nAlthough we often think of multiple regression as including multiple different features (e.g. # of bedrooms, squarefeet, and # of bathrooms) but we can also consider transformations of existing features e.g. the log of the squarefeet or even \"interaction\" features such as the product of bedrooms and bathrooms.\nYou will use the logarithm function to create a new feature. 
so first you should import it from the math library.", "from math import log", "Next create the following 4 new features as column in both TEST and TRAIN data:\n* bedrooms_squared = bedrooms*bedrooms\n* bed_bath_rooms = bedrooms*bathrooms\n* log_sqft_living = log(sqft_living)\n* lat_plus_long = lat + long \nAs an example here's the first one:", "train_data['bedrooms_squared'] = train_data['bedrooms'].apply(lambda x: x**2)\ntest_data['bedrooms_squared'] = test_data['bedrooms'].apply(lambda x: x**2)\n\n# create the remaining 3 features in both TEST and TRAIN data\n\n", "Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this feature will mostly affect houses with many bedrooms.\nbedrooms times bathrooms gives what's called an \"interaction\" feature. It is large when both of them are large.\nTaking the log of squarefeet has the effect of bringing large values closer together and spreading out small values.\nAdding latitude to longitude is totally non-sensical but we will do it anyway (you'll see why)\n\nQuiz Question: What is the mean (arithmetic average) value of your 4 new features on TEST data? (round to 2 digits)\nLearning Multiple Models\nNow we will learn the weights for three (nested) models for predicting house prices. The first model will have the fewest features the second model will add one more feature and the third will add a few more:\n* Model 1: squarefeet, # bedrooms, # bathrooms, latitude & longitude\n* Model 2: add bedrooms*bathrooms\n* Model 3: Add log squarefeet, bedrooms squared, and the (nonsensical) latitude + longitude", "model_1_features = ['sqft_living', 'bedrooms', 'bathrooms', 'lat', 'long']\nmodel_2_features = model_1_features + ['bed_bath_rooms']\nmodel_3_features = model_2_features + ['bedrooms_squared', 'log_sqft_living', 'lat_plus_long']", "Now that you have the features, learn the weights for the three different models for predicting target = 'price' using graphlab.linear_regression.create() and look at the value of the weights/coefficients:", "# Learn the three models: (don't forget to set validation_set = None)\n\n\n# Examine/extract each model's coefficients:\n", "Quiz Question: What is the sign (positive or negative) for the coefficient/weight for 'bathrooms' in model 1?\nQuiz Question: What is the sign (positive or negative) for the coefficient/weight for 'bathrooms' in model 2?\nThink about what this means.\nComparing multiple models\nNow that you've learned three models and extracted the model weights we want to evaluate which model is best.\nFirst use your functions from earlier to compute the RSS on TRAINING Data for each of the three models.", "# Compute the RSS on TRAINING data for each of the three models and record the values:\n", "Quiz Question: Which model (1, 2 or 3) has lowest RSS on TRAINING Data? Is this what you expected?\nNow compute the RSS on on TEST data for each of the three models.", "# Compute the RSS on TESTING data for each of the three models and record the values:\n", "Quiz Question: Which model (1, 2 or 3) has lowest RSS on TESTING Data? Is this what you expected?Think about the features that were added to each model from the previous." ]
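The get_residual_sum_of_squares stub above is left as an exercise. One possible completion is sketched here, kept outside the original cell so the exercise stays intact; it uses only the model.predict call demonstrated earlier and ordinary SArray arithmetic, and should be read as a sketch rather than an official solution.

```python
def get_residual_sum_of_squares(model, data, outcome):
    # First get the predictions
    predictions = model.predict(data)
    # Then compute the residuals/errors
    residuals = outcome - predictions
    # Then square and add them up
    RSS = (residuals * residuals).sum()
    return RSS

# Called as get_residual_sum_of_squares(example_model, test_data, test_data['price']),
# this is expected to land near the quoted value of about 2.7376153833e+14.
```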
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
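Still with the house-price regression notebook above: the three remaining engineered features (bed_bath_rooms, log_sqft_living, lat_plus_long) can be created with the same pattern as the bedrooms_squared example. A hedged sketch, applied to both TRAIN and TEST frames:

```python
from math import log

for frame in (train_data, test_data):
    # interaction feature: large only when both counts are large
    frame['bed_bath_rooms'] = frame['bedrooms'] * frame['bathrooms']
    # log transform compresses large square footages
    frame['log_sqft_living'] = frame['sqft_living'].apply(lambda x: log(x))
    # deliberately non-sensical feature used later in the quiz
    frame['lat_plus_long'] = frame['lat'] + frame['long']
```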
cassiogreco/udacity-data-analyst-nanodegree
P1/P1_Cassio.ipynb
mit
[ "1. What is our independent variable? What is our dependent variable?\nThe independent and dependent variables of the experiment are: \n\nIndependent\n\nWord/Color congruency\n\n\nDependent\n\nTime to name ink\n\n2. What is an appropriate set of hypotheses for this task? What kind of statistical test do you expect to perform? Justify your choices.\nWe have as starting data two samples gathered from the same test (time taken to say the name of the color a given word is printed in) applied in different conditions: one for Congruent word/colors (the word and color are the same. I.e. the word \"blue\" printed in blue) and one for Incongruent word/colors (the word is a different color than the printed color. I.e. The word \"blue\" is printed in red).\nFrom the sampled data, we want to infer whether or not the time taken to say a Congruent word/color is less than the time taken to say an Incongruent word/color.\nHaving Con be the symbol of the Congruent words and Incon be the symbol of the Incongruent words, and Diff be the symbol of the difference between Con and Incon (Con - Incon), we have:\nH0 (HNULL): muCon = muIncon <=> muDiff = 0\nHa (HALTERNATIVE): muCon != muIncon <=> muDiff != 0\nHNULL hypothesis: The population mean time it takes to say the correct ink color in the Congruent condition is equal to the population mean time it takes to say the correct ink color in the Incongruent condition, based on the sample means.\nHALTERNATIVE hypothesis: The population mean time it takes to say the correct ink color in the Congruent is different than the population mean time it takes to say the correct ink color in the Incongruent condition, based on the sample means.\nI will be performing a two-tailed Dependent T-Test because:\n- The sample size is smaller than 30\n- The standard deviation of the entire population is unknown\n- I am measuring the results between the same test based on two different conditions on the same subject group.\nI will evaluate the results based on a confidence level of 99% (T-Critical value of 2.807, for 23 degrees of freedom).\nI expect to reject the HNULL hypothesis that states that the mean time it takes to say the name of the ink colors in the Congruent group will be equal to the mean time it takes to say the name of the ink colors in the Incongruent group\n3. Report some descriptive statistics regarding this dataset. 
Include at least one measure of central tendency and at least one measure of variability.", "import pandas as pd\nimport math\n%pylab inline\nimport matplotlib.pyplot as plt\nCONGRUENT = 'Congruent'\nINCONGRUENT = 'Incongruent'\nTCRITICAL = 2.807 # two-tailed difference with 99% Confidence and Degree of Freedom of 23\n\npath = r'~/udacity-data-analyst-nanodegree/P1/stroopdata.csv'\n\ninitialData = pd.read_csv(path)\n\ndataDifference = [initialData[CONGRUENT][i] - initialData[INCONGRUENT][i] for i in range(0, len(initialData[CONGRUENT]))]\n\ncongruentMean = mean(initialData[CONGRUENT])\nincongruentMean = mean(initialData[INCONGRUENT])\ndifferenceMean = mean(dataDifference)\n\ndef mean(data):\n return sum(data) / len(data)\n\ndef valuesMinusMean(data):\n meanOfData = mean(data)\n return [value - meanOfData for value in data]\n\ndef valuesToPower(data, power):\n return [value ** power for value in data]\n\ndef variance(data):\n return sum(data) / (len(data) - 1)\n\ndef standardDeviation(variance):\n return math.sqrt(variance)\n\nprint('Mean of Congruent values:', congruentMean)\nprint('Mean of Incongruent values:', incongruentMean)\nprint('Mean of Difference values:', differenceMean)\nprint()\nprint('Range of Congruent values:', max(initialData[CONGRUENT] - min(initialData[CONGRUENT])))\nprint('Range of Incongruent values:', max(initialData[INCONGRUENT] - min(initialData[INCONGRUENT])))\nprint('Range of Difference values:', max(dataDifference - min(dataDifference)))\nprint()\nprint('Standard Deviation of Congruent values:', standardDeviation(variance(valuesToPower(valuesMinusMean(initialData[CONGRUENT]), 2))))\nprint('Standard Deviation of Incongruent values:', standardDeviation(variance(valuesToPower(valuesMinusMean(initialData[INCONGRUENT]), 2))))\nprint('Standard Deviation of Difference values:', standardDeviation(variance(valuesToPower(valuesMinusMean(dataDifference), 2))))", "4. Provide one or two visualizations that show the distribution of the sample data. Write one or two sentences noting what you observe about the plot or plots.", "plt.hist(\n x=[initialData[CONGRUENT], initialData[INCONGRUENT]], \n normed=False, \n range=(min(initialData[CONGRUENT]), max(initialData[INCONGRUENT])),\n bins=10,\n label='Time to name'\n)\n\nplt.hist(\n x=initialData[CONGRUENT],\n normed=False, \n range=(min(initialData[CONGRUENT]), max(initialData[CONGRUENT])),\n bins=10,\n label='Time to name'\n)\n\nplt.hist(\n x=initialData[INCONGRUENT], \n normed=False, \n range=(min(initialData[INCONGRUENT]), max(initialData[INCONGRUENT])),\n bins=10,\n label='Time to name',\n color='Green'\n)\n\nplt.hist(\n x=dataDifference, \n normed=False, \n range=(min(dataDifference), max(dataDifference)),\n bins=10,\n label='Time to name',\n color='Red'\n)", "From analyzing the histograms of both the Congruent and Incongruent datasets we can visualy see that the Incongruent dataset contains a greater number of higher time-to-name values than the Congruent datasets.\nThis is evident from looking at the values of the mean values of both datasets, previously calculated (14.051125 and 22.0159166667 for Congruent and Incongruent datasets, respectively)\n5. Now, perform the statistical test and report your results. What is your confidence level and your critical statistic value? Do you reject the null hypothesis or fail to reject it? Come to a conclusion in terms of the experiment task. 
Did the results match up with your expectations?", "degreesOfFreedom = len(initialData[CONGRUENT]) - 1\n\ndef standardError(standardDeviation, sampleSize):\n return standardDeviation / math.sqrt(sampleSize)\n\ndef getTValue(mean, se):\n return mean / se\n\nse = standardError(standardDeviation(variance(valuesToPower(valuesMinusMean(dataDifference), 2))), len(dataDifference))\ntValue = getTValue(differenceMean, se)\n\ndef marginOfError(t, standardError):\n return t * standardError\n\ndef getConfidenceInterval(mean, t, standardError):\n return (mean - marginOfError(t, standardError), mean + marginOfError(t, standardError))\n\nprint('Degrees of Freedom:', degreesOfFreedom)\nprint('Standard Error:', se)\nprint('T Value:', tValue)\nprint('T Critical Regions: Less than', -TCRITICAL, 'and Greater than', TCRITICAL)\nprint('Is the T Value inside of the critical region?', tValue >= TCRITICAL or tValue < TCRITICAL)\nprint('Is p < 0.005?', tValue >= TCRITICAL or tValue < TCRITICAL)\nprint('Confidence Interval:', getConfidenceInterval(differenceMean, TCRITICAL, se))", "Based on the data calculated above, we have that the T Value of the difference of the two conditions (Congruent and Incongruent) is inside of the critical region of 99% Confidence.\nWith this, I reject the HNULL Hypothesis (H0). Since the T Value falls inside of the critical region, it is statistically significant to say that muCon != muIncon\n6. Optional: What do you think is responsible for the effects observed? Can you think of an alternative or similar task that would result in a similar effect? Some research about the problem will be helpful for thinking about these two questions!\nI think that the reason behind this effect is that the brain already has associated the name of the color with it's visual representation (the actual color). When we are shown the name of a color, but it is in a different color our brain can't process the two at the same time (as the logical and the creative side of our brain are each giving a different response as to what we are seeing).\nSimilar tasks that will have similar results could be a Spatial Stroop Effect (as described in the wikipedia article referenced at the bottom) where show words like Big, Small, Up, Down in different sizes and positions can also trigger this effect.\nSources\n\nMatplot documentation: http://matplotlib.org/api/pyplot_api.html\nT-Table: https://drive.google.com/file/d/0B8LCYo988pznaUs4dDE5dkJrOEk/view?usp=sharing\nMore information on the study: https://en.wikipedia.org/wiki/Stroop_effect\nWhen to use T Score vs Z Score: http://www.statisticshowto.com/when-to-use-a-t-score-vs-z-score/\nTypes of T Tests: http://support.minitab.com/en-us/minitab/17/topic-library/basic-statistics-and-graphs/hypothesis-tests/tests-of-means/types-of-t-tests/\nAbout Null and Alternative Hypothesis: http://support.minitab.com/en-us/minitab/17/topic-library/basic-statistics-and-graphs/hypothesis-tests/basics/null-and-alternative-hypotheses/\nWhat is a hypothesis test?: http://support.minitab.com/en-us/minitab/17/topic-library/basic-statistics-and-graphs/hypothesis-tests/basics/what-is-a-hypothesis-test/" ]
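The Stroop notebook above derives the dependent t-statistic by hand. As an optional cross-check, scipy's paired t-test can be run on the same two columns; scipy is not imported in the notebook itself, so this is a supplementary sketch rather than part of the original analysis.

```python
import pandas as pd
from scipy import stats

df = pd.read_csv('stroopdata.csv')   # same file the notebook loads
t_stat, p_value = stats.ttest_rel(df['Congruent'], df['Incongruent'])

print('paired t statistic:', t_stat)   # magnitude should match the hand-computed T Value
print('two-tailed p value:', p_value)  # compare against the 0.01 significance level
```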
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
diging/tethne-notebooks
2. Working with data from JSTOR Data-for-Research.ipynb
gpl-3.0
[ "%matplotlib inline\n\nfrom pprint import pprint\nimport matplotlib.pyplot as plt", "Introduction to Tethne: Working with data from the Web of Science\nNow that we have the basics down, in this notebook we'll begin working with data from the JSTOR Data-for-Research (DfR) portal.\nThe JSTOR DfR portal gives researchers access to\nbibliographic data and N-grams for the entire JSTOR database.\nTethne can use DfR data to generate coauthorship networks, and to improve\nmetadata for Web of Science records. Tethne is also able to use\nN-gram counts to add information to networks, and can interface with MALLET to perform LDA topic modeling.\nMethods in Digital & Computational Humanities\nThis notebook is part of a cluster of learning resources developed by the Laubichler Lab and the Digital Innovation Group at Arizona State University as part of an initiative for digital and computational humanities (d+cH). For more information, see our evolving online methods course at https://diging.atlassian.net/wiki/display/DCH.\nGetting Help\nDevelopment of the Tethne project is led by Erick Peirson. To get help, first check our issue tracking system on GitHub. There, you can search for questions and problems reported by other users, or ask a question of your own. You can also reach Erick via e-mail at [email protected].\nGetting bibliographic data from JSTOR Data-for-Research\nFor the purpose of this tutorial, you can use the sample dataset from https://www.dropbox.com/s/q2jy87pmy9r6bsa/tethne_workshop_data.zip?dl=0.\nAccess the DfR portal at http://dfr.jstor.org/ If you don't already have an\naccount, you will need to create a new account.\nAfter you've logged in, perform a search using whatever criteria you please.\nWhen you have achieved the result that you desire, create a new dataset request.\nUnder the \"Dataset Request\" menu in the upper-right corner of the page, click\n\"Submit new request\".\n\nOn the Download Options page, select your desired Data Type. If you do\nnot intend to make use of the contents of the papers themselves, then \"Citations\nOnly\" is sufficient. Otherwise, choose word counts, bigrams, etc.\nOutput Format should be set to XML.\nGive your request a title, and set the maximum number of articles. Note that\nthe maximum documents allowed per request is 1,000. Setting Maximum Articles\nto a value less than the number of search results will yield a random sample of\nyour results.\n\nYour request should now appear in your list of Data Requests. When your\nrequest is ready (hours to days later), you will receive an e-mail with a\ndownload link. When downloading from the Data Requests list, be sure to use\nthe link in the full dataset column.\n\nWhen your dataset download is complete, unzip it. The contents should look\nsomething like those shown below.\n\ncitations.XML contains bibliographic data in XML format. 
The bigrams,\ntrigrams, wordcounts folders contain N-gram counts for each document.\nIf you were to open one of the XML files in the wordcounts folder, say, you would see some XML that looks like this:\n```\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<article id=\"10.2307/4330482\" >\n <wordcount weight=\"21\" > of </wordcount>\n <wordcount weight=\"16\" > the </wordcount>\n <wordcount weight=\"10\" > university </wordcount>\n <wordcount weight=\"10\" > a </wordcount>\n <wordcount weight=\"9\" > s </wordcount>\n <wordcount weight=\"9\" > d </wordcount>\n <wordcount weight=\"9\" > harvard </wordcount>\n <wordcount weight=\"8\" > m </wordcount>\n <wordcount weight=\"7\" > and </wordcount>\n <wordcount weight=\"6\" > u </wordcount>\n <wordcount weight=\"6\" > press </wordcount>\n <wordcount weight=\"5\" > cambridge </wordcount>\n <wordcount weight=\"5\" > massachusetts </wordcount>\n <wordcount weight=\"5\" > journal </wordcount>\n <wordcount weight=\"4\" > by </wordcount>\n ...\n <wordcount weight=\"1\" > stephen </wordcount>\n <wordcount weight=\"1\" > liver </wordcount>\n <wordcount weight=\"1\" > committee </wordcount>\n <wordcount weight=\"1\" > school </wordcount>\n <wordcount weight=\"1\" > lewontin </wordcount>\n <wordcount weight=\"1\" > canguilhem </wordcount>\n <wordcount weight=\"1\" > assistant </wordcount>\n <wordcount weight=\"1\" > jay </wordcount>\n <wordcount weight=\"1\" > state </wordcount>\n <wordcount weight=\"1\" > morgan </wordcount>\n <wordcount weight=\"1\" > advertising </wordcount>\n <wordcount weight=\"1\" > animal </wordcount>\n <wordcount weight=\"1\" > is </wordcount>\n <wordcount weight=\"1\" > species </wordcount>\n <wordcount weight=\"1\" > claude </wordcount>\n <wordcount weight=\"1\" > review </wordcount>\n <wordcount weight=\"1\" > hunt </wordcount>\n <wordcount weight=\"1\" > founder </wordcount>\n</article>\n```\nEach word is represented by a &lt;wordcount&gt;&lt;/wordcount&gt; tag. The \"weight\" attribute gives the number of times that the word occurs in the document, and the word itself is between the tags. We'll come back to this in just a moment.\nParsing DfR datasets\nJust as for WoS data, there is a module in tethne.readers for working with DfR data. We can import it with:", "from tethne.readers import dfr", "Once again, read() accepts a string containing a path to either a single DfR dataset, or a directory containing several. Here, \"DfR dataset\" refers to the folder containing the file \"citations.xml\", and the contents of that folder.\nThis will take considerably more time than loading a WoS dataset. The reason is that Tethne automatically detects and parses all of the wordcount data.", "dfr_corpus = dfr.read('/Users/erickpeirson/Dropbox/HSS ThatCamp Workshop/sample_data/DfR')", "Combining DfR and WoS data\nWe can combine our datasets using the merge() function. First, we load our WoS data in a separate Corpus:", "from tethne.readers import wos\nwos_corpus = wos.read('/Users/erickpeirson/Dropbox/HSS ThatCamp Workshop/sample_data/wos')", "Both of these datasets are for the Journal of the History of Biology. But note that the WoS and DfR corpora have different numbers of Papers:", "len(dfr_corpus), len(wos_corpus)", "Then import merge() from tethne.readers:", "from tethne.readers import merge", "We then create a new Corpus by passing both Corpus objects to merge(). 
If there is conflicting information in the two corpora, the first Corpus gets priority.", "corpus = merge(dfr_corpus, wos_corpus)", "merge() has combined data where possible, and discarded any duplicates in the original datasets.", "len(corpus)", "FeatureSets\nOur wordcount data are represented by a FeatureSet. A FeatureSet is a description of how certain sets of elements are distributed across a Corpus. This is kind of like an inversion of an index. For example, we might be interested in which words (elements) are found in which Papers. We can think of authors as a FeatureSet, too.\nAll of the available FeatureSets are available in the features attribute (a dictionary) of our Corpus. We can see the available FeatureSets by inspecting its:", "corpus.features", "Note that citations and authors are also FeatureSets. In fact, the majority of network-building functions in Tethne operate on FeatureSets -- including the coauthors() and bibliographic_coupling() functions that we used in the WoS notebook.\nEach FeatureSet has several attributes. The features attribute contains the distribution data itself. These data themselves are (element, value) tuples. In this case, the elements are words, and the values are wordcounts.", "corpus.features['wordcounts'].features.items()[0] # Just show data for the first Paper.", "The index contains our \"vocabulary\":", "print 'There are %i words in the wordcounts featureset' % len(corpus.features['wordcounts'].index)", "We can use the feature_distribution() method of our Corpus to look at the distribution of words over time. In the example below I used MatPlotLib to visualize the distribution.", "plt.figure(figsize=(10, 5))\n\nplt.bar(*corpus.feature_distribution('wordcounts', 'evolutionary')) # <-- The action.\n\nplt.ylabel('Frequency of the word ``evolutionary`` in this Corpus')\nplt.xlabel('Publication Date')\nplt.show()", "If we add the argument mode='documentCounts', we get the number of documents in which 'evolutionary' occurs.", "plt.figure(figsize=(10, 5))\n\nplt.bar(*corpus.feature_distribution('wordcounts', 'evolutionary', mode='documentCounts')) # <-- The action.\n\nplt.ylabel('Documents containing ``evolutionary``')\nplt.xlabel('Publication Date')\nplt.show()", "Note that we can look how documents themselves are distributed using the distribution() method.", "plt.figure(figsize=(10, 5))\n\nplt.bar(*corpus.distribution()) # <-- The action.\n\nplt.ylabel('Number of Documents')\nplt.xlabel('Publication Date')\nplt.show()", "So, putting these together, we can normalize our feature_distribution() data to get a sense of the relative use of the word 'evolution'.", "dates, N_evolution = corpus.feature_distribution('wordcounts', 'evolutionary', mode='documentCounts')\ndates, N = corpus.distribution()\nnormalized_frequency = [f/N[i] for i, f in enumerate(N_evolution)]\nplt.figure(figsize=(10, 5))\n\nplt.bar(dates, normalized_frequency) # <-- The action.\n\nplt.ylabel('Proportion of documents containing ``evolutionary``')\nplt.xlabel('Publication Date')\nplt.show()", "Topic Modeling with DfR wordcounts\nLatent Dirichlet Allocation is a popular approach to discovering latent \"topics\" in large corpora. Many digital humanists use a software package called MALLET to fit LDA to text data. Tethne uses MALLET to fit LDA topic models.\nBefore we use LDA, however, we need to do some preprocessing. \"Preprocessing\" refers to anything that we do to filter or transform our FeatureSet prior to analysis. \nPre-processing\nTwo important preprocessing steps are:\n1. 
Removing \"stopwords\" -- common words like \"the\", \"and\", \"but\", \"for\", that don't yield much insight into the contents of documents.\n2. Removing words that are too common or too rare. These include typos or OCR artifacts.\n\nWe can do both of these by using the transform() method on our FeatureSet.\nFirst, we need a stoplist. NLTK provides a great stoplist.", "from nltk.corpus import stopwords\nstoplist = stopwords.words()", "We then need to define what elements to keep, and what elements to discard. We will use a function that will evaluate whether or not a word is in our stoplist. The function should take three arguments:\n\nf -- the feature itself (the word)\nv -- the number of instances of that feature in a specific document\nc -- the number of instances of that feature in the whole FeatureSet\ndc -- the number of documents that contain that feature\n\nThis function will be applied to each word in each document. If it returns 0 or None, the word will be excluded. Otherwise, it should return a numeric value (in this case, the count for that document).\nIn addition to applying the stoplist, we'll also exclude any word that occurs in more than 500 of the documents and less than 3 documents, and is less than 4 characters in length.", "def apply_stoplist(f, v, c, dc):\n if f in stoplist or dc > 500 or dc < 3 or len(f) < 4:\n return None # Discard the element.\n return v", "We apply the stoplist using the transform() method. FeatureSets are not modified in place; instead, a new FeatureSet is generated that reflects the specified changes. We'll call the new FeatureSet 'wordcounts_filtered'.", "corpus.features['wordcounts_filtered'] = corpus.features['wordcounts'].transform(apply_stoplist)", "There should be significantly fewer words in our new \"wordcounts_filtered\" FeatureSet.", "print 'There are %i words in the wordcounts featureset' % len(corpus.features['wordcounts'].index)\nprint 'There are %i words in the wordcounts_filtered featureset' % len(corpus.features['wordcounts_filtered'].index)", "The LDA topic model\nTethne provides a class called LDAModel. You should be able to import it directly from the tethne package:", "from tethne import LDAModel", "Now we'll create a new LDAModel for our Corpus. The featureset_name parameter tells the LDAModel which FeatureSet we want to use. We'll use our filtered wordcounts.", "model = LDAModel(corpus, featureset_name='wordcounts_filtered')", "Next we'll fit the model. We need to tell MALLET how many topics to fit (the hyperparameter Z), and how many iterations (max_iter) to perform. This step may take a little while, depending on the size of your corpus.", "model.fit(Z=50, max_iter=500)", "You can inspect the inferred topics using the model's print_topics() method. By default, this will print the top ten words for each topic.", "model.print_topics()", "We can also look at the representation of a topic over time using the topic_over_time() method. In the example below we'll print the first five of the topics on the same plot.", "plt.figure(figsize=(15, 5))\nfor k in xrange(5): # Generates numbers k in [0, 4].\n x, y = model.topic_over_time(k) # Gets topic number k.\n plt.plot(x, y, label='topic {0}'.format(k), lw=2, alpha=0.7)\nplt.legend(loc='best')\nplt.show() ", "Generating networks from topic models\nThe features module in the tethne.networks subpackage contains some useful methods for visualizing topic models as networks. 
You can import it just like the authors or papers modules.", "from tethne.networks import topics", "The terms function generates a network of words connected on the basis of shared affinity with a topic. If two words i and j are both associated with a topic z with $\\Phi(i|z) >= 0.01$ and $\\Phi(j|z) >= 0.01$, then an edge is drawn between them.", "termGraph = topics.terms(model, threshold=0.01)\n\ntermGraph.order(), termGraph.size()\n\ntermGraph.name = ''\n\nfrom tethne.writers.graph import to_graphml\nto_graphml(termGraph, '/Users/erickpeirson/Desktop/topic_terms.graphml')", "", "topicCoupling = topics.topic_coupling(model, threshold=0.2)\n\nprint '%i nodes and %i edges' % (topicCoupling.order(), topicCoupling.size())\n\nto_graphml(topicCoupling, '/Users/erickpeirson/Desktop/lda_topicCoupling.graphml')", "" ]
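The DfR wordcount files described near the top of this notebook are simple XML, one wordcount element per word with a weight attribute. Independent of Tethne's own reader, a single file can be inspected with the standard library as sketched below; the example path is hypothetical.

```python
import xml.etree.ElementTree as ET

def read_wordcounts(path):
    """Return {word: count} for a single DfR wordcount XML file."""
    root = ET.parse(path).getroot()          # the <article id="..."> element
    counts = {}
    for node in root.findall('wordcount'):   # one child element per word
        word = (node.text or '').strip()
        if word:
            counts[word] = counts.get(word, 0) + int(node.attrib['weight'])
    return counts

# counts = read_wordcounts('wordcounts/wordcounts_10.2307_4330482.XML')  # hypothetical path
# sorted(counts.items(), key=lambda kv: -kv[1])[:5]                      # five most frequent words
```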
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
milroy/Spark-Meetup
exercises/03_aggregation.ipynb
mit
[ "Simple Aggregation", "import numpy as np\n\ndata = np.arange(1000).reshape(100,10)\nprint data.shape", "Pandas", "import pandas as pd\n\npand_tmp = pd.DataFrame(data, \n columns=['x{0}'.format(i) for i in range(data.shape[1])])\npand_tmp.head()", "What is the row sum?", "pand_tmp.sum(axis=1)", "Column sum?", "pand_tmp.sum(axis=0)\n\npand_tmp.to_csv('numbers.csv', index=False)", "Spark", "import findspark\nimport os\nfindspark.init() # you need that before import pyspark.\n\nimport pyspark\nsc = pyspark.SparkContext('local[4]', 'pyspark')\n\nlines = sc.textFile('numbers.csv', 18)\nfor l in lines.take(3):\n print l\n\nlines.take(3)\n\ntype(lines.take(1))", "How do we skip the header? How about using find()? What is Boolean value for true with find()?", "lines = lines.filter(lambda x: x.find('x') != 0)\nfor l in lines.take(2):\n print l\n\ndata = lines.map(lambda x: x.split(','))\ndata.take(3)", "Row Sum\nCast to integer and sum!", "def row_sum(x):\n int_x = map(lambda x: int(x), x)\n return sum(int_x)\n\ndata_row_sum = data.map(row_sum)\n\nprint data_row_sum.collect()\n\nprint data_row_sum.count()", "Column Sum\nThis one's a bit trickier, and portends ill for large, complex data sets (like example 5)...\nLet's enumerate the list comprising each RDD \"line\" such that each value is indexed by the corresponding column number.", "def col_key(x):\n for i, value in enumerate(x):\n yield (i, int(value))\n\ntmp = data.flatMap(col_key)\ntmp.take(15)", "Notice how flatMap works here: the generator is returned per partition, meaning that the first element value of each tuple cycles.", "tmp.take(3)\n\ntmp = tmp.groupByKey()\nfor i in tmp.take(2):\n print i, type(i)\n\ndata_col_sum = tmp.map(lambda x: sum(x[1]))\nfor i in data_col_sum.take(2):\n print i\n\nprint data_col_sum.collect()\nprint data_col_sum.count()", "Column sum with Spark.sql.dataframe", "from pyspark.sql import SQLContext\nsqlContext = SQLContext(sc)\n\nsc\n\npyspark_df = sqlContext.createDataFrame(pand_tmp)\n\npyspark_df.take(2)", "groupBy() without arguments groups by all columns", "for i in pyspark_df.columns:\n print pyspark_df.groupBy().sum(i).collect()" ]
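The column sum in the Spark notebook above goes through groupByKey and then sums each group. An alternative sketch with reduceByKey merges partial sums on each partition before the shuffle, which is usually cheaper for plain sums. It assumes only the sc SparkContext created above; the small parallelized list stands in for the data RDD.

```python
# Column sums with reduceByKey: partial sums are combined per partition
# before any shuffle, so less data moves across the network than with groupByKey.
rdd = sc.parallelize([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3)   # toy stand-in for `data`

col_sums = (rdd
            .flatMap(lambda row: enumerate(row))    # (column_index, value) pairs
            .reduceByKey(lambda a, b: a + b)        # merge per-column partial sums
            .sortByKey()
            .values()
            .collect())

print col_sums    # [12, 15, 18]
```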
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
pauliacomi/pyGAPS
docs/examples/parsing.ipynb
mit
[ "Parsing examples\nSome examples on parsing to and from supported formats. More info about all\nparsing methods can be found in the manual section.\nDeclare paths\nFirst, let's do all the necessary imports and generate the paths that we'll use\nfor file import and export.", "from pathlib import Path\nimport pygaps.parsing as pgp\n\n# Get directory paths\nbase_path = Path.cwd() / 'data' / 'parsing'\n\n# Find files\naif_file_paths = list((base_path / 'aif').rglob('*.aif'))\njson_file_paths = list((base_path / 'json').rglob('*.json'))\nxl_file_paths = list((base_path / 'excel').rglob('*.xls'))\ncsv_file_paths = list((base_path / 'csv').rglob('*.csv'))", "Manufacturer import\nMany report files from various adsorption device manufacturers can be imported\ndirectly using pyGAPS. Here are some examples.", "cfld = base_path / \"commercial\"\nmicromeritics = pgp.isotherm_from_commercial(cfld / \"mic\" / \"Sample_A.xls\", 'mic', 'xl')\nbelsorp_dat = pgp.isotherm_from_commercial(cfld / \"bel\" / \"BF010_DUT-13_CH4_111K_run2.DAT\", 'bel', 'dat')\nbelsorp_xl = pgp.isotherm_from_commercial(cfld / \"bel\" / \"Sample_C.xls\", 'bel', 'xl')\nthreeP_xl = pgp.isotherm_from_commercial(cfld / \"3p\" / \"AC_ref_filter_Ar_87K_run 3_rep.xlsx\", '3p', 'xl')", "AIF Parsing\nAIF Import\nAdsorption information files are fully supported in pyGAPS, both for import and\nexports. Isotherms can be imported from an .aif as:", "# Import all\nisotherms = [pgp.isotherm_from_aif(path) for path in aif_file_paths]\n\n# Display an example file\nprint(isotherms[1])", "AIF Export\nSimilarly, an isotherm can be exported as an AIF file or a string, depending on\nwhether a path is passed. For this purpose use either the module\npygaps.isotherm_to_aif() function or the convenience class function\nto_aif().", "# module function\nfor isotherm in isotherms:\n filename = f'{isotherm.material} {isotherm.adsorbate} {isotherm.temperature}.aif'\n pgp.isotherm_to_aif(isotherm, base_path / 'aif' / filename)\n\n# save to file with convenience function\nisotherms[0].to_aif('isotherm.aif')\n\n# string\nisotherm_string = isotherms[0].to_aif()", "JSON Parsing\nJSON Import\nIsotherms can be imported either from a json file or from a json string. The\nsame function is used in both cases.", "# Import them\nisotherms = [pgp.isotherm_from_json(path) for path in json_file_paths]\n\n# Display an example file\nprint(isotherms[1])", "JSON Export\nExporting to JSON can be done to a file or a string, depending on whether a path\nis passed. 
For this purpose use either the module pygaps.isotherm_to_json()\nfunction or the convenience class function to_json().", "# module function\nfor isotherm in isotherms:\n filename = f'{isotherm.material} {isotherm.adsorbate} {isotherm.temperature}.json'\n pgp.isotherm_to_json(isotherm, base_path / 'json' / filename)\n\n# save to file with convenience function\nisotherms[0].to_json('isotherm.json')\n\n# string\nisotherm_string = isotherms[0].to_json()", "Excel Parsing\nExcel does not have to be installed on the system in use.\nExcel Import", "# Import them\nisotherms = [pgp.isotherm_from_xl(path) for path in xl_file_paths]\n\n# Display an example file\nprint(isotherms[1])\n\nisotherms[1].plot()", "Excel Export", "# Export each isotherm in turn\nfor isotherm in isotherms:\n filename = ' '.join([str(isotherm.material), str(isotherm.adsorbate), str(isotherm.temperature)]) + '.xls'\n pgp.isotherm_to_xl(isotherm, base_path / 'excel' / filename)\n\n# save to file with convenience function\nisotherms[0].to_xl('isotherm.xls')", "CSV Parsing\nCSV Import\nLike JSON, isotherms can be imported either from a CSV file or from a CSV string. The same function is used in both cases.", "# Import them\nisotherms = [pgp.isotherm_from_csv(path) for path in csv_file_paths]\n\n# Display an example file\nprint(isotherms[0])", "CSV Export", "# Export each isotherm in turn\nfor isotherm in isotherms:\n filename = ' '.join([str(isotherm.material), str(isotherm.adsorbate), str(isotherm.temperature)]) + '.csv'\n pgp.isotherm_to_csv(isotherm, base_path / 'csv' / filename)\n\n# save to file with convenience function\nisotherms[0].to_csv('isotherm.csv')\n\n# string representation\nisotherm_string = isotherms[0].to_csv()" ]
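Putting the pieces of this parsing notebook together, a short sketch of a format round-trip: every AIF file found in the first cell is re-read and mirrored to CSV and JSON, using only functions already shown here; the output names simply reuse each source file's stem.

```python
# Mirror every AIF isotherm found above to CSV and JSON siblings.
for path in aif_file_paths:
    isotherm = pgp.isotherm_from_aif(path)
    pgp.isotherm_to_csv(isotherm, base_path / 'csv' / f'{path.stem}.csv')
    pgp.isotherm_to_json(isotherm, base_path / 'json' / f'{path.stem}.json')
```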
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
csampez/analisis-numerico-computo-cientifico
MNO/proyecto_final/MNO_2017/proyectos/equipos/equipo_6/avance_22_05_2017/code/Clase_SVD_Imagen.ipynb
apache-2.0
[ "SVD\nSVD a una imagen para validar resultados con CUDA cusolverDnDgesvd\nEquipo_6 \nIntegrantes:\n\n\nRicardo Lastra\n\n\nAdrián Vázquez\n\n\nAntecedentes:\nLa factorizacion $SVD$ es uno de los modelos de factorizaciones de matrices mas usados hoy en dia por muchas paqueterias computacionales, esta nos ayuda a hacer aproximaciones a matrices de una forma muy eficiente.\nUsando el metodo thin $SVD$ visto en clase, el cual dice que necesitamos encontrar la matriz $Vi$ ortogonal de $nxn$ y una matriz $Ui$ con columnas ortonormales de $mxn$ tales que $Ui^T A-V=B$ sea Bidiagonal.\nPosteriormente en una necesitamos multiplicar $U=U_1U_2$, $V=V_1V_2$ para obtener $A=U\\Sigma V^T$ y asi obtener valores singulares en la diagonal de $\\Sigma$.\nDesarrollo del programa:", "#SE IMPORTAN LIBRERIAS PARA GRAFICAR, PARA COMPUTO DE MATRICES Y PARA LEER IMAGENES\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\n\n#SE LEE UNA IMAGEN, SE LEE Y SE GUARDA COMO BLANCO Y NEGRO \nimg = Image.open('GTR.jpg')\n#img.rotate(45).show() #Para rotar la imagen cierto no. de grados, eje 45grados\nimggray = img.convert('LA')\nplt.figure(figsize=(6, 6))\nplt.imshow(imggray)\n\n# SE GUARDA EN UNA MATRIZ CON NUMPY Y SE GRAFICA LA IMAGEN\nimgmatriz = np.array(list(imggray.getdata(band=0)), float)\nimgmatriz.shape = (imggray.size[1], imggray.size[0])\nimgmatriz = np.matrix(imgmatriz)\nplt.figure(figsize=(6,6))\nplt.imshow(imgmatriz, cmap='gray')\n\n#VISUALIZAMOS LA IMAGEN ANTERIOR EN FORMA MATRICIAL\nprint(imgmatriz)\n\n#DIMENSION DEL ARREGLO\nimgmatriz.ndim\n\n#DIEMNSION DEL ARREGLO \"TUPLA\"\nimgmatriz.shape\n\n#NUMERO DE ENTRADAS DEL ARREGLO \nimgmatriz.size\n\n#LONGITUD DE UNA ENTRADA DEL ARREGLO EN bytes\nimgmatriz.itemsize\n\n#TOTAL DE bytes DEL ARREGLO\nimgmatriz.nbytes", "Se computa $SVD$ :", "#SE HACE LA DESCOMPOSICION DE VALORES SINGULARES\nU, sigma, Vt = np.linalg.svd(imgmatriz)", "Imprimimos resultados de las matrices $U$ $\\Sigma$ $Vt$ :", "print(\"U:\")\nprint(U)\nprint(\"sigma:\")\nprint(sigma)\nprint(\"Vt:\")\nprint(Vt)\n\n#TOTAL DE bytes DEL ARREGLO (solo sigma)\nsigma.nbytes", "Visualizamos la $/Sigma$ en una matriz diagonal:", "S = np.zeros(imgmatriz.shape, \"float\")\nS[:min(imgmatriz.shape), :min(imgmatriz.shape)] = np.diag(sigma)\nprint(S)", "Calculo y reconstrucción:\nSe calcula una aproximacion usando la primera columna de U y la primera fila de V reporduciendo la imagen, cada columna de pixeles es una ponderacion de los mismos valores originales $\\vec{u}_1 $ :", "reconstimg = np.matrix(U[:, :1]) * np.diag(sigma[:1]) * np.matrix(V[:1, :])\nplt.figure(figsize=(6,6))\nplt.imshow(reconstimg, cmap='gray');\n\n#RECONSTRUIMOS CON 8 Y 9 VECTORES\nfor i in range(8, 10):\n reconstimg = np.matrix(U[:, :i]) * np.diag(sigma[:i]) * np.matrix(V[:i, :])\n plt.imshow(reconstimg, cmap='gray')\n title = \"n = %s\" % i\n plt.title(title)\n plt.show()\n\n#RECONSTRUIMOS DE 10 EN 10 VECTORES PARA VER CUANDO SE REPRODUCE UNA IMAGEN SIMILAR A LA ORIGINAL...\nfor i in range(10,50, 10):\n reconstimg = np.matrix(U[:, :i]) * np.diag(sigma[:i]) * np.matrix(V[:i, :])\n plt.imshow(reconstimg, cmap='gray')\n title = \"n = %s\" % i\n plt.title(title)\n plt.show()", "Reconstruccion de matriz original:", "np.dot(U, np.dot(S, Vt)) #se usa Vt\n\nimgmatriz" ]
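One note on the SVD notebook above: the decomposition assigns U, sigma, Vt, but the reconstruction cells index a variable V, which is not defined anywhere in this excerpt and would raise a NameError as written. The sketch below uses Vt consistently, and also reports how much of the squared singular-value "energy" a rank-k cut keeps; it assumes the U, sigma, Vt computed above.

```python
import numpy as np

def rank_k_approx(U, sigma, Vt, k):
    """Rank-k reconstruction plus the share of squared singular values it keeps."""
    s = np.asarray(sigma).ravel()
    approx = np.asarray(U)[:, :k] @ np.diag(s[:k]) @ np.asarray(Vt)[:k, :]
    energy = (s[:k] ** 2).sum() / (s ** 2).sum()
    return approx, energy

# Example with the matrices computed above:
# approx, energy = rank_k_approx(U, sigma, Vt, 30)
# plt.imshow(approx, cmap='gray'); plt.title('k=30, %.1f%% energia' % (100 * energy))
```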
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
zlpure/CS231n
assignment1/two_layer_net.ipynb
mit
[ "Implementing a Neural Network\nIn this exercise we will develop a neural network with fully-connected layers to perform classification, and test it out on the CIFAR-10 dataset.", "# A bit of setup\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom cs231n.classifiers.neural_net import TwoLayerNet\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading external modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n\ndef rel_error(x, y):\n \"\"\" returns relative error \"\"\"\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))", "We will use the class TwoLayerNet in the file cs231n/classifiers/neural_net.py to represent instances of our network. The network parameters are stored in the instance variable self.params where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation.", "# Create a small net and some toy data to check your implementations.\n# Note that we set the random seed for repeatable experiments.\n\ninput_size = 4\nhidden_size = 10\nnum_classes = 3\nnum_inputs = 5\n\ndef init_toy_model():\n np.random.seed(0)\n return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)\n\ndef init_toy_data():\n np.random.seed(1)\n X = 10 * np.random.randn(num_inputs, input_size)\n y = np.array([0, 1, 2, 2, 1])\n return X, y\n\nnet = init_toy_model()\nX, y = init_toy_data()", "Forward pass: compute scores\nOpen the file cs231n/classifiers/neural_net.py and look at the method TwoLayerNet.loss. This function is very similar to the loss functions you have written for the SVM and Softmax exercises: It takes the data and weights and computes the class scores, the loss, and the gradients on the parameters. \nImplement the first part of the forward pass which uses the weights and biases to compute the scores for all inputs.", "scores = net.loss(X)\nprint 'Your scores:'\nprint scores\nprint\nprint 'correct scores:'\ncorrect_scores = np.asarray([\n [-0.81233741, -1.27654624, -0.70335995],\n [-0.17129677, -1.18803311, -0.47310444],\n [-0.51590475, -1.01354314, -0.8504215 ],\n [-0.15419291, -0.48629638, -0.52901952],\n [-0.00618733, -0.12435261, -0.15226949]])\nprint correct_scores\nprint\n\n# The difference should be very small. We get < 1e-7\nprint 'Difference between your scores and correct scores:'\nprint np.sum(np.abs(scores - correct_scores))", "Forward pass: compute loss\nIn the same function, implement the second part that computes the data and regularizaion loss.", "loss, _ = net.loss(X, y, reg=0.1)\ncorrect_loss = 1.30378789133\n\n# should be very small, we get < 1e-12\nprint 'Difference between your loss and correct loss:'\nprint np.sum(np.abs(loss - correct_loss))", "Backward pass\nImplement the rest of the function. This will compute the gradient of the loss with respect to the variables W1, b1, W2, and b2. Now that you (hopefully!) 
have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check:", "from cs231n.gradient_check import eval_numerical_gradient\n\n# Use numeric gradient checking to check your implementation of the backward pass.\n# If your implementation is correct, the difference between the numeric and\n# analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2.\n\nloss, grads = net.loss(X, y, reg=0.1)\n\n# these should all be less than 1e-8 or so\nfor param_name in grads:\n f = lambda W: net.loss(X, y, reg=0.1)[0]\n param_grad_num = eval_numerical_gradient(f, net.params[param_name], verbose=False)\n print '%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name]))", "Train the network\nTo train the network we will use stochastic gradient descent (SGD), similar to the SVM and Softmax classifiers. Look at the function TwoLayerNet.train and fill in the missing sections to implement the training procedure. This should be very similar to the training procedure you used for the SVM and Softmax classifiers. You will also have to implement TwoLayerNet.predict, as the training process periodically performs prediction to keep track of accuracy over time while the network trains.\nOnce you have implemented the method, run the code below to train a two-layer network on toy data. You should achieve a training loss less than 0.2.", "net = init_toy_model()\nstats = net.train(X, y, X, y,\n learning_rate=1e-1, reg=1e-5,\n num_iters=100, verbose=False)\n\nprint 'Final training loss: ', stats['loss_history'][-1]\n\n# plot the loss history\nplt.plot(stats['loss_history'])\nplt.xlabel('iteration')\nplt.ylabel('training loss')\nplt.title('Training Loss history')\nplt.show()", "Load the data\nNow that you have implemented a two-layer network that passes gradient checks and works on toy data, it's time to load up our favorite CIFAR-10 data so we can use it to train a classifier on a real dataset.", "from cs231n.data_utils import load_CIFAR10\n\ndef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):\n \"\"\"\n Load the CIFAR-10 dataset from disk and perform preprocessing to prepare\n it for the two-layer neural net classifier. These are the same steps as\n we used for the SVM, but condensed to a single function. 
\n \"\"\"\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n \n # Subsample the data\n mask = range(num_training, num_training + num_validation)\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n # Reshape data to rows\n X_train = X_train.reshape(num_training, -1)\n X_val = X_val.reshape(num_validation, -1)\n X_test = X_test.reshape(num_test, -1)\n\n return X_train, y_train, X_val, y_val, X_test, y_test\n\n\n# Invoke the above function to get our data.\nX_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()\nprint 'Train data shape: ', X_train.shape\nprint 'Train labels shape: ', y_train.shape\nprint 'Validation data shape: ', X_val.shape\nprint 'Validation labels shape: ', y_val.shape\nprint 'Test data shape: ', X_test.shape\nprint 'Test labels shape: ', y_test.shape", "Train a network\nTo train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate.", "input_size = 32 * 32 * 3\nhidden_size = 100\nnum_classes = 10\nnet = TwoLayerNet(input_size, hidden_size, num_classes)\n\n# Train the network\nstats = net.train(X_train, y_train, X_val, y_val,\n num_iters=10000, batch_size=200,\n learning_rate=1e-4, learning_rate_decay=0.95,\n reg=0.4, verbose=True)\n\n# Predict on the validation set\nval_acc = (net.predict(X_val) == y_val).mean()\nprint 'Validation accuracy: ', val_acc\n\n", "Debug the training\nWith the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.\nOne strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.\nAnother strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized.", "# Plot the loss function and train / validation accuracies\nplt.subplot(2, 1, 1)\nplt.plot(stats['loss_history'])\nplt.title('Loss history')\nplt.xlabel('Iteration')\nplt.ylabel('Loss')\n\nplt.subplot(2, 1, 2)\nplt.plot(stats['train_acc_history'], label='train')\nplt.plot(stats['val_acc_history'], label='val')\nplt.title('Classification accuracy history')\nplt.xlabel('Epoch')\nplt.ylabel('Clasification accuracy')\nplt.show()\n\nfrom cs231n.vis_utils import visualize_grid\n\n# Visualize the weights of the network\n\ndef show_net_weights(net):\n W1 = net.params['W1']\n W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)\n plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))\n plt.gca().axis('off')\n plt.show()\n\nshow_net_weights(net)", "Tune your hyperparameters\nWhat's wrong?. Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. 
Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity, and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy.\nTuning. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, numer of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value.\nApproximate results. You should be aim to achieve a classification accuracy of greater than 48% on the validation set. Our best network gets over 52% on the validation set.\nExperiment: You goal in this exercise is to get as good of a result on CIFAR-10 as you can, with a fully-connected Neural Network. For every 1% above 52% on the Test set we will award you with one extra bonus point. Feel free implement your own techniques (e.g. PCA to reduce dimensionality, or adding dropout, or adding features to the solver, etc.).", "best_net = None # store the best model into this \n\n#################################################################################\n# TODO: Tune hyperparameters using the validation set. Store your best trained #\n# model in best_net. #\n# #\n# To help debug your network, it may help to use visualizations similar to the #\n# ones we used above; these visualizations will have significant qualitative #\n# differences from the ones we saw above for the poorly tuned network. #\n# #\n# Tweaking hyperparameters by hand can be fun, but you might find it useful to #\n# write code to sweep through possible combinations of hyperparameters #\n# automatically like we did on the previous exercises. #\n#################################################################################\nbest_net=net\n#################################################################################\n# END OF YOUR CODE #\n#################################################################################\n\n# visualize the weights of the best network\nshow_net_weights(best_net)", "Run on the test set\nWhen you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%.\nWe will give you extra bonus point for every 1% of accuracy above 52%.", "test_acc = (best_net.predict(X_test) == y_test).mean()\nprint 'Test accuracy: ', test_acc" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
sastels/Onboarding
4 - Sorting.ipynb
mit
[ "Sorting\nThe easiest way to sort is with the sorted(list) function, which takes a list and returns a new list with those elements in sorted order. The original list is not changed.", "a = [5, 1, 4, 3]\nprint sorted(a)\nprint a", "It's most common to pass a list into the sorted() function, but in fact it can take as input any sort of iterable collection. The older list.sort() method is an alternative detailed below. The sorted() function seems easier to use compared to sort(), so I recommend using sorted().\nThe sorted() function can be customized though optional arguments. The sorted() optional argument reverse=True, e.g. sorted(list, reverse=True), makes it sort backwards.", "strs = ['aa', 'BB', 'zz', 'CC']\nprint sorted(strs)\nprint sorted(strs, reverse=True)", "Custom Sorting With key\nFor more complex custom sorting, sorted() takes an optional \"key=\" specifying a \"key\" function that transforms each element before comparison. The key function takes in 1 value and returns 1 value, and the returned \"proxy\" value is used for the comparisons within the sort.\nFor example with a list of strings, specifying key=len (the built in len() function) sorts the strings by length, from shortest to longest. The sort calls len() for each string to get the list of proxy length values, and the sorts with those proxy values.", "strs = ['ccc', 'aaaa', 'd', 'bb']\nprint sorted(strs, key=len)", "As another example, specifying \"str.lower\" as the key function is a way to force the sorting to treat uppercase and lowercase the same:", "strs = ['aa', 'BB', 'zz', 'CC']\nprint sorted(strs, key=str.lower)", "You can also pass in your own MyFn as the key function. Say we have a list of strings we want to sort by the last letter of the string.", "strs = ['xc', 'zb', 'yd' ,'wa']", "A little function that takes a string, and returns its last letter.\nThis will be the key function (takes in 1 value, returns 1 value).", "def MyFn(s):\n return s[-1]", "Now pass key=MyFn to sorted() to sort by the last letter.", "print sorted(strs, key=MyFn)", "To use key= custom sorting, remember that you provide a function that takes one value and returns the proxy value to guide the sorting. There is also an optional argument \"cmp=cmpFn\" to sorted() that specifies a traditional two-argument comparison function that takes two values from the list and returns negative/0/positive to indicate their ordering. The built in comparison function for strings, ints, ... is cmp(a, b), so often you want to call cmp() in your custom comparator. The newer one argument key= sorting is generally preferable.\nsort() method\nAs an alternative to sorted(), the sort() method on a list sorts that list into ascending order, e.g. list.sort(). The sort() method changes the underlying list and returns None, so use it like this:", "alist = [1,5,9,2,5]\nalist.sort()\nalist", "Incorrect (returns None):", "blist = alist.sort()\nblist", "The above is a very common misunderstanding with sort() -- it does not return the sorted list. The sort() method must be called on a list; it does not work on any enumerable collection (but the sorted() function above works on anything). The sort() method predates the sorted() function, so you will likely see it in older code. The sort() method does not need to create a new list, so it can be a little faster in the case that the elements to sort are already in a list.\nTuples\nA tuple is a fixed size grouping of elements, such as an (x, y) co-ordinate. 
Tuples are like lists, except they are immutable and do not change size (tuples are not strictly immutable since one of the contained elements could be mutable). Tuples play a sort of \"struct\" role in Python -- a convenient way to pass around a little logical, fixed size bundle of values. A function that needs to return multiple values can just return a tuple of the values. For example, if I wanted to have a list of 3-d coordinates, the natural python representation would be a list of tuples, where each tuple is size 3 holding one (x, y, z) group.\nTo create a tuple, just list the values within parenthesis separated by commas. The \"empty\" tuple is just an empty pair of parenthesis. Accessing the elements in a tuple is just like a list -- len(), [ ], for, in, etc. all work the same.", "tuple = (1, 2, 'hi')\nprint len(tuple)\nprint tuple[2]", "Tuples are immutable, i.e. they cannot be changed.", "tuple[2] = 'bye'", "If you want to change a tuple variable, you must reassign it to a new tuple:", "tuple = (1, 2, 'bye')\ntuple", "To create a size-1 tuple, the lone element must be followed by a comma.", "tuple = ('hi',)\ntuple", "It's a funny case in the syntax, but the comma is necessary to distinguish the tuple from the ordinary case of putting an expression in parentheses. In some cases you can omit the parenthesis and Python will see from the commas that you intend a tuple.\nAssigning a tuple to an identically sized tuple of variable names assigns all the corresponding values. If the tuples are not the same size, it throws an error. This feature works for lists too.", "(err_string, err_code) = ('uh oh', 666)\nprint err_code, ':', err_string", "List Comprehensions\nA list comprehension is a compact way to write an expression that expands to a whole list. Suppose we have a list nums [1, 2, 3], here is the list comprehension to compute a list of their squares [1, 4, 9]:", "nums = [1, 2, 3, 4]\nsquares = [ n * n for n in nums ]\nsquares", "The syntax is [ expr for var in list ] -- the for var in list looks like a regular for-loop, but without the colon (:). The expr to its left is evaluated once for each element to give the values for the new list. Here is an example with strings, where each string is changed to upper case with '!!!' appended:", "strs = ['hello', 'and', 'goodbye']\nshouting = [ s.upper() + '!!!' for s in strs ]\nshouting", "You can add an if test to the right of the for-loop to narrow the result. The if test is evaluated for each element, including only the elements where the test is true.", "## Select values <= 2\nnums = [2, 8, 1, 6]\nsmall = [ n for n in nums if n <= 2 ]\nsmall\n\n## Select fruits containing 'a', change to upper case\nfruits = ['apple', 'cherry', 'bannana', 'lemon']\nafruits = [ s.upper() for s in fruits if 'a' in s ]\nafruits", "Exercise\nFor practice with sorting, go to the notebook 3.5 - Sorting exercises\nNote: This notebook is based on Google's python tutorial https://developers.google.com/edu/python" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
Kaggle/learntools
notebooks/machine_learning/raw/tut_titanic.ipynb
apache-2.0
[ "In the final exercise of the Intro to Machine Learning course, you learned how to make a submission to a Kaggle competition. But some of the work was already completed for you, since you were provided a notebook with partially completed code. \nIn this tutorial, you'll explore a full workflow that you can use to get started (from the very beginning!) with creating a submission to any Kaggle competition. We'll use the Titanic competition as an example.\nPart 1: Get started\nIn this section, you'll learn more about the competition and make your first submission. \nJoin the competition!\nThe first thing to do is to join the competition! Open a new window with the competition page, and click on the \"Join Competition\" button, if you haven't already. (If you see a \"Submit Predictions\" button instead of a \"Join Competition\" button, you have already joined the competition, and don't need to do so again.)\n\nThis takes you to the rules acceptance page. You must accept the competition rules in order to participate. These rules govern how many submissions you can make per day, the maximum team size, and other competition-specific details. Then, click on \"I Understand and Accept\" to indicate that you will abide by the competition rules.\nThe challenge\nThe competition is simple: we want you to use the Titanic passenger data (name, age, price of ticket, etc) to try to predict who will survive and who will die.\nThe data\nTo take a look at the competition data, click on the <a href=\"https://www.kaggle.com/c/titanic/data\" target=\"_blank\" rel=\"noopener noreferrer\"><b>Data tab</b></a> at the top of the competition page. Then, scroll down to find the list of files. \n\nThere are three files in the data: (1) train.csv, (2) test.csv, and (3) gender_submission.csv.\n(1) train.csv\ntrain.csv contains the details of a subset of the passengers on board (891 passengers, to be exact -- where each passenger gets a different row in the table). To investigate this data, click on the name of the file under the \"Data Sources\" column (on the left of the screen). Once you've done this, all of the column names (along with a brief description of what they contain) are listed to the right of the screen, under the \"Columns\" heading. \n\nYou can view all of the data in the same window. \n\nThe values in the second column (\"Survived\") can be used to determine whether each passenger survived or not: \n- if it's a \"1\", the passenger survived.\n- if it's a \"0\", the passenger died.\nFor instance, the first passenger listed in train.csv is Mr. Owen Harris Braund. He was 22 years old when he died on the Titanic.\n(2) test.csv\nUsing the patterns you find in train.csv, you have to predict whether the other 418 passengers on board (in test.csv) survived. \nClick on test.csv (under the \"Data Sources\" column) to examine its contents. Note that test.csv does not have a \"Survived\" column - this information is hidden from you, and how well you do at predicting these hidden values will determine how highly you score in the competition! \n(3) gender_submission.csv\nThe gender_submission.csv file is provided as an example that shows how you should structure your predictions. It predicts that all female passengers survived, and all male passengers died. Your hypotheses regarding survival will probably be different, which will lead to a different submission file. 
But, just like this file, your submission should have:\n- a \"PassengerId\" column containing the IDs of each passenger from test.csv.\n- a \"Survived\" column (that you will create!) with a \"1\" for the rows where you think the passenger survived, and a \"0\" where you predict that the passenger died.\nYour first submission\nAs a benchmark, you'll download the gender_submission.csv file and submit it to the competition. Begin by clicking on the download link to the right of the name of the file. \n\nThis downloads the file to your computer. Then:\n- Click on the blue \"Submit Predictions\" button in the top right corner of the competition page. (This button now appears where the \"Join Competition\" button was.)\n- Scroll down to \"Step 1: Upload submission file\". Upload the file you just downloaded. Then, click on the blue \"Make Submission\" button. \nIn a few seconds, your submission will be scored, and you'll receive a spot on the leaderboard. Next, we'll walk you through how to outperform this initial submission!\nPart 2: Your coding environment\nIn this section, you'll train your own machine learning model to improve your predictions. \nThe Notebook\nThe first thing to do is to create a Kaggle Notebook where you'll store all of your code. You can use Kaggle Notebooks to getting up and running with writing code quickly, and without having to install anything on your computer. (If you are interested in deep learning, we also offer free GPU and TPU access!) \nBegin by clicking on the <a href=\"https://www.kaggle.com/c/titanic/kernels\" target=\"_blank\">Notebooks tab</a> on the competition page. Then, click on \"New Notebook\".\n\nNext, click on \"Create\". (Don't change the default settings: so, \"Python\" should appear under \"Select language\", and you should have \"Notebook\" selected under \"Select type\".)\n\nYour notebook will take a few seconds to load. In the top left corner, you can see the name of your notebook -- something like \"kernel2daed3cd79\".\n\nYou can edit the name by clicking on it. Change it to something more descriptive, like \"Getting Started with Titanic\". \n\nYour first lines of code\nWhen you start a new notebook, it has two gray boxes for storing code. We refer to these gray boxes as \"code cells\".\n\nThe first code cell already has some code in it. To run this code, put your cursor in the code cell. (If your cursor is in the right place, you'll notice a blue vertical line to the left of the gray box.) Then, either hit the play button (which appears to the left of the blue line), or hit [Shift] + [Enter] on your keyboard.\nIf the code runs successfully, three lines of output are returned. Below, you can see the same code that you just ran, along with the output that you should see in your notebook.", "# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# Any results you write to the current directory are saved as output.", "This shows us where the competition data is stored, so that we can load the files into the notebook. We'll do that next.\nLoad the data\nThe second code cell in your notebook now appears below the three lines of output with the file locations.\n\nType the two lines of code below into your second code cell. Then, once you're done, either click on the blue play button, or hit [Shift] + [Enter].", "train_data = pd.read_csv(\"../input/titanic/train.csv\")\ntrain_data.head()", "Your code should return the output above, which corresponds to the first five rows of the table in train.csv. It's very important that you see this output in your notebook before proceeding with the tutorial!\n\nIf your code does not produce this output, double-check that your code is identical to the two lines above. And, make sure your cursor is in the code cell before hitting [Shift] + [Enter].\n\nThe code that you've just written is in the Python programming language. It uses a Python \"module\" called pandas (abbreviated as pd) to load the table from the train.csv file into the notebook. To do this, we needed to plug in the location of the file (which we saw was /kaggle/input/titanic/train.csv). \n\nIf you're not already familiar with Python (and pandas), the code shouldn't make sense to you -- but don't worry! The point of this tutorial is to (quickly!) make your first submission to the competition. At the end of the tutorial, we suggest resources to continue your learning.\n\nAt this point, you should have at least three code cells in your notebook.\n\nCopy the code below into the third code cell of your notebook to load the contents of the test.csv file. Don't forget to click on the play button (or hit [Shift] + [Enter])!", "test_data = pd.read_csv(\"../input/titanic/test.csv\")\ntest_data.head()", "As before, make sure that you see the output above in your notebook before continuing. \nOnce all of the code runs successfully, all of the data (in train.csv and test.csv) is loaded in the notebook. (The code above shows only the first 5 rows of each table, but all of the data is there -- all 891 rows of train.csv and all 418 rows of test.csv!)\nPart 3: Improve your score\nRemember our goal: we want to find patterns in train.csv that help us predict whether the passengers in test.csv survived.\nIt might initially feel overwhelming to look for patterns, when there's so much data to sort through. So, we'll start simple.\nExplore a pattern\nRemember that the sample submission file in gender_submission.csv assumes that all female passengers survived (and all male passengers died). \nIs this a reasonable first guess? We'll check if this pattern holds true in the data (in train.csv).\nCopy the code below into a new code cell. Then, run the cell.", "women = train_data.loc[train_data.Sex == 'female'][\"Survived\"]\nrate_women = sum(women)/len(women)\n\nprint(\"% of women who survived:\", rate_women)", "Before moving on, make sure that your code returns the output above. 
The code above calculates the percentage of female passengers (in train.csv) who survived.\nThen, run the code below in another code cell:", "men = train_data.loc[train_data.Sex == 'male'][\"Survived\"]\nrate_men = sum(men)/len(men)\n\nprint(\"% of men who survived:\", rate_men)", "The code above calculates the percentage of male passengers (in train.csv) who survived.\nFrom this you can see that almost 75% of the women on board survived, whereas only 19% of the men lived to tell about it. Since gender seems to be such a strong indicator of survival, the submission file in gender_submission.csv is not a bad first guess, and it makes sense that it performed reasonably well!\nBut at the end of the day, this gender-based submission bases its predictions on only a single column. As you can imagine, by considering multiple columns, we can discover more complex patterns that can potentially yield better-informed predictions. Since it is quite difficult to consider several columns at once (or, it would take a long time to consider all possible patterns in many different columns simultaneously), we'll use machine learning to automate this for us.\nYour first machine learning model\nWe'll build a random forest model. This model is constructed of several \"trees\" (there are three trees in the picture below, but we'll construct 100!) that will individually consider each passenger's data and vote on whether the individual survived. Then, the random forest model makes a democratic decision: the outcome with the most votes wins!\n\nThe code cell below looks for patterns in four different columns (\"Pclass\", \"Sex\", \"SibSp\", and \"Parch\") of the data. It constructs the trees in the random forest model based on patterns in the train.csv file, before generating predictions for the passengers in test.csv. The code also saves these new predictions in a CSV file my_submission.csv.\nCopy this code into your notebook, and run it in a new code cell.", "from sklearn.ensemble import RandomForestClassifier\n\ny = train_data[\"Survived\"]\n\nfeatures = [\"Pclass\", \"Sex\", \"SibSp\", \"Parch\"]\nX = pd.get_dummies(train_data[features])\nX_test = pd.get_dummies(test_data[features])\n\nmodel = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)\nmodel.fit(X, y)\npredictions = model.predict(X_test)\n\noutput = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})\noutput.to_csv('my_submission.csv', index=False)\nprint(\"Your submission was successfully saved!\")", "Make sure that your notebook outputs the same message above (Your submission was successfully saved!) before moving on.\n\nAgain, don't worry if this code doesn't make sense to you! For now, we'll focus on how to generate and submit predictions.\n\nOnce you're ready, click on the blue \"Save Version\" button in the top right corner of your notebook. This will generate a pop-up window.\n- Ensure that the \"Save and Run All\" option is selected, and then click on the blue \"Save\" button.\n- This generates a window in the bottom left corner of the notebook. After it has finished running, click on the number to the right of the \"Save Version\" button. This pulls up a list of versions on the right of the screen. Click on the ellipsis (...) to the right of the most recent version, and select Open in Viewer.\n- Click on the Output tab on the right of the screen. 
Then, click on the \"Submit to Competition\" button to submit your results.\n\nOnce your file is successfully submitted, you should receive a message saying that you've moved up the leaderboard. Great work!\nPart 4: Keep learning!\nCan you use what you learned about random forests in the Intro to Machine Learning course to generate even better predictions? \nCheck out the Intermediate Machine Learning course to learn about more advanced techniques!" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
palandatarxcom/sklearn_tutorial_cn
notebooks/02.1-Machine-Learning-Intro.ipynb
bsd-3-clause
[ "这个分析笔记由Jake Vanderplas编辑汇总。 源代码和license文件在GitHub。 中文翻译由派兰数据在派兰大数据分析平台上完成。 源代码在GitHub上。\nScikit-learn简介: 基于Python的机器学习\n在本节中会介绍Scikit-learn的基本原理,它是一个集成了很多机器学习工具并被广泛使用的包,用Python实现。详情请参考http://scikit-learn.org 。\n概述\n主要目标:介绍机器学习的中心思想以及它们是怎样通过Scikit-learn集成进Python的。\n\n机器学习的定义\nScikit-learn中的数据表示\nScikit-learn的API的介绍\n\n关于Scikit-Learn\nScikit-Learn是一个采用简洁并灵活的 API,向用户提供广为人知的机器学习算法的Python包。它由上百个贡献者所开发,并且已经广泛运用至工业界和学术界中。\nScikit-Learn依赖于Python的 NumPy (Numerical Python) 和 SciPy (Scientific Python)库,它们为Python中高效的数值和科学计算提供了支持。scikit-learn本身并不是为极大的数据集量身定做的,但是也有一些工作是基于此的。\n在这个教程中,我将会主要关注于Scikit-learn中,运用于中小型数据集的问题。\n什么是机器学习?\n在这一节里面我们会去探索机器学习的本质。机器学习是一种构造程序的过程,让机器根据已有的数据,自动适应和调整程序的可变参数(一般来说是一个浮点数的列表)以提高程序的某种行为。\n机器学习可以看成是人工智能的一个分支。这些算法可以让电脑变得更加智能,从某种程度上电脑会自动生成数据,而不是仅仅像一个数据库一样进行数据存储和数据获取。\n我们在这里会举两个特别简单的关于机器学习的小任务。第一个是一个分类的任务:从图片上我们可以看出有一组两维的数据,根据类别分成了两种颜色。这个分类的算法可以在两类数据之间画出分割线:", "%matplotlib inline\n\n# 设置 seaborn 绘图库的默认参数.\n# 可以安全地注释掉\nimport seaborn; seaborn.set()\n\n# 导入样例\nimport matplotlib.pyplot as plt\nfrom fig_code import plot_sgd_separator\n\nplot_sgd_separator()", "这些可能看起来是很小的任务,但是它体现了一个非常重要的概念。通过画出分割线,我们已经学习了一个可以生成新数据的模型。如果您往这张图上添加一个没有被分类的点,这个算法现在可以预测它应是一个红色的点还是一个蓝色的点。\n如果你希望看到生成这个的源代码,你也可以在fig_code文件夹中打开代码,或者你可以用%load命令加载这段代码。\n下一个简单的例子我们看一个回归的算法,为一组数据拟合一条最佳的直线。", "from fig_code import plot_linear_regression\nplot_linear_regression()", "这也是一个从数据中建立模型的例子,所以这个模型可以被用来生成新的数据。这个模型从训练数据中被学习出来,而且可以用来预测测试数据的结果:我们给出一个点的x坐标值,这个模型可以让我们去预测对应的y坐标值。同样的,这看起来是一个简单的例子,但是它是机器学习算法的一个基础的操作。\nScikit-learn中的数据表示\n机器学习是从数据中建立模型的,我们将会从怎样让用电脑理解的方式去表示数据开始。同时,我们会用matplotlib的例子讲解如何将数据用图表的形式显示出来。\n在Scikit-learn中,大多数的机器学习算法的数据在二维的数组或者矩阵中存储。这些数据可能是numpy数组,在某些情况下也可能是scipy.sparse矩阵。数组的大小应该是[样本数,特征数] (【译者注】sample - 样本,feature - 特征)\n\n样本数(n_sample): 样本的数目。每一个样本都是一个需要处理的独立个体(例如:需要被分类),一个样本可能是一个文档、一幅图片、一段音频、一段视频、一个天文学数据、数据库或者CSV文件中的一行,或者任意一个确定的数值的集合。\n特征数(n_feature): 特征的数目,特征是描述一个样本的数值表达。特征一般是实数,不过在某些情况下也会是布尔值或者是离散数据。\n\n特征数必须提前确定。但是对于给定的样本,特征可以是很大(百万级)的一个零占大多数的集合。这种情况下,scipy.sparse矩阵就派上了用场,用这个矩阵比numpy矩阵在存储上会更加高效。\n\n(图片来自 Python Data Science Handbook)\n!\n一个简单的例子:Iris 数据集\n作为简单数据集的例子,我们将会介绍scikit-learn中存储的iris数据集。数据由3种不同品种的鸢尾花组成。下面是数据集中的3个品种,我们可以通过下面的代码显示出它们:", "from IPython.core.display import Image, display\ndisplay(Image(filename='images/iris_setosa.jpg'))\nprint(\"Iris Setosa\\n\")\n\ndisplay(Image(filename='images/iris_versicolor.jpg'))\nprint(\"Iris Versicolor\\n\")\n\ndisplay(Image(filename='images/iris_virginica.jpg'))\nprint(\"Iris Virginica\")", "问题:\n如果我们想设计一个算法去分辨iris的品种,数据可能是什么?\n记住:我们需要一个2D的数组,其大小为[样本数 * 特征数]\n\n\n样本数指的是什么?\n\n\n特征数指的是什么?\n\n\n记住每一个样本的特征数必须是固定的,而且对于每一个样本,特征数i必须是一个数值型的元素。\n用scikit-learn 加载 Iris 数据\nScikit-learn对于Iris数据有一个非常直接表示。数据表示如下:\n\n\nIris 数据集的特征:\n\n萼片长度(cm)\n萼片宽度(cm)\n花瓣长度(cm)\n花瓣宽度(cm)\n\n\n\n预测的目标类别\n\nIris Setosa\nIris Versicolour\nIris Virginica\n\n\n\nscikit-learn嵌入了一个iris CSV文件的拷贝和一个帮助函数去从numpy数组中加载它:", "from sklearn.datasets import load_iris\niris = load_iris()\n\niris.keys()\n\nn_samples, n_features = iris.data.shape\nprint((n_samples, n_features))\nprint(iris.data[0])\n\nprint(iris.data.shape)\nprint(iris.target.shape)\n\nprint(iris.target)\n\nprint(iris.target_names)", "这个数据是四维的,但是我们可以使用简单的scatter-plot一次显示出两维的数据:", "import numpy as np\nimport matplotlib.pyplot as plt\n\nx_index = 0\ny_index = 1\n\n# 这段代码使用iris的名字来标注颜色条(colorbar)\nformatter = plt.FuncFormatter(lambda i, *args: iris.target_names[int(i)])\n\nplt.scatter(iris.data[:, x_index], iris.data[:, y_index],\n c=iris.target, cmap=plt.cm.get_cmap('RdYlBu', 3))\nplt.colorbar(ticks=[0, 1, 2], 
format=formatter)\nplt.clim(-0.5, 2.5)\nplt.xlabel(iris.feature_names[x_index])\nplt.ylabel(iris.feature_names[y_index]);", "快速练习:\n在上面的脚本中改变 x_index 和 y_index, 找到一种可以最大化分隔出三个类别的它们的组合。\n这个练习是降维算法的一个预告,我们在之后会看到。\n其他数据\n它们分为如下三种:\n\n包内置数据: 这些小的数据集已经被集成在scikit-learn的安装包里面了,可以用sklearn.datasets.load_*去下载它\n供下载数据: 这些较大的数据可以供用户们下载,scikit-learn里面已经包含了下载这些数据集的流通道。这些数据可以在sklearn.datasets.fetch_*中找到。\n生成数据: 通过随机种子,可以通过现有模型随机生成一些数据集。它们可以在sklearn.datasets.make_*中找到\n\n你可以通过IPython的TAB自动补全来发现可能的数据集生成和加载工具。在从sklearn导入datasets之后,\n键入\ndatasets.load_ + TAB\n\n或者\ndatasets.fetch_ + TAB\n\n或者\ndatasets.make_ + TAB\n\n可以看到一列函数的组合。", "from sklearn import datasets\n\n# Type datasets.fetch_<TAB> or datasets.load_<TAB> in IPython to see all possibilities\n\n# datasets.fetch_\n\n# datasets.load_", "在下一节,我们将会使用一些数据集来研究机器学习的基本规则。" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
szitenberg/ReproPhyloVagrant
notebooks/Tutorials/Basic/3.8 Building a supermatrix.ipynb
mit
[ "This section shows how to build a supermatrix by providing minimal requirements for gene content per taxon (OTU). This approach is more suited for small scale analysis, because it relies on manual decisions, whereas large scale suprematrices are better constructed with the parameter space and data explorations tools of ReproPhylo. However, these are not addressed in this section. First, lets load our Project with the trimmed alignments:", "from reprophylo import *\npj = unpickle_pj('outputs/my_project.pkpj', git=False)", "3.8.1 Sorting out the metadata\nThe main decision to make when building a supermatrix is what metadata will be used to indicate that sequences of several genes belong to the same OTU in the tree. Obvious candidates would be the species name (stored as 'source_organism' if we read a GenBank file), or sample ID, voucher specimen and so on. Often, we would be required to modify the metadata in our Project, in a way that will correctly reflect the relationship between sequences that emerged from the same sample. \nIn the case of the Tetillidae.gb example file, sample IDs are stored either under 'source_specimen_voucher' or 'source_isolate'. In addition, identical voucher numbers are sometimes formatted differently for different genes. \nIn the file 'data/Tetillida_otus_corrected.csv', I have unified the columns 'source_specimen_voucher' and 'source_isolate' in a single column called 'source_otu' and also made sure to uniformly format all the voucher specimens:", "from IPython.display import Image\nImage('images/fix_otus.png', width = 400)", "Our Project has to be updated with the recent changes to the spreadsheet:", "pj.correct_metadata_from_file('data/Tetillida_otus_corrected.csv')", "Such fixes can also be done programmatically (see section 3.4)\n3.8.2 Designing the supermatrix\nSupermatrices are configured with objects of the class Concatenation. In a Concatenation object we can indicate the following:\n\nThe name of the concatenation\nThe loci it includes (here we pass locus objects rather than just Locus names)\nThe qualifier or metadata that stores the relationships among the records\nWhat loci all the OTUs must have\nGroups of loci from which each OTU must have at least one\nWhich trimmed alignment to use, if we have more than one for each locus in our Project\n\nHere is an example:", "concat = Concatenation('large_concat', # Any unique string\n \n pj.loci, # This is a list of Locus objects\n \n 'source_otu', # The values of this qualifier \n # flag sequences the belong to the same\n # sample\n \n otu_must_have_all_of=['MT-CO1'], # All the OTUS must have a cox1 sequence\n \n otu_must_have_one_of=[['18s','28s']], # All the OTUs must have either 18s or 28s or both\n \n define_trimmed_alns=[] # We only have one alignment per gene\n # so the list is empty (default value)\n )", "If we print this Concatenation object we get this message:", "print concat", "3.8.3 Building the supermatrix\nBuilding the suprematrix has two steps. First we need to mount the Concatenation object onto the Project where it will be stored in the list pj.concatenations. 
Second, we need to construct the MultipleSeqAlignment object, which will be stored in the pj.trimmed_alignments dictionary, under the key 'large_concat' in this case:", "pj.add_concatenation(concat)\npj.make_concatenation_alignments()\n\npickle_pj(pj, 'outputs/my_project.pkpj')", "Now that this supermatrix is stored as a trimmed alignment in the pj.trimmed_alignments dictionary, we can write it to a file or fetch the MultipleSeqAlignment object, as shown in section 3.7.\n3.8.4 Quick reference", "# Design a supermatrix\nconcat = Concatenation('concat_name', loci_list, 'otu_qualifier', **kwargs)\n\n# Add it to a project\npj.add_concatenation(concat)\n\n# Build supermatrices based on the Concatenation\n# objects in pj.concatenations\npj.make_concatenation_alignments()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ML4DS/ML4all
R2.kNN_Regression/regression_knn_student.ipynb
mit
[ "The k-nearest neighbors (kNN) regression algorithm\nAuthor: Jerónimo Arenas García ([email protected])\n Jesús Cid Sueiro ([email protected])\n\nNotebook version: 2.2 (Sep 08, 2017)\n\nChanges: v.1.0 - First version\nChanges: v.1.1 - Stock dataset included.\nChanges: v.2.0 - Notebook for UTAD course. Advertising data incorporated\nChanges: v.2.1 - Text and code revisited. General introduction removed.\nChanges: v.2.2 - Compatibility with python 2 and 3.", "# Import some libraries that will be necessary for working with data and displaying plots\n\n# To visualize plots in the notebook\n%matplotlib inline \n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pylab\n\n# Packages used to read datasets\nimport scipy.io # To read matlab files\nimport pandas as pd # To read datasets in csv format\n\n# For the student tests (only for python 2)\nimport sys\nif sys.version_info.major==2:\n from test_helper import Test\n\n# That's default image size for this interactive session\npylab.rcParams['figure.figsize'] = 9, 6 ", "1. The dataset\nWe describe next the regression task that we will use in the session. The dataset is an adaptation of the <a href=http://www.dcc.fc.up.pt/~ltorgo/Regression/DataSets.html> STOCK dataset</a>, taken originally from the <a href=http://lib.stat.cmu.edu/> StatLib Repository</a>. The goal of this problem is to predict the values of the stocks of a given airplane company, given the values of another 9 companies in the same day. \n<small> If you are reading this text from the python notebook with its full functionality, you can explore the results of the regression experiments using two alternative datasets:\n\n\nThe \n<a href=https://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength>CONCRETE dataset</a>, taken from the <a href=https://archive.ics.uci.edu/ml/index.html>Machine Learning Repository at the University of California Irvine</a>. The goal of the CONCRETE dataset tas is to predict the compressive strength of cement mixtures based on eight observed variables related to the composition of the mixture and the age of the material). \n\n\nThe Advertising dataset, taken from the book <a href= http://www-bcf.usc.edu/~gareth/ISL/data.html> An Introduction to Statistical Learning with applications in R</a>, with permission from the authors: G. James, D. Witten, T. Hastie and R. Tibshirani. The goal of this problem is to predict the sales of a given product, knowing the investment in different advertising sectors. More specifically, the input and output variables can be described as follows:\n\n\nInput features:\n\nTV: advertising dollars spent on TV for a single product in a given market (in thousands of dollars)\nRadio: advertising dollars spent on Radio\nNewspaper: advertising dollars spent on Newspaper\n\n\n\nResponse variable:\n\nSales: sales of a single product in a given market (in thousands of widgets)\n\n\n\nTo do so, just replace stock by concrete or advertising in the next cell. Remind that you must run the cells again to see the changes. \n</small>", "# SELECT dataset\n# Available options are 'stock', 'concrete' or 'advertising'\nds_name = 'stock'\n\n# Let us start by loading the data into the workspace, and visualizing the dimensions of all matrices\nif ds_name == 'stock':\n # STOCK DATASET\n data = scipy.io.loadmat('datasets/stock.mat')\n X_tr = data['xTrain']\n S_tr = data['sTrain']\n X_tst = data['xTest']\n S_tst = data['sTest']\n\nelif ds_name == 'concrete':\n # CONCRETE DATASET. 
\n data = scipy.io.loadmat('datasets/concrete.mat')\n X_tr = data['X_tr']\n S_tr = data['S_tr']\n X_tst = data['X_tst']\n S_tst = data['S_tst']\n\nelif ds_name == 'advertising': \n # ADVERTISING DATASET\n df = pd.read_csv('datasets/Advertising.csv', header=0)\n X_tr = df.values[:150, 1:4]\n S_tr = df.values[:150, [-1]] # The brackets around -1 is to make sure S_tr is a column vector, as in the other datasets\n X_tst = df.values[150:, 1:4]\n S_tst = df.values[150:, [-1]]\n\nelse:\n print('Unknown dataset')\n\n# Print the data dimension and the dataset sizes\nprint(\"SELECTED DATASET: \" + ds_name)\nprint(\"---- The size of the training set is {0}, that is: {1} samples with dimension {2}.\".format(\n X_tr.shape, X_tr.shape[0], X_tr.shape[1]))\nprint(\"---- The target variable of the training set contains {0} samples with dimension {1}\".format(\n S_tr.shape[0], S_tr.shape[1]))\nprint(\"---- The size of the test set is {0}, that is: {1} samples with dimension {2}.\".format(\n X_tst.shape, X_tst.shape[0], X_tst.shape[1]))\nprint(\"---- The target variable of the test set contains {0} samples with dimension {1}\".format(\n S_tst.shape[0], S_tst.shape[1]))", "1.1. Scatter plots\nWe can get a first rough idea about the regression task representing the scatter plot of each of the one-dimensional variables against the target data.", "pylab.subplots_adjust(hspace=0.2)\nfor idx in range(X_tr.shape[1]):\n ax1 = plt.subplot(3,3,idx+1)\n ax1.plot(X_tr[:,idx],S_tr,'.')\n ax1.get_xaxis().set_ticks([])\n ax1.get_yaxis().set_ticks([])\nplt.show()", "2. Baseline estimation. Using the average of the training set labels\nA first very simple method to build the regression model is to use the average of all the target values in the training set as the output of the model, discarding the value of the observation input vector.\nThis approach can be considered as a baseline, given that any other method making an effective use of the observation variables, statistically related to $s$, should improve the performance of this method.\nThe prediction is thus given by", "# Mean of all target values in the training set\ns_hat = np.mean(S_tr)\nprint(s_hat)", "for any input ${\\bf x}$.\nExercise 1\nCompute the mean square error over training and test sets, for the baseline estimation method.", "# We start by defining a function that calculates the average square error\ndef square_error(s, s_est):\n # Squeeze is used to make sure that s and s_est have the appropriate dimensions.\n y = np.mean(np.power((s - s_est), 2))\n # y = np.mean(np.power((np.squeeze(s) - np.squeeze(s_est)), 2))\n return y\n\n# Mean square error of the baseline prediction over the training data\n# MSE_tr = <FILL IN>\n\n# Mean square error of the baseline prediction over the test data\n# MSE_tst = <FILL IN>\n\nprint('Average square error in the training set (baseline method): {0}'.format(MSE_tr))\nprint('Average square error in the test set (baseline method): {0}'.format(MSE_tst)) ", "Note that in the previous piece of code, function 'square_error' can be used when the second argument is a number instead of a vector with the same length as the first argument. The value will be subtracted from each of the components of the vector provided as the first argument.", "if sys.version_info.major == 2:\n Test.assertTrue(np.isclose(MSE_tr, square_error(S_tr, s_hat)),'Incorrect value for MSE_tr')\n Test.assertTrue(np.isclose(MSE_tst, square_error(S_tst, s_hat)),'Incorrect value for MSE_tst')", "3. 
Unidimensional regression with the $k$-nn method\nThe principles of the $k$-nn method are the following:\n\nFor each point where a prediction is to be made, find the $k$ closest neighbors to that point (in the training set)\nObtain the estimation averaging the labels corresponding to the selected neighbors\n\nThe number of neighbors is a hyperparameter that plays an important role in the performance of the method. You can test its influence by changing $k$ in the following piece of code. In particular, you can sart with $k=1$ and observe the efect of increasing the value of $k$.", "# We implement unidimensional regression using the k-nn method\n# In other words, the estimations are to be made using only one variable at a time\n\nfrom scipy import spatial\n\nvar = 0 # pick a variable (e.g., any value from 0 to 8 for the STOCK dataset)\nk = 1 # Number of neighbors\nn_points = 1000 # Number of points in the 'x' axis (for representational purposes)\n\n# For representational purposes, we will compute the output of the regression model\n# in a series of equally spaced-points along the x-axis\ngrid_min = np.min([np.min(X_tr[:,var]), np.min(X_tst[:,var])])\ngrid_max = np.max([np.max(X_tr[:,var]), np.max(X_tst[:,var])])\nX_grid = np.linspace(grid_min,grid_max,num=n_points)\n\ndef knn_regression(X1, S1, X2, k):\n \"\"\" Compute the k-NN regression estimate for the observations contained in\n the rows of X2, for the training set given by the rows in X1 and the\n components of S1. k is the number of neighbours of the k-NN algorithm\n \"\"\"\n if X1.ndim == 1:\n X1 = np.asmatrix(X1).T\n if X2.ndim == 1:\n X2 = np.asmatrix(X2).T\n distances = spatial.distance.cdist(X1,X2,'euclidean')\n neighbors = np.argsort(distances, axis=0, kind='quicksort', order=None)\n closest = neighbors[range(k),:]\n \n est_values = np.zeros([X2.shape[0],1])\n for idx in range(X2.shape[0]):\n est_values[idx] = np.mean(S1[closest[:,idx]])\n \n return est_values\n\nest_tst = knn_regression(X_tr[:,var], S_tr, X_tst[:,var], k)\nest_grid = knn_regression(X_tr[:,var], S_tr, X_grid, k)\n\nplt.plot(X_tr[:,var], S_tr,'b.',label='Training points')\nplt.plot(X_tst[:,var], S_tst,'rx',label='Test points')\nplt.plot(X_grid, est_grid,'g-',label='Regression model')\nplt.axis('tight')\nplt.legend(loc='best')\nplt.show()", "3.1. Evolution of the error with the number of neighbors ($k$)\nWe see that a small $k$ results in a regression curve that exhibits many and large oscillations. The curve is capturing any noise that may be present in the training data, and <i>overfits</i> the training set. 
On the other hand, picking a $k$ that is too large (e.g., 200), the regression curve becomes too smooth, averaging out the values of the labels in the training set over large intervals of the observation variable.\nThe next code illustrates this effect by plotting the average training and test square errors as a function of $k$.", "var = 0\nk_max = 60\n\nk_max = np.minimum(k_max, X_tr.shape[0]) # k_max cannot be larger than the number of samples\n\n# Be careful with the use of range, e.g., range(3) = [0,1,2] and range(1,3) = [1,2]\nMSEk_tr = [square_error(S_tr, knn_regression(X_tr[:,var], S_tr, X_tr[:,var], k))\n           for k in range(1, k_max+1)]\nMSEk_tst = [square_error(S_tst, knn_regression(X_tr[:,var], S_tr, X_tst[:,var], k))\n            for k in range(1, k_max+1)]\n\nkgrid = np.arange(1, k_max+1)\nplt.plot(kgrid, MSEk_tr,'bo', label='Training square error')\nplt.plot(kgrid, MSEk_tst,'ro', label='Test square error')\nplt.xlabel('$k$')\nplt.ylabel('Square Error')\nplt.axis('tight')\n\nplt.legend(loc='best')\nplt.show()", "As we can see, the error initially decreases, achieving a minimum (in the test set) for some finite value of $k$ ($k\\approx 10$ for the STOCK dataset). Increasing the value of $k$ beyond that value results in poorer performance.\nExercise 2\nAnalyze the training MSE for $k=1$. Why is it smaller than for any other $k$? Under which conditions will it be exactly zero?\nExercise 3\nModify the code above to visualize the square error from $k=1$ up to $k$ equal to the number of training instances. Can you relate the square error of the $k$-NN method with that of the baseline method for a certain value of $k$? \n3.1. Influence of the input variable\nHaving a look at the scatter plots, we can observe that some observation variables seem to have a clearer relationship with the target value. Thus, we can expect that not all variables are equally useful for the regression task. In the following plot, we carry out a study of the performance that can be achieved with each variable. \nNote that, in practice, the test labels are not available for the selection of the hyperparameter\n$k$, so we should be careful about the conclusions of this experiment. A more realistic approach will be studied later when we introduce the concept of model validation.", "k_max = 20\n\nvar_performance = []\nk_values = []\n\nfor var in range(X_tr.shape[1]):\n\n    MSE_tr = [square_error(S_tr, knn_regression(X_tr[:,var], S_tr, X_tr[:, var], k))\n              for k in range(1, k_max+1)]\n    MSE_tst = [square_error(S_tst, knn_regression(X_tr[:,var], S_tr, X_tst[:, var], k))\n               for k in range(1, k_max+1)]\n    MSE_tr = np.asarray(MSE_tr)\n    MSE_tst = np.asarray(MSE_tst)\n\n    # We select the variable associated with the value of k for which the training error is minimum\n    pos = np.argmin(MSE_tr)\n    k_values.append(pos + 1)\n    var_performance.append(MSE_tst[pos])\n\nplt.stem(range(X_tr.shape[1]), var_performance, use_line_collection=True)\nplt.title('Results of unidimensional regression ($k$NN)')\nplt.xlabel('Variable')\nplt.ylabel('Test MSE')\n\nplt.figure(2)\nplt.stem(range(X_tr.shape[1]), k_values, use_line_collection=True)\nplt.xlabel('Variable')\nplt.ylabel('$k$')\nplt.title('Selection of the hyperparameter')\nplt.show()", "4. Multidimensional regression with the $k$-nn method\nIn the previous subsection, we have studied the performance of the $k$-nn method when using only one variable.
Doing so was convenient, because it allowed us to plot the regression curves in a 2-D plot, and to get some insight about the consequences of modifying the number of neighbors.\nFor completeness, we evaluate now the performance of the $k$-nn method in this dataset when using all variables together. In fact, when designing a regression model, we should proceed in this manner, using all available information to make as accurate an estimation as possible. In this way, we can also account for correlations that might be present among the different observation variables, and that may carry very relevant information for the regression task.\nFor instance, in the STOCK dataset, it may be that the combination of the stock values of two airplane companies is more informative about the price of the target company, while the value for a single company is not enough.\n<small> Also, in the CONCRETE dataset, it may be that for the particular problem at hand the combination of a large proportion of water and a small proportion of coarse grain is a clear indication of certain compressive strength of the material, while the proportion of water or coarse grain alone are not enough to get to that result.</small>", "k_max = 20\n\nMSE_tr = [square_error(S_tr, knn_regression(X_tr, S_tr, X_tr, k)) for k in range(1, k_max+1)]\nMSE_tst = [square_error(S_tst, knn_regression(X_tr, S_tr, X_tst, k)) for k in range(1, k_max+1)]\n\nplt.plot(np.arange(k_max)+1, MSE_tr,'bo',label='Training square error')\nplt.plot(np.arange(k_max)+1, MSE_tst,'ro',label='Test square error')\nplt.xlabel('k')\nplt.ylabel('Square error')\n\nplt.legend(loc='best')\nplt.show()", "In this case, we can check that the average test square error is much lower than the error that was achieved when using only one variable, and also far better than the baseline method. It is also interesting to note that in this particular case the best performance is achieved for a small value of $k$, with the error increasing for larger values of the hyperparameter.\nNevertheless, as we discussed previously, these results should be taken carefully. How would we select the value of $k$, if test labels are (obvioulsy) not available for model validation?\n5. Hyperparameter selection via cross-validation\n5.1. Generalization\nAn inconvenient of the application of the $k$-nn method is that the selection of $k$ influences the final error of the algorithm. In the previous experiments, we kept the value of $k$ that minimized the square error on the training set. However, we also noticed that the location of the minimum is not necessarily the same from the perspective of the test data. Ideally, we would like that the designed regression model works as well as possible on future unlabeled patterns that are not available during the training phase. This property is known as <b>generalization</b>. \nFitting the training data is only pursued in the hope that we are also indirectly obtaining a model that generalizes well. In order to achieve this goal, there are some strategies that try to guarantee a correct generalization of the model. One of such approaches is known as <b>cross-validation</b> \n5.2. Cross-validation\nSince using the test labels during the training phase is not allowed (they should be kept aside to simultate the future application of the regression model on unseen patterns), we need to figure out some way to improve our estimation of the hyperparameter that requires only training data. 
Cross-validation allows us to do so by following these steps:\n\nSplit the training data into several (generally non-overlapping) subsets. If we use $M$ subsets, the method is referred to as $M$-fold cross-validation. If we consider each pattern a different subset, the method is usually referred to as leave-one-out (LOO) cross-validation.\nCarry out the training of the system $M$ times. For each run, use a different partition as a <i>validation</i> set, and use the remaining partitions as the training set. Evaluate the performance for different choices of the hyperparameter (i.e., for different values of $k$ for the $k$-NN method).\nAverage the validation error over all partitions, and pick the hyperparameter that provided the minimum validation error.\nRerun the algorithm using all the training data, keeping the value of the parameter that came out of the cross-validation process.\n\n<img src=\"https://chrisjmccormick.files.wordpress.com/2013/07/10_fold_cv.png\">", "### This fragment of code runs k-nn with M-fold cross validation\n\n# Parameters:\nM = 5 # Number of folds for M-cv\nk_max = 40 # Maximum value of the k-nn hyperparameter to explore\n\n# First we compute the train error curve, that will be useful for comparative visualization.\nMSE_tr = [square_error(S_tr, knn_regression(X_tr, S_tr, X_tr, k)) for k in range(1, k_max+1)]\n\n## M-CV\n# Obtain the indices for the different folds\nn_tr = X_tr.shape[0]\npermutation = np.random.permutation(n_tr)\n\n# Split the indices into M subsets with (almost) the same size.\nset_indices = {i: [] for i in range(M)}\ni = 0\nfor pos in range(n_tr):\n    set_indices[i].append(permutation[pos])\n    i = (i+1) % M\n\n# Obtain the validation errors\nMSE_val = np.zeros((1,k_max))\nfor i in range(M):\n    val_indices = set_indices[i]\n\n    # Take out the val_indices from the set of indices.\n    tr_indices = list(set(permutation) - set(val_indices))\n\n    MSE_val_iter = [square_error(S_tr[val_indices],\n                                 knn_regression(X_tr[tr_indices, :], S_tr[tr_indices],\n                                                X_tr[val_indices, :], k))\n                    for k in range(1, k_max+1)]\n\n    MSE_val = MSE_val + np.asarray(MSE_val_iter).T\n\nMSE_val = MSE_val/M\n\n# Select the best k based on the validation error\nk_best = np.argmin(MSE_val) + 1\n\n# Compute the final test MSE for the selected k\nMSE_tst = square_error(S_tst, knn_regression(X_tr, S_tr, X_tst, k_best))\n\nplt.plot(np.arange(k_max)+1, MSE_tr, 'bo', label='Training square error')\nplt.plot(np.arange(k_max)+1, MSE_val.T, 'go', label='Validation square error')\nplt.plot([k_best, k_best], [0, MSE_tst],'r-')\nplt.plot(k_best, MSE_tst,'ro',label='Test error')\nplt.legend(loc='best')\nplt.show()", "Exercise 4\nModify the previous code to use only one of the variables in the input dataset\n - Following a cross-validation approach, select the best value of $k$ for the $k$-nn based on variable 0 only.\n - Compute the test error for the selected value of $k$.\n6. Scikit-learn implementation\nIn practice, most well-known machine learning methods are implemented and available for Python. Probably, the most complete module for machine learning tools is <a href=http://scikit-learn.org/stable/>Scikit-learn</a>. The following piece of code uses the method\nKNeighborsRegressor\n\navailable in Scikit-learn. The example has been taken from <a href=http://scikit-learn.org/stable/auto_examples/neighbors/plot_regression.html>here</a>.
As you can check, this routine allows us to build the estimation for a particular point using a weighted average of the targets of the neighbors:\nTo obtain the estimation at a point ${\\bf x}$:\n\nFind the $k$ closest points to ${\\bf x}$ in the training set.\nAverage the corresponding targets, weighting each value according to the distance of each point to ${\\bf x}$, so that closer points have a larger influence in the estimation.", "# Author: Alexandre Gramfort <[email protected]>\n# Fabian Pedregosa <[email protected]>\n#\n# License: BSD 3 clause (C) INRIA\n\n###############################################################################\n# Generate sample data\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import neighbors\n\nnp.random.seed(0)\nX = np.sort(5 * np.random.rand(40, 1), axis=0)\nT = np.linspace(0, 5, 500)[:, np.newaxis]\ny = np.sin(X).ravel()\n\n# Add noise to targets\ny[::5] += 1 * (0.5 - np.random.rand(8))\n\n###############################################################################\n# Fit regression model\nn_neighbors = 5\n\nfor i, weights in enumerate(['uniform', 'distance']):\n knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)\n y_ = knn.fit(X, y).predict(T)\n\n plt.subplot(2, 1, i + 1)\n plt.scatter(X, y, c='k', label='data')\n plt.plot(T, y_, c='g', label='prediction')\n plt.axis('tight')\n plt.legend()\n plt.title(\"KNeighborsRegressor (k = %i, weights = '%s')\" % (n_neighbors,\n weights))\n\nplt.show()", "Exercise 5\nUse the scikit-learn implementation of the $k$-nn method to compute the generalization error on the CONCRETE dataset. Compare the performance when using uniform and distance-based weights in the computation of the estimates. Visualize the regression curves and error for different values of $k$." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
metpy/MetPy
v1.1/_downloads/5f6dfc4b913dc349eba9f04f6161b5f1/GINI_Water_Vapor.ipynb
bsd-3-clause
[ "%matplotlib inline", "GINI Water Vapor Imagery\nUse MetPy's support for GINI files to read in a water vapor satellite image and plot the\ndata using CartoPy.", "import cartopy.feature as cfeature\nimport matplotlib.pyplot as plt\nimport xarray as xr\n\nfrom metpy.cbook import get_test_data\nfrom metpy.io import GiniFile\nfrom metpy.plots import add_metpy_logo, add_timestamp, colortables\n\n# Open the GINI file from the test data\nf = GiniFile(get_test_data('WEST-CONUS_4km_WV_20151208_2200.gini'))\nprint(f)", "Get a Dataset view of the data (essentially a NetCDF-like interface to the\nunderlying data). Pull out the data and (x, y) coordinates. We use metpy.parse_cf to\nhandle parsing some netCDF Climate and Forecasting (CF) metadata to simplify working with\nprojections.", "ds = xr.open_dataset(f)\nx = ds.variables['x'][:]\ny = ds.variables['y'][:]\ndat = ds.metpy.parse_cf('WV')", "Plot the image. We use MetPy's xarray/cartopy integration to automatically handle parsing\nthe projection information.", "fig = plt.figure(figsize=(10, 12))\nadd_metpy_logo(fig, 125, 145)\nax = fig.add_subplot(1, 1, 1, projection=dat.metpy.cartopy_crs)\nwv_norm, wv_cmap = colortables.get_with_range('WVCIMSS', 100, 260)\nwv_cmap.set_under('k')\nim = ax.imshow(dat[:], cmap=wv_cmap, norm=wv_norm,\n extent=(x.min(), x.max(), y.min(), y.max()), origin='upper')\nax.add_feature(cfeature.COASTLINE.with_scale('50m'))\nadd_timestamp(ax, f.prod_desc.datetime, y=0.02, high_contrast=True)\n\nplt.show()" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
pmorissette/bt
examples/PTE.ipynb
mit
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport ffn\n\n#using this import until pip is updated to have the version of bt with the targetVol algo\n# you will need to change this be wherever your local version of bt is located.\nimport sys\nsys.path.insert(0, \"C:\\\\Users\\JPL09A\\\\Documents\\\\Code\\\\pmorissette\\\\bt\\\\\")\n\nimport bt\n\n%matplotlib inline", "Create Fake Index Data", "names = ['foo','bar','rf']\ndates = pd.date_range(start='2015-01-01',end='2018-12-31', freq=pd.tseries.offsets.BDay())\nn = len(dates)\nrdf = pd.DataFrame(\n np.zeros((n, len(names))),\n index = dates,\n columns = names\n)\n\nnp.random.seed(1)\nrdf['foo'] = np.random.normal(loc = 0.1/252,scale=0.2/np.sqrt(252),size=n)\nrdf['bar'] = np.random.normal(loc = 0.04/252,scale=0.05/np.sqrt(252),size=n)\nrdf['rf'] = 0.\n\npdf = 100*np.cumprod(1+rdf)\npdf.plot()", "Build and run Target Strategy\nI will first run a strategy that rebalances everyday.\nThen I will use those weights as target to rebalance to whenever the PTE is too high.", "selectTheseAlgo = bt.algos.SelectThese(['foo','bar'])\n\n# algo to set the weights to 1/vol contributions from each asset\n# with data over the last 3 months excluding yesterday\nweighInvVolAlgo = bt.algos.WeighInvVol(\n lookback=pd.DateOffset(months=3),\n lag=pd.DateOffset(days=1)\n)\n\n# algo to rebalance the current weights to weights set in target.temp\nrebalAlgo = bt.algos.Rebalance()\n\n# a strategy that rebalances daily to 1/vol weights\nstrat = bt.Strategy(\n 'Target',\n [\n selectTheseAlgo,\n weighInvVolAlgo,\n rebalAlgo\n ]\n)\n\n# set integer_positions=False when positions are not required to be integers(round numbers)\nbacktest = bt.Backtest(\n strat,\n pdf,\n integer_positions=False\n)\n\nres_target = bt.run(backtest)\n\nres_target.get_security_weights().plot()", "Now use the PTE rebalance algo to trigger a rebalance whenever predicted tracking error is greater than 1%.", "# algo to fire whenever predicted tracking error is greater than 1%\nwdf = res_target.get_security_weights()\n\nPTE_rebalance_Algo = bt.algos.PTE_Rebalance(\n 0.01,\n wdf,\n lookback=pd.DateOffset(months=3),\n lag=pd.DateOffset(days=1),\n covar_method='standard',\n annualization_factor=252\n)\n\nselectTheseAlgo = bt.algos.SelectThese(['foo','bar'])\n\n# algo to set the weights to 1/vol contributions from each asset\n# with data over the last 12 months excluding yesterday\nweighTargetAlgo = bt.algos.WeighTarget(\n wdf\n)\n\nrebalAlgo = bt.algos.Rebalance()\n\n# a strategy that rebalances monthly to specified weights\nstrat = bt.Strategy(\n 'PTE',\n [\n PTE_rebalance_Algo,\n selectTheseAlgo,\n weighTargetAlgo,\n rebalAlgo\n ]\n)\n\n# set integer_positions=False when positions are not required to be integers(round numbers)\nbacktest = bt.Backtest(\n strat,\n pdf,\n integer_positions=False\n)\n\nres_PTE = bt.run(backtest)\n\nfig, ax = plt.subplots(nrows=1,ncols=1)\nres_target.get_security_weights().plot(ax=ax)\n\nrealized_weights_df = res_PTE.get_security_weights()\nrealized_weights_df['PTE foo'] = realized_weights_df['foo']\nrealized_weights_df['PTE bar'] = realized_weights_df['bar']\nrealized_weights_df = realized_weights_df.loc[:,['PTE foo', 'PTE bar']]\nrealized_weights_df.plot(ax=ax)\n\nax.set_title('Target Weights vs PTE Weights')\nax.plot()\n\ntrans_df = pd.DataFrame(\n index=res_target.prices.index,\n columns=['Target','PTE']\n)\n\ntransactions = res_target.get_transactions()\ntransactions = (transactions['quantity'] * 
transactions['price']).reset_index()\n\nbar_mask = transactions.loc[:,'Security'] == 'bar'\nfoo_mask = transactions.loc[:,'Security'] == 'foo'\n\ntrans_df.loc[trans_df.index[4:],'Target'] = np.abs(transactions[bar_mask].iloc[:,2].values) + np.abs(transactions[foo_mask].iloc[:,2].values)\n\n\ntransactions = res_PTE.get_transactions()\ntransactions = (transactions['quantity'] * transactions['price']).reset_index()\n\nbar_mask = transactions.loc[:,'Security'] == 'bar'\nfoo_mask = transactions.loc[:,'Security'] == 'foo'\n\ntrans_df.loc[transactions[bar_mask].iloc[:,0],'PTE'] = np.abs(transactions[bar_mask].iloc[:,2].values)\ntrans_df.loc[transactions[foo_mask].iloc[:,0],'PTE'] += np.abs(transactions[foo_mask].iloc[:,2].values)\n\n\ntrans_df = trans_df.fillna(0)\n\nfig, ax = plt.subplots(nrows=1,ncols=1)\ntrans_df.cumsum().plot(ax=ax)\nax.set_title('Cumulative sum of notional traded')\nax.plot()", "If we plot the total risk contribution of each asset class and divide by the total volatility, then we can see that both strategy's contribute roughly similar amounts of volatility from both of the securities.", "weights_target = res_target.get_security_weights()\nrolling_cov_target = pdf.loc[:,weights_target.columns].pct_change().rolling(window=3*20).cov()*252\n\nweights_PTE = res_PTE.get_security_weights().loc[:,weights_target.columns]\nrolling_cov_PTE = pdf.loc[:,weights_target.columns].pct_change().rolling(window=3*20).cov()*252\n\n\ntrc_target = pd.DataFrame(\n np.nan,\n index = weights_target.index,\n columns = weights_target.columns\n)\n\ntrc_PTE = pd.DataFrame(\n np.nan,\n index = weights_PTE.index,\n columns = [x + \" PTE\" for x in weights_PTE.columns]\n)\n\nfor dt in pdf.index:\n trc_target.loc[dt,:] = weights_target.loc[dt,:].values*(rolling_cov_target.loc[dt,:].values@weights_target.loc[dt,:].values)/np.sqrt(weights_target.loc[dt,:].values@rolling_cov_target.loc[dt,:].values@weights_target.loc[dt,:].values)\n trc_PTE.loc[dt,:] = weights_PTE.loc[dt,:].values*(rolling_cov_PTE.loc[dt,:].values@weights_PTE.loc[dt,:].values)/np.sqrt(weights_PTE.loc[dt,:].values@rolling_cov_PTE.loc[dt,:].values@weights_PTE.loc[dt,:].values)\n\n\nfig, ax = plt.subplots(nrows=1,ncols=1)\ntrc_target.plot(ax=ax)\ntrc_PTE.plot(ax=ax)\nax.set_title('Total Risk Contribution')\nax.plot()", "Looking at the Target strategy's and PTE strategy's Total Risk they are very similar.", "fig, ax = plt.subplots(nrows=1,ncols=1)\ntrc_target.sum(axis=1).plot(ax=ax,label='Target')\ntrc_PTE.sum(axis=1).plot(ax=ax,label='PTE')\nax.legend()\nax.set_title('Total Risk')\nax.plot()\n\ntransactions = res_PTE.get_transactions()\ntransactions = (transactions['quantity'] * transactions['price']).reset_index()\n\nbar_mask = transactions.loc[:,'Security'] == 'bar'\ndates_of_PTE_transactions = transactions[bar_mask].iloc[:,0]\ndates_of_PTE_transactions\n\nfig, ax = plt.subplots(nrows=1,ncols=1)\nnp.sum(np.abs(trc_target.values - trc_PTE.values))\n #.abs().sum(axis=1).plot()\n\nax.set_title('Total Risk')\nax.plot(\n trc_target.index,\n np.sum(np.abs(trc_target.values - trc_PTE.values),axis=1),\n label='PTE'\n)\n\nfor i,dt in enumerate(dates_of_PTE_transactions):\n if i == 0:\n ax.axvline(x=dt,color='red',label='PTE Transaction')\n else:\n ax.axvline(x=dt,color='red')\n\nax.legend()\n", "We can see the Predicted Tracking Error of the PTE Strategy with each transaction marked." ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
wdbm/Psychedelic_Machine_Learning_in_the_Cenozoic_Era
Keras_CNN_newsgroups_text_classification.ipynb
gpl-3.0
[ "20 newsgroups classification\nHere we use the 20 newsgroups text dataset by Ken Lang, which is a dataset of 20,000 messages from 20 different newsgroups. One thousand messages from each newsgroup were sampled randomly and classified by newsgroup.\nThe standard GloVe (Global Vectors for Word Representation) word vector model of the Stanford NLP Group is used for this task.\n\nreference: GloVe: Global Vectors for Word Representation, Empirical Methods in Natural Language Processing (EMNLP), J. Pennington, R. Socher and C. D. Manning (2014)\n\nBash\nwget http://nlp.stanford.edu/data/glove.6B.zip\nunzip glove.6B.zip\nWe use this data to train 1D convolutional neural networks in Keras to classify the messages into one of two newsgroup classes.\nFor the case of one of the models, a dropout probability of 0.1 was applied to convolutional layers in towers and the more standard approach of a dropout probability of 0.5 was applied to the more output dense layer [ref].\nreferences/inspirations\n\nreference\nreference\nreference\nreference\n\nimports", "%autosave 120\nimport numpy as np\nnp.random.seed(1337)\nfrom IPython.display import SVG\nfrom keras.models import Model\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import (\n Concatenate,\n Conv1D,\n Dense,\n Dropout,\n Embedding,\n Flatten,\n Input,\n MaxPooling1D\n)\nfrom keras.utils.np_utils import to_categorical\nfrom keras.utils.vis_utils import model_to_dot\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom sklearn.datasets import fetch_20newsgroups\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef summary_and_diagram(model):\n model.summary()\n return SVG(model_to_dot(model).create(prog='dot', format='svg'))\n #SVG(model_to_dot(model, show_shapes=True, show_layer_names=True).create(prog='dot', format='svg'))\n\ndef model_training_plot(history):\n plt.plot(history.history['acc'], marker='.', label='train')\n plt.plot(history.history['val_acc'], marker='.', label='validation')\n plt.title('accuracy')\n plt.grid(True)\n plt.xlabel('epoch')\n plt.ylabel('accuracy')\n plt.legend(loc='best')\n plt.show();\n\n%matplotlib inline\nplt.rcParams[\"figure.figsize\"] = [10, 10]", "data", "categories = ['alt.atheism', 'soc.religion.christian'] \n\nnewsgroups_train = fetch_20newsgroups(subset='train',\n shuffle=True, \n categories=categories)\n\nprint(f'number of training samples: {len(newsgroups_train.data)}')\n\nexample_sample_data = \"\\n\".join(newsgroups_train.data[0].split(\"\\n\")[10:15])\nexample_sample_category = categories[newsgroups_train.target[0]]\nprint(f'\\nexample training sample of category {example_sample_category}:'\n f'\\n\\n{example_sample_data}')", "data preparation", "labels = newsgroups_train.target\ntexts = newsgroups_train.data\nmax_sequence_length = 1000\nmax_words = 20000\n\ntokenizer = Tokenizer(num_words=max_words)\ntokenizer.fit_on_texts(texts)\nsequences = tokenizer.texts_to_sequences(texts)\n\nword_index = tokenizer.word_index\n#print(sequences[0][:10])\nprint(f'{len(word_index)} unique tokens found')\n\nlabels = to_categorical(np.array(labels))\ndata = pad_sequences(sequences, maxlen=max_sequence_length)\n\nprint(f'data tensor shape: {data.shape}\\n'\n f'targets tensor shape: {labels.shape}')\n\nindices = np.arange(data.shape[0]); np.random.shuffle(indices) \ndata = data[indices] \nlabels = labels[indices]\ncross_validation_split = 0.3\nnb_validation_samples = 
int(cross_validation_split * data.shape[0])\n\nx_train = data[:-nb_validation_samples] \ny_train = labels[:-nb_validation_samples] \nx_val = data[-nb_validation_samples:] \ny_val = labels[-nb_validation_samples:] \n\nprint(f'training samples shape: {x_train.shape}\\n'\n f'validation samples shape: {y_train.shape}\\n\\n'\n f'training samples positive/negative reviews: {y_train.sum(axis=0)}\\n'\n f'validation samples positive/negative reviews: {y_val.sum(axis=0)}')\n\nembeddings_index = {}\nwith open('glove.6B.100d.txt') as f:\n for line in f:\n values = line.split(' ')\n word = values[0]\n embeddings_index[word] = np.asarray(values[1:], dtype='float32')\nprint(f'word vectors: {len(embeddings_index)}')\n\nword_vector_dimensionality = 100\n\nembedding_matrix = np.random.random(\n (len(word_index) + 1, word_vector_dimensionality))\n\nfor word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # Words not in the embedding index are all zero elements.\n embedding_matrix[i] = embedding_vector\n\nprint(f'embedding matrix shape: {embedding_matrix.shape}')", "model: convolutional neural network", "embedding_layer = Embedding(len(word_index) + 1,\n word_vector_dimensionality,\n weights=[embedding_matrix],\n input_length=max_sequence_length,\n trainable=False)\n\ninputs = Input(shape=(max_sequence_length,), dtype='int32') # inputs\nx = embedding_layer(inputs) # embedded sequences\nx = Conv1D(128, 5, activation='relu')(x)\nx = MaxPooling1D(5)(x) \nx = Conv1D(128, 5, activation='relu')(x) \nx = MaxPooling1D(5)(x) \nx = Conv1D(128, 5, activation='relu')(x) \nx = MaxPooling1D(35)(x) # global max pooling\nx = Flatten()(x)\nx = Dense(300, activation='relu')(x)\nx = Dropout(rate=0.5)(x)\npreds = Dense(2, activation='softmax', name='preds')(x)\nmodel = Model(input=inputs, output=preds)\nmodel.compile(loss='categorical_crossentropy',\n optimizer='nadam',\n metrics=['acc'])\nsummary_and_diagram(model)\n\n%%time\nhistory = model.fit(x_train, y_train, validation_data=(x_val, y_val),\n epochs=60, batch_size=32, verbose=False)\n\nmodel_training_plot(history)\nprint(f'max. validation accuracy observed: {max(model.history.history[\"val_acc\"])}')\nprint(f'max. 
validation accuracy history index: {model.history.history[\"val_acc\"].index(max(model.history.history[\"val_acc\"]))}')", "model: convolutional neural network with multiple towers of varying kernel sizes", "embedding_layer = Embedding(len(word_index) + 1,\n word_vector_dimensionality,\n weights=[embedding_matrix],\n input_length=max_sequence_length,\n trainable=False)\n\ninputs = Input(shape=(max_sequence_length,), dtype='int32') \nx = embedding_layer(inputs)\n\nconvolutional_layer_towers = [] \nfor kernel_size in [3, 4, 5]:\n _x = Conv1D(filters=128, kernel_size=kernel_size, activation='relu')(x)\n _x = Dropout(rate=0.1)(_x)\n _x = MaxPooling1D(5)(_x)\n convolutional_layer_towers.append(_x)\nx = Concatenate(axis=1)(convolutional_layer_towers)\nx = Conv1D(128, 5, activation='relu')(x) \nx = MaxPooling1D(5)(x) \nx = Conv1D(128, 5, activation='relu')(x) \nx = MaxPooling1D(30)(x) \nx = Flatten()(x) \nx = Dense(128, activation='relu')(x)\nx = Dropout(rate=0.5)(x)\npreds = Dense(2, activation='softmax', name='preds')(x)\nmodel = Model(input=inputs, output=preds)\nmodel.compile(loss='categorical_crossentropy',\n optimizer='nadam',\n metrics=['acc'])\nsummary_and_diagram(model)\n\n%%time\nhistory = model.fit(x_train, y_train, validation_data=(x_val, y_val),\n epochs=100, batch_size=32, verbose=False)\n\nmodel_training_plot(history)\nprint(f'max. validation accuracy observed: {max(model.history.history[\"val_acc\"])}')\nprint(f'max. validation accuracy history index: {model.history.history[\"val_acc\"].index(max(model.history.history[\"val_acc\"]))}')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
vravishankar/Jupyter-Books
Classes+and+Objects.ipynb
mit
[ "Object Oriented Programming\nAccording to Wikipedia, \"Object-oriented programming (OOP) is a programming paradigm based on the concept of 'objects', which may contain data, in the form of fields, often known as attributes; and code, in the form of procedures, often known as methods.\"\nClasses & Objects\nA class is a template for defining objects. It specifies the names and types of variables that can exist in an object, as well as \"methods\"--procedures for operating on those variables. A class can be thought of as a \"type\", with the objects being a \"variable\" of that type.\nFor example, when we define a Person class using the class keyword, we haven't actually created a Person. Instead, what we've created is a sort of instruction manual for constructing \"person\" objects.", "class Person:\n pass\n", "Here the class statement did not create anything, it just the blueprint to create \"Person\" objects. To create an object we need to instantiate the \"Person\" class.", "P1 = Person()\nprint(type(P1))", "Now we created a \"Person\" object and assigned it to \"P1\". We can create any number of objects but please note there will be only one \"Person\" class.", "# Doc string for class\nclass Person:\n '''Simple Person Class'''\n pass\n\nprint(Person.__doc__)", "Attributes & Methods\nClasses contain attributes (also called fields, members etc...) and methods (a.k.a functions). Attributes defines the characteristics of the object and methods perfom action on the object. For example, the class definition below has firstname and lastname attributes and fullname is a method.", "class Person:\n '''Simple Person Class\n \n Attributes:\n firstname: String representing first name of the person\n lastname: String representing last name of the person\n '''\n def __init__(self,firstname,lastname):\n '''Initialiser method for Person'''\n self.firstname = firstname\n self.lastname = lastname\n \n def fullname(self):\n '''Returns the full name of the person'''\n return self.firstname + ' ' + self.lastname", "Inside the class body, we define two functions – these are our object’s methods. The first is called _init_, which is a special method. When we call the class object, a new instance of the class is created, and the _init_ method on this new object is immediately executed with all the parameters that we passed to the class object. The purpose of this method is thus to set up a new object using data that we have provided.\nThe second method is a custom method which derives the fullname of the person using the firstname and the lastname.\n\n_init_ is sometimes called the object’s constructor, because it is used similarly to the way that constructors are used in other languages, but that is not technically correct – it’s better to call it the initialiser. There is a different method called _new_ which is more analogous to a constructor, but it is hardly ever used.\n\nYou may have noticed that both of these method definitions have self as the first parameter, and we use this variable inside the method bodies – but we don’t appear to pass this parameter in. This is because whenever we call a method on an object, the object itself is automatically passed in as the first parameter (as self). This gives us a way to access the object’s properties from inside the object’s methods.\nInstance Attributes\nAll the attributes that are defined on the Person instance are called instance attributes. 
They are added to the instance when the __init__ method is executed.\nClass Attributes\nWe can, however, also define attributes which are set on the class. These attributes will be shared by all instances of that class. In many ways they behave just like instance attributes, but there are some caveats that you should be aware of.\nWe define class attributes in the body of a class, at the same indentation level as method definitions (one level up from the insides of methods).", "class Person:\n '''Simple Person Class\n \n Attributes:\n firstname: String representing first name of the person\n lastname: String representing last name of the person\n '''\n TITLES = ['Mr','Mrs','Master']\n \n def __init__(self,title,firstname,lastname):\n '''Initialiser method for Person'''\n if title not in self.TITLES:\n raise ValueError(\"%s is not a valid title.\" % title)\n self.firstname = firstname\n self.lastname = lastname\n \n def fullname(self):\n '''Returns the full name of the person'''\n return self.firstname + ' ' + self.lastname\n \nJohn = Person('Mister','John','Doe') # this will create an error\n\n\nclass Employee:\n '''Common base class for all employees'''\n\n empCount = 0\n \n def __init__(self, name, salary):\n self.name = name\n self.salary = salary\n Employee.empCount += 1\n \n def displayCount(self):\n print(\"Total Employee %d\" % Employee.empCount)\n\n def displayEmployee(self):\n print(\"Name : \", self.name, \", Salary: \", self.salary)\n\n\"This would create first object of Employee class\"\nemp1 = Employee(\"Zara\", 2000)\n\"This would create second object of Employee class\"\nemp2 = Employee(\"Manni\", 5000)\nemp1.displayEmployee()\nemp2.displayEmployee()\nprint(\"Total Employee \", Employee.empCount)", "Please note that when we set an attribute on an instance which has the same name as a class attribute, we are overriding the class attribute with an instance attribute, which will take precedence over it. \nClass Decorators\nClass Methods\nJust like we can define class attributes, which are shared between all instances of a class, we can define class methods. We do this by using the @classmethod decorator to decorate an ordinary method.\nA class method still has its calling object as the first parameter, but by convention we rename this parameter from self to cls. If we call the class method from an instance, this parameter will contain the instance object, but if we call it from the class it will contain the class object. By calling the parameter cls we remind ourselves that it is not guaranteed to have any instance attributes.\nClass methods exist primarily for two reasons:\n\n\nSometimes there are tasks associated with a class which we can perform using constants and other class attributes, without needing to create any class instances. If we had to use instance methods for these tasks, we would need to create an instance for no reason, which would be wasteful.\n\n\nSometimes it is useful to write a class method which creates an instance of the class after processing the input so that it is in the right format to be passed to the class constructor. 
This allows the constructor to be straightforward and not have to implement any complicated parsing or clean-up code.", "class ClassGrades:\n\n def __init__(self, grades):\n self.grades = grades\n\n @classmethod\n def from_csv(cls, grade_csv_str):\n grades = grade_csv_str.split(', ')\n return cls(grades)\n \nclass_grades = ClassGrades.from_csv('92, -15, 99, 101, 77, 65, 100')\nprint(class_grades.grades)", "Static Methods\nA static method doesn’t have the calling object passed into it as the first parameter. This means that it doesn’t have access to the rest of the class or instance at all. We can call them from an instance or a class object, but they are most commonly called from class objects, like class methods.\nIf we are using a class to group together related methods which don’t need to access each other or any other data on the class, we may want to use this technique. \nThe advantage of using static methods is that we eliminate unnecessary cls or self parameters from our method definitions. \nThe disadvantage is that if we do occasionally want to refer to another class method or attribute inside a static method we have to write the class name out in full, which can be much more verbose than using the cls variable which is available to us inside a class method.", "class ClassGrades:\n\n def __init__(self, grades):\n self.grades = grades\n\n @classmethod\n def from_csv(cls, grade_csv_str):\n grades = grade_csv_str.split(', ')\n cls.validate(grades)\n return cls(grades)\n\n\n @staticmethod\n def validate(grades):\n for g in grades:\n if int(g) < 0 or int(g) > 100:\n raise Exception()\n\ntry: \n # Try out some valid grades\n class_grades_valid = ClassGrades.from_csv('90, 80, 85, 94, 70')\n print('Got grades:', class_grades_valid.grades)\n\n # Should fail with invalid grades\n class_grades_invalid = ClassGrades.from_csv('92, -15, 99, 101, 77, 65, 100')\n print(class_grades_invalid.grades)\nexcept: \n print('Invalid!')", "The difference between a static method and a class method is:\n\nStatic method knows nothing about the class and just deals with the parameters.\nClass method works with the class since its parameter is always the class itself.\n\nProperty\nSometimes we use a method to generate a property of an object dynamically, calculating it from the object’s other properties. Sometimes you can simply use a method to access a single attribute and return it. You can also use a different method to update the value of the attribute instead of accessing it directly. Methods like this are called getters and setters, because they “get” and “set” the values of attributes, respectively.\nThe @property decorator lets us make a method behave like an attribute.", "class Person:\n '''Simple Person Class\n \n Attributes:\n firstname: String representing first name of the person\n lastname: String representing last name of the person\n '''\n def __init__(self,firstname,lastname):\n '''Initialiser method for Person'''\n self.firstname = firstname\n self.lastname = lastname\n \n @property\n def fullname(self):\n '''Returns the full name of the person'''\n return self.firstname + ' ' + self.lastname\n \np1 = Person('John','Doe')\nprint(p1.fullname)", "There are also decorators which we can use to define a setter and a deleter for our attribute (a deleter will delete the attribute from our object). 
The getter, setter and deleter methods must all have the same name", "class Person:\n '''Simple Person Class\n \n Attributes:\n firstname: String representing first name of the person\n lastname: String representing last name of the person\n '''\n def __init__(self,firstname,lastname):\n '''Initialiser method for Person'''\n self.firstname = firstname\n self.lastname = lastname\n \n @property\n def fullname(self):\n '''Returns the full name of the person'''\n return self.firstname + ' ' + self.lastname\n \n @fullname.setter\n def fullname(self,value):\n firstname,lastname = value.split(\" \")\n self.firstname = firstname\n self.lastname = lastname\n \n @fullname.deleter\n def fullname(self):\n del self.firstname\n del self.lastname\n \np1 = Person('John','Doe')\nprint(p1.fullname)\n\np1.fullname = 'Jack Daniels'\nprint(p1.fullname)", "Inspecting an Object", "class Person:\n def __init__(self, name, surname):\n self.name = name\n self.surname = surname\n\n def fullname(self):\n return \"%s %s\" % (self.name, self.surname)\n\njane = Person(\"Jane\", \"Smith\")\n\nprint(dir(jane))", "Built In Class Attributes", "class Employee:\n 'Common base class for all employees'\n empCount = 0\n\n def __init__(self, name, salary):\n self.name = name\n self.salary = salary\n Employee.empCount += 1\n \n def displayCount(self):\n print(\"Total Employee\", Employee.empCount)\n\n def displayEmployee(self):\n print(\"Name : \", self.name, \", Salary: \", self.salary)\n\nprint (\"Employee.__doc__:\", Employee.__doc__)\nprint (\"Employee.__name__:\", Employee.__name__)\nprint (\"Employee.__module__:\", Employee.__module__)\nprint (\"Employee.__bases__:\", Employee.__bases__)\nprint (\"Employee.__dict__:\", Employee.__dict__)", "Overriding Magic Methods", "import datetime\n\nclass Person:\n def __init__(self, name, surname, birthdate, address, telephone, email):\n self.name = name\n self.surname = surname\n self.birthdate = birthdate\n\n self.address = address\n self.telephone = telephone\n self.email = email\n\n def __str__(self):\n return \"%s %s, born %s\\nAddress: %s\\nTelephone: %s\\nEmail:%s\" % (self.name, self.surname, self.birthdate, self.address, self.telephone, self.email)\n\njane = Person(\n \"Jane\",\n \"Doe\",\n datetime.date(1992, 3, 12), # year, month, day\n \"No. 12 Short Street, Greenville\",\n \"555 456 0987\",\n \"[email protected]\"\n)\n\nprint(jane)", "Create Class Using Key Value Arguments", "class Student:\n def __init__(self, **kwargs):\n for k,v in kwargs.items():\n setattr(self,k,v,)\n \n def __str__(self):\n attrs = [\"{}={}\".format(k, v) for (k, v) in self.__dict__.items()]\n return str(attrs)\n #classname = self.__class__.__name__\n #return \"{}: {}\".format((classname, \" \".join(attrs)))\n \ns1 = Student(firstname=\"John\",lastname=\"Doe\")\nprint(s1.firstname)\nprint(s1.lastname)\nprint(s1)\n\ndef print_values(**kwargs):\n for key, value in kwargs.items():\n print(\"The value of {} is {}\".format(key, value))\n\nprint_values(my_name=\"Sammy\", your_name=\"Casey\")", "Class Inheritance\nInheritance is a way of arranging objects in a hierarchy from the most general to the most specific. An object which inherits from another object is considered to be a subtype of that object.\nWe also often say that a class is a subclass or child class of a class from which it inherits, or that the other class is its superclass or parent class. We can refer to the most generic class at the base of a hierarchy as a base class.\nInheritance is also a way of reusing existing code easily. 
If we already have a class which does almost what we want, we can create a subclass in which we partially override some of its behaviour, or perhaps add some new functionality.", "# Simple Example of Inheritance\n\nclass Person:\n pass\n\n# Parent class must be defined inside the parentheses\n\nclass Employee(Person): \n pass\n\ne1 = Employee()\nprint(dir(e1))\n\nclass Person:\n \n def __init__(self,firstname,lastname):\n self.firstname = firstname\n self.lastname = lastname\n \n def __str__(self):\n return \"[{},{}]\".format(self.firstname,self.lastname)\n \nclass Employee(Person):\n pass\n\njohn = Employee('John','Doe')\nprint(john)\n\nclass Person:\n \n def __init__(self, firstname, lastname):\n self.firstname = firstname\n self.lastname = lastname\n \n def __str__(self):\n return \"{},{}\".format(self.firstname, self.lastname)\n \nclass Employee(Person):\n \n def __init__(self, firstname, lastname, staffid):\n super().__init__(firstname, lastname)\n self.staffid = staffid\n \n def __str__(self):\n return super().__str__() + \",{}\".format(self.staffid)\n\njohn = Employee('Jack','Doe','12345')\nprint(john)\n", "Abstract Classes and Interfaces\nAbstract classes are not intended to be instantiated because all the method definitions are empty – all the insides of the methods must be implemented in a subclass.\nThey serve as a template for suitable objects by defining a list of methods that these objects must implement.", "# Abstract Classes\n\nclass shape2D:\n def area(self):\n raise NotImplementedError()\n \nclass shape3D:\n def volume(self):\n raise NotImplementedError()\n \nsh1 = shape2D()\nsh1.area()\n\nclass shape2D:\n def area(self):\n raise NotImplementedError()\n \nclass shape3D:\n def volume(self):\n raise NotImplementedError()\n\nclass Square(shape2D):\n def __init__(self,width):\n self.width = width\n \n def area(self):\n return self.width ** 2\n \ns1 = Square(2)\ns1.area()", "Multiple Inheritance", "class Person:\n pass\n\nclass Company:\n pass\n\nclass Employee(Person,Company):\n pass\n\nprint(Employee.mro())", "Diamond Problem\nMultiple inheritance isn’t too difficult to understand if a class inherits from multiple classes which have completely different properties, but things get complicated if two parent classes implement the same method or attribute.\nIf classes B and C inherit from A and class D inherits from B and C, and both B and C have a method do_something, which do_something will D inherit? This ambiguity is known as the diamond problem, and different languages resolve it in different ways. In our Tutor class we would encounter this problem with the __init__ method.", "class X: pass\nclass Y: pass\nclass Z: pass\n\nclass A(X,Y): pass\nclass B(Y,Z): pass\n\nclass M(B,A,Z): pass\n\n# Output:\n# [<class '__main__.M'>, <class '__main__.B'>,\n# <class '__main__.A'>, <class '__main__.X'>,\n# <class '__main__.Y'>, <class '__main__.Z'>,\n# <class 'object'>]\n\nprint(M.mro())", "Method Resolution Order (MRO)\nIn the multiple inheritance scenario, any specified attribute is searched first in the current class. If not found, the search continues into parent classes in depth-first, left-right fashion without searching the same class twice.\nSo, in the above example of the M class, the search order is [M, B, A, X, Y, Z, object]. This order is also called the linearization of the M class, and the set of rules used to find this order is called the Method Resolution Order (MRO).\nMRO must prevent local precedence ordering and also provide monotonicity. 
It ensures that a class always appears before its parents and, in the case of multiple parents, the order is the same as the tuple of base classes.\nThe MRO of a class can be viewed via the __mro__ attribute or the mro() method. The former returns a tuple while the latter returns a list.", "class Person:\n def __init__(self):\n print('Person')\n\nclass Company:\n def __init__(self):\n print('Company')\n\nclass Employee(Person,Company):\n def __init__(self):\n super(Employee,self).__init__()\n print('Employee')\n \ne1=Employee()", "Mixins\nIf we use multiple inheritance, it is often a good idea for us to design our classes in a way which avoids the kind of ambiguity described above. One way of doing this is to split up optional functionality into mix-ins. A Mix-in is a class which is not intended to stand on its own – it exists to add extra functionality to another class through multiple inheritance.", "class Person:\n def __init__(self, name, surname, number):\n self.name = name\n self.surname = surname\n self.number = number\n\n\nclass LearnerMixin:\n def __init__(self):\n self.classes = []\n\n def enrol(self, course):\n self.classes.append(course)\n\n\nclass TeacherMixin:\n def __init__(self):\n self.courses_taught = []\n\n def assign_teaching(self, course):\n self.courses_taught.append(course)\n\n\nclass Tutor(Person, LearnerMixin, TeacherMixin):\n def __init__(self, *args, **kwargs):\n super(Tutor, self).__init__(*args, **kwargs)\n\njane = Tutor(\"Jane\", \"Smith\", \"SMTJNX045\")\n#jane.enrol(a_postgrad_course)\n#jane.assign_teaching(an_undergrad_course)", "Now Tutor inherits from one “main” class, Person, and two mix-ins which are not related to Person. Each mix-in is responsible for providing a specific piece of optional functionality. Our mix-ins still have __init__ methods, because each one has to initialise a list of courses (we saw in the previous chapter that we can’t do this with a class attribute). Many mix-ins just provide additional methods and don’t initialise anything.\nComposition\nComposition is a way of aggregating objects together by making some objects attributes of other objects. Relationships like this can be one-to-one, one-to-many or many-to-many, and they can be unidirectional or bidirectional, depending on the specifics of the roles which the objects fulfil.\nThe term composition implies that the two objects are quite strongly linked – one object can be thought of as belonging exclusively to the other object. If the owner object ceases to exist, the owned object will probably cease to exist as well. 
If the link between two objects is weaker, and neither object has exclusive ownership of the other, it can also be called aggregation.", "class Student:\n def __init__(self, name, student_number):\n self.name = name\n self.student_number = student_number\n self.classes = []\n\n def enrol(self, course_running):\n self.classes.append(course_running)\n course_running.add_student(self)\n\n\nclass Department:\n def __init__(self, name, department_code):\n self.name = name\n self.department_code = department_code\n self.courses = {}\n\n def add_course(self, description, course_code, credits):\n self.courses[course_code] = Course(description, course_code, credits, self)\n return self.courses[course_code]\n\n\nclass Course:\n def __init__(self, description, course_code, credits, department):\n self.description = description\n self.course_code = course_code\n self.credits = credits\n self.department = department\n #self.department.add_course(self)\n\n self.runnings = []\n\n def add_running(self, year):\n self.runnings.append(CourseRunning(self, year))\n return self.runnings[-1]\n\n\nclass CourseRunning:\n def __init__(self, course, year):\n self.course = course\n self.year = year\n self.students = []\n\n def add_student(self, student):\n self.students.append(student)\n\n\nmaths_dept = Department(\"Mathematics and Applied Mathematics\", \"MAM\")\nmam1000w = maths_dept.add_course(\"Mathematics 1000\", \"MAM1000W\", 1)\nmam1000w_2013 = mam1000w.add_running(2013)\n\nbob = Student(\"Bob\", \"Smith\")\nbob.enrol(mam1000w_2013)", "A student can be enrolled in several courses (CourseRunning objects), and a course (CourseRunning) can have multiple students enrolled in it in a particular year, so this is a many-to-many relationship. A student knows about all his or her courses, and a course has a record of all enrolled students, so this is a bidirectional relationship. These objects aren’t very strongly coupled – a student can exist independently of a course, and a course can exist independently of a student.\nA department offers multiple courses (Course objects), but in our implementation a course can only have a single department – this is a one-to-many relationship. It is also bidirectional. Furthermore, these objects are more strongly coupled – you can say that a department owns a course. The course cannot exist without the department.\nA similar relationship exists between a course and its “runnings”: it is also bidirectional, one-to-many and strongly coupled – it wouldn’t make sense for “MAM1000W run in 2013” to exist on its own in the absence of “MAM1000W”.\n\nInheritance Methods", "class Person:\n pass\n\nclass Employee(Person):\n pass\n\nclass Tutor(Employee):\n pass\n\nemp = Employee()\n\nprint(isinstance(emp, Tutor)) # False\nprint(isinstance(emp, Person)) # True\nprint(isinstance(emp, Employee)) # True\nprint(issubclass(Tutor, Person)) # True", "Links to Topics\nPython - Object Oriented Programming\nExcellent Introduction Tutorial on Object Oriented Programming\nMozilla - Introduction to Object Oriented Programming" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jseabold/statsmodels
examples/notebooks/formulas.ipynb
bsd-3-clause
[ "Formulas: Fitting models using R-style formulas\nSince version 0.5.0, statsmodels allows users to fit statistical models using R-style formulas. Internally, statsmodels uses the patsy package to convert formulas and data to the matrices that are used in model fitting. The formula framework is quite powerful; this tutorial only scratches the surface. A full description of the formula language can be found in the patsy docs: \n\nPatsy formula language description\n\nLoading modules and functions", "import numpy as np # noqa:F401 needed in namespace for patsy\nimport statsmodels.api as sm", "Import convention\nYou can import explicitly from statsmodels.formula.api", "from statsmodels.formula.api import ols", "Alternatively, you can just use the formula namespace of the main statsmodels.api.", "sm.formula.ols", "Or you can use the following convention", "import statsmodels.formula.api as smf", "These names are just a convenient way to get access to each model's from_formula classmethod. See, for instance", "sm.OLS.from_formula", "All of the lower case models accept formula and data arguments, whereas upper case ones take endog and exog design matrices. formula accepts a string which describes the model in terms of a patsy formula. data takes a pandas data frame or any other data structure that defines a __getitem__ for variable names like a structured array or a dictionary of variables. \ndir(sm.formula) will print a list of available models. \nFormula-compatible models have the following generic call signature: (formula, data, subset=None, *args, **kwargs)\nOLS regression using formulas\nTo begin, we fit the linear model described on the Getting Started page. Download the data, subset columns, and list-wise delete to remove missing observations:", "dta = sm.datasets.get_rdataset(\"Guerry\", \"HistData\", cache=True)\n\ndf = dta.data[['Lottery', 'Literacy', 'Wealth', 'Region']].dropna()\ndf.head()", "Fit the model:", "mod = ols(formula='Lottery ~ Literacy + Wealth + Region', data=df)\nres = mod.fit()\nprint(res.summary())", "Categorical variables\nLooking at the summary printed above, notice that patsy determined that elements of Region were text strings, so it treated Region as a categorical variable. patsy's default is also to include an intercept, so we automatically dropped one of the Region categories.\nIf Region had been an integer variable that we wanted to treat explicitly as categorical, we could have done so by using the C() operator:", "res = ols(formula='Lottery ~ Literacy + Wealth + C(Region)', data=df).fit()\nprint(res.params)", "Patsy's mode advanced features for categorical variables are discussed in: Patsy: Contrast Coding Systems for categorical variables\nOperators\nWe have already seen that \"~\" separates the left-hand side of the model from the right-hand side, and that \"+\" adds new columns to the design matrix. \nRemoving variables\nThe \"-\" sign can be used to remove columns/variables. For instance, we can remove the intercept from a model by:", "res = ols(formula='Lottery ~ Literacy + Wealth + C(Region) -1 ', data=df).fit()\nprint(res.params)", "Multiplicative interactions\n\":\" adds a new column to the design matrix with the interaction of the other two columns. 
\"*\" will also include the individual columns that were multiplied together:", "res1 = ols(formula='Lottery ~ Literacy : Wealth - 1', data=df).fit()\nres2 = ols(formula='Lottery ~ Literacy * Wealth - 1', data=df).fit()\nprint(res1.params, '\\n')\nprint(res2.params)", "Many other things are possible with operators. Please consult the patsy docs to learn more.\nFunctions\nYou can apply vectorized functions to the variables in your model:", "res = smf.ols(formula='Lottery ~ np.log(Literacy)', data=df).fit()\nprint(res.params)", "Define a custom function:", "def log_plus_1(x):\n return np.log(x) + 1.\nres = smf.ols(formula='Lottery ~ log_plus_1(Literacy)', data=df).fit()\nprint(res.params)", "Any function that is in the calling namespace is available to the formula.\nUsing formulas with models that do not (yet) support them\nEven if a given statsmodels function does not support formulas, you can still use patsy's formula language to produce design matrices. Those matrices \ncan then be fed to the fitting function as endog and exog arguments. \nTo generate numpy arrays:", "import patsy\nf = 'Lottery ~ Literacy * Wealth'\ny,X = patsy.dmatrices(f, df, return_type='matrix')\nprint(y[:5])\nprint(X[:5])", "To generate pandas data frames:", "f = 'Lottery ~ Literacy * Wealth'\ny,X = patsy.dmatrices(f, df, return_type='dataframe')\nprint(y[:5])\nprint(X[:5])\n\nprint(sm.OLS(y, X).fit().summary())" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
prisae/blog-notebooks
MX_BarrancasDelCobre.ipynb
cc0-1.0
[ "Barrancas Del Cobre\nMaps for https://mexico.werthmuller.org/besucherreisen/barrancasdelcobre.\nYou can find more explanatory examples in Travel.ipynb, also in this directory.", "import travelmaps2 as tm\ntm.setup(dpi=200)\n\nfig_x = tm.plt.figure(figsize=(tm.cm2in([11, 6])))\n\n# Locations\nMDF = [19.433333, -99.133333] # Mexico City\nURI = [27.216667, -107.916667] # Urique\nCHI = [28.635278, -106.088889] # Chihuahua\n# CRE = [27.752258, -107.634608] # Creel\n# CUA = [28.405, -106.866667] # Cuathémoc \n\n# Create basemap\nm_x = tm.Basemap(width=3500000, height=2300000, resolution='c', projection='tmerc', lat_0=24, lon_0=-102)\n\n# Plot image\nm_x.warpimage('./data/TravelMap/HYP_HR_SR_OB_DR/HYP_HR_SR_OB_DR.tif')\n\n# Put a shade over non-Mexican countries\ncountries = ['USA', 'BLZ', 'GTM', 'HND', 'SLV', 'NIC', 'CUB']\ntm.country(countries, m_x, fc='.8', ec='.3', lw=.5, alpha=.6)\n\n# Fill states\nfcs = 32*['none']\necs = 32*['k']\nlws = 32*[.2,]\ntm.country('MEX', bmap=m_x, fc=fcs, ec=ecs, lw=lws, adm=1)\necs = 32*['none']\necs[5] = 'r'\nlws = 32*[1,]\ntm.country('MEX', bmap=m_x, fc=fcs, ec=ecs, lw=lws, adm=1)\n\n# Add visited cities\ntm.city(URI, 'Urique', m_x, offs=[-.5, -1.5], halign=\"right\")\ntm.city(MDF, 'Mexiko-Stadt', m_x, offs=[.5, .5])\ntm.city(CHI, 'Chihuahua', m_x, offs=[.5, .5])\n#tm.city(CRE, 'Creel', m_x, offs=[.5, .5])\n#tm.city(CUA, 'Cuathemoc', m_x, offs=[.5, .5])\n\n# Save-path\n#fpath = '../mexico.werthmuller.org/content/images/barrancasdelcobre/'\n#tm.plt.savefig(fpath+'MapUrique.png', bbox_inches='tight')\ntm.plt.show()", "Maps with Natural Earth backgrounds\nI got the background image from Natural Earth; it is the 10 m, Cross Blended Hypso with Relief, Water, Drains, and Ocean Bottom. I changed the colour curves slightly in Gimp, to make the image darker.\nAdjustment for Natural Earth:", "from IPython.display import Image\nImage(filename='./data/TravelMap/HYP_HR_SR_OB_DR/Adjustment.jpg') ", "Profile from viewpoint down to Urique\nNot used in blog, later added", "import numpy as np\nimport matplotlib.pyplot as plt\nfig_p,ax = plt.subplots(figsize=(tm.cm2in([10.8, 5])))\n\n# Switch off axis and ticks\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.spines['left'].set_visible(False)\nax.spines['bottom'].set_visible(False)\nax.xaxis.set_ticks_position('bottom')\nax.yaxis.set_ticks_position('none')\n\n# Get data\npdat = np.loadtxt('./data/Mexico/ProfileUrique.txt', skiprows=1)\n\n# Ticks, hlines, axis\nplt.yticks(np.arange(1,6)*500, ('500 m', '1000 m', '1500 m', '2000 m', '2500 m'))\nplt.hlines([1000, 2000], -.5, 17, colors='.8')\nplt.hlines([500, 1500, 2500], -.5, 17, colors='.8', lw=.5)\nplt.axis([-.5, 17, 200, 2500])\n\n# Sum up differences to get distance, distance starts now at every waypoint\ndistance = np.cumsum(pdat[:,4])/1000\n\n# Plot data\nplt.plot(distance, pdat[:, 2])\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
0.22/_downloads/f781cba191074d5f4243e5933c1e870d/plot_find_ref_artifacts.ipynb
bsd-3-clause
[ "%matplotlib inline", "Find MEG reference channel artifacts\nUse ICA decompositions of MEG reference channels to remove intermittent noise.\nMany MEG systems have an array of reference channels which are used to detect\nexternal magnetic noise. However, standard techniques that use reference\nchannels to remove noise from standard channels often fail when noise is\nintermittent. The technique described here (using ICA on the reference\nchannels) often succeeds where the standard techniques do not.\nThere are two algorithms to choose from: separate and together (default). In\nthe \"separate\" algorithm, two ICA decompositions are made: one on the reference\nchannels, and one on reference + standard channels. The reference + standard\nchannel components which correlate with the reference channel components are\nremoved.\nIn the \"together\" algorithm, a single ICA decomposition is made on reference +\nstandard channels, and those components whose weights are particularly heavy\non the reference channels are removed.\nThis technique is fully described and validated in :footcite:HannaEtAl2020", "# Authors: Jeff Hanna <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport mne\nfrom mne import io\nfrom mne.datasets import refmeg_noise\nfrom mne.preprocessing import ICA\nimport numpy as np\n\nprint(__doc__)\n\ndata_path = refmeg_noise.data_path()", "Read raw data, cropping to 5 minutes to save memory", "raw_fname = data_path + '/sample_reference_MEG_noise-raw.fif'\nraw = io.read_raw_fif(raw_fname).crop(300, 600).load_data()", "Note that even though standard noise removal has already\nbeen applied to these data, much of the noise in the reference channels\n(bottom of the plot) can still be seen in the standard channels.", "select_picks = np.concatenate(\n (mne.pick_types(raw.info, meg=True)[-32:],\n mne.pick_types(raw.info, meg=False, ref_meg=True)))\nplot_kwargs = dict(\n duration=100, order=select_picks, n_channels=len(select_picks),\n scalings={\"mag\": 8e-13, \"ref_meg\": 2e-11})\nraw.plot(**plot_kwargs)", "The PSD of these data show the noise as clear peaks.", "raw.plot_psd(fmax=30)", "Run the \"together\" algorithm.", "raw_tog = raw.copy()\nica_kwargs = dict(\n method='picard',\n fit_params=dict(tol=1e-4), # use a high tol here for speed\n)\nall_picks = mne.pick_types(raw_tog.info, meg=True, ref_meg=True)\nica_tog = ICA(n_components=60, allow_ref_meg=True, **ica_kwargs)\nica_tog.fit(raw_tog, picks=all_picks)\n# low threshold (2.0) here because of cropped data, entire recording can use\n# a higher threshold (2.5)\nbad_comps, scores = ica_tog.find_bads_ref(raw_tog, threshold=2.0)\n\n# Plot scores with bad components marked.\nica_tog.plot_scores(scores, bad_comps)\n\n# Examine the properties of removed components. It's clear from the time\n# courses and topographies that these components represent external,\n# intermittent noise.\nica_tog.plot_properties(raw_tog, picks=bad_comps)\n\n# Remove the components.\nraw_tog = ica_tog.apply(raw_tog, exclude=bad_comps)", "Cleaned data:", "raw_tog.plot_psd(fmax=30)", "Now try the \"separate\" algorithm.", "raw_sep = raw.copy()\n\n# Do ICA only on the reference channels.\nref_picks = mne.pick_types(raw_sep.info, meg=False, ref_meg=True)\nica_ref = ICA(n_components=2, allow_ref_meg=True, **ica_kwargs)\nica_ref.fit(raw_sep, picks=ref_picks)\n\n# Do ICA on both reference and standard channels. 
Here, we can just reuse\n# ica_tog from the section above.\nica_sep = ica_tog.copy()\n\n# Extract the time courses of these components and add them as channels\n# to the raw data. Think of them the same way as EOG/EKG channels, but instead\n# of giving info about eye movements/cardiac activity, they give info about\n# external magnetic noise.\nref_comps = ica_ref.get_sources(raw_sep)\nfor c in ref_comps.ch_names: # they need to have REF_ prefix to be recognised\n ref_comps.rename_channels({c: \"REF_\" + c})\nraw_sep.add_channels([ref_comps])\n\n# Now that we have our noise channels, we run the separate algorithm.\nbad_comps, scores = ica_sep.find_bads_ref(raw_sep, method=\"separate\")\n\n# Plot scores with bad components marked.\nica_sep.plot_scores(scores, bad_comps)\n\n# Examine the properties of removed components.\nica_sep.plot_properties(raw_sep, picks=bad_comps)\n\n# Remove the components.\nraw_sep = ica_sep.apply(raw_sep, exclude=bad_comps)", "Cleaned raw data traces:", "raw_sep.plot(**plot_kwargs)", "Cleaned raw data PSD:", "raw_sep.plot_psd(fmax=30)", "References\n.. footbibliography::" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
DEIB-GECO/PyGMQL
examples/notebooks/02a_Mixing_Local_Remote_Processing_SIMPLE.ipynb
apache-2.0
[ "Interfacing with an external GMQL service: Aggregating the Chip-Seq signal of histone marks on promotorial regions\nIn this first application, genes' promoters are extracted from a local dataset and a large set of Chip-Seq experiments is selected from a remote repository. Then, for every promoter and for every Chip-seq experiment, the average signal of those Chip-Seq peaks intersecting the promoter is computed. The result is finally visualized as a heatmap, with rows representing promoters and columns representing Chip-Seq experiments. \nThis example shows: \n1. the integration of local PyGMQL programs with remote repositories,\n2. the possibility to outsource the execution to an external deployment of (Py)GMQL, \n3. the interplay between PyGMQL data and Python libraries written by third parties. \nThese features allow users to write arbitrary complex queries - whose execution and size of the inputs exceed the capabilities of the local environment - and, at the same time, analyze/visualize the output by means of well known Python libraries.", "import gmql as gl\nimport matplotlib.pyplot as plt", "The code begins by loading a local dataset of gene annotations and extracting their promotorial regions (here defined as regions at $\\left[gene_{start}-2000;gene_{start}+2000\\right])$.\nNote that the start and stop attributes automatically consider the strand of the region.", "genes = gl.load_from_path(\"../data/genes/\")\n\npromoters = genes.reg_project(new_field_dict={\n 'start':genes.start-2000, \n 'stop':genes.start + 2000})", "The genes and promoters variables are GMQLDataset; the former is loaded directly, the latter results from a projection operation. Region feature names can be accessed directly from variables to build expressions and predicates (e.g., gene.start + 2000). \nNext, we load the external dataset of Chip-Seq from a remote GMQL Web service; in order to do so, the user has to specify the remote address and login. If the user has already signed to the remote GMQL installation, he/she can use his/her own credentials (this will also grant the access to private datasets), otherwise a guest account is automatically created, without requiring the user to do it manually.", "gl.set_remote_address(\"http://gmql.eu/gmql-rest/\")\ngl.login()", "In the following snippet we show how to load the Chip-Seq data of the ENCODE dataset from the remote GMQL repository and select only the experiments of interest.\nFirst, the user sets the remote execution mode and imports remote datasets with the load_from_remote function; such loading is lazy, therefore no actual data is moved or read at this point.\nThen the user specifies the select condition; the hms[\"experiment\\_target\"] notation enables the user to build predicates on the given metadata attribute. The GMQL engine loads from the dataset only the samples whose metadata satisfy such condition; specifically, only experiments targeting the human H3K9ac marker will be selected.", "gl.set_mode(\"remote\")\n\nhms = gl.load_from_remote(\"HG19_ENCODE_BROAD_AUG_2017\",\n owner=\"public\")\nhms_ac = hms[hms[\"experiment_target\"] == \"H3K9ac-human\"]", "Next, the PyGMQL map operation is used to compute the average of the signal of hms_ac intersecting each promoter; iteration over all samples is implicit. Finally, the materialize method triggers the execution of the query.\nSince the mode is set to \\texttt{\"remote\"}, the dataset stored at ./genes is sent to the remote service GMQL system that performs the specified operations. 
The result is loaded into the mapping GDataframe variable which resides on the local machine.", "mapping = promoters.map(\n hms_ac, \n refName='prom', \n expName='hm',\n new_reg_fields={\n 'avg_signal': gl.AVG('signal')})\nmapping = mapping.materialize()", "At this point, Python libraries for data manipulation, visualization or analysis can be applied to the GDataframe. The following portion of code provides an example of data manipulation of a query result. The to_matrix method transforms the GDataframe into a Pandas matrix, where each row corresponds to a gene and each column to a cell line; values are the average signal on the promoter of the given gene in the given cell line. Finally, the matrix is visualized as a heatmap.", "import seaborn as sns\nheatmap=mapping.to_matrix(\n columns_meta=['hm.biosample_term_name'],\n index_regs=['gene_symbol'], \n values_regs=['avg_signal'],\n fill_value=0)\n\nplt.figure(figsize=(10, 10))\nsns.heatmap(heatmap,vmax = 20)\nplt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ajgpitch/qutip-notebooks
examples/qip-noisy-device-simulator.ipynb
lgpl-3.0
[ "Noisy quantum device simulation with QuTiP\nAuthor: Boxi Li ([email protected])\nThis is the introduction notebook to the deliverable of one of the Google Summer of Code 2019 project (GSoC2019) \"Noise Models in QIP Module\", under the organization NumFocus. The final product of the project is a framework of noisy quantum device simulator based on QuTiP open system solvers.\nThe simulation of quantum information processing (QIP) is usually achieved by gate matrix product. Many simulators such as the simulation backend of Qiskit and porjectQ are based on it. QuTiP offers this common way of simulation with the class qutit.qip.QubitCircuit. It simulates QIP in the circuit model. You can find the introduction notebook for this matrix gate representation here.\nThe simulation introduced here is different as it simulates the dynamics of the quantum device at the level of driving Hamiltonians. It is closer to the physical realization than the matrix product approach and is more convenient when simulating the noise of physical hardware. The simulator is based on QuTiP Lindbladian equation solvers and is defined as qutip.qip.device.Processor. The basic element is the control pulse characterized by the driving Hamiltonian, target qubits, time sequence and pulse strength. Our way of simulation offers a practical way to diagnostically add noise to each pulse or the whole device at the Hamiltonian level. Based on this pulse level control, different backends can be defined for different physical systems such as Cavity QED, Ion trap or Circuit QED. For each backend, a compiler needs to be defined. In the end, the Processor will be able to transfer a simple quantum circuit into the control pulse sequence, add noise automatically and perform the noisy simulation.\nThis notebook contains the most basic part of this quantum device simulator, i.e. the noisy evolution under given control pulses. It demonstrates how to set up the parameters and introduce different kinds of noise into the evolution.\nNote\nThis module is still under active development. Be ready for some adventures and unexpected edges. Please do not hesitate to raise an issue on our GitHub website if you find any bugs. 
A new release might break some backwards compatibility on this module, therefore we recommend you to check our GitHub website if you are facing some unexpected errors after an update.\nLinks to other related notebook\nThere is a series of notebooks on specialized subclasses and application of the simulator Processor, including finding pulses realizing certain quantum gate based on optimization algorithm or physical model and simulating simple quantum algorithms:\nThe notebook QuTiP example: Physical implementation of Spin Chain Qubit model shows the simulation of a spin-chain based quantum computing model both with qutit.qip.QubitCircuit and qutip.qip.device.Processor.\nThe notebook Examples for OptPulseProcessor describes the class OptPulseProcessor, which uses the optimal control module in QuTiP to find the control pulses for quantum gates.\nThe notebook Running the Deutsch–Jozsa algorithm on the noisy device simulator\n gives an example of simulating simple quantum algorithms in the presence of noise.\nThe pulse level control", "import copy\nimport numpy as np\nimport matplotlib.pyplot as plt\npi = np.pi\n\nfrom qutip.qip.device import Processor\nfrom qutip.operators import sigmaz, sigmay, sigmax, destroy\nfrom qutip.states import basis\nfrom qutip.metrics import fidelity\nfrom qutip.qip.operations import rx, ry, rz, hadamard_transform", "Controlling a single qubit\nThe simulation of a unitary evolution with Processor is defiend by the control pulses. Each pulse is represented by a Pulse object consisting of the control Hamiltonian $H_j$, the target qubits, the pulse strength $c_j$ and the time sequence $t$. The evolution is given by \n\\begin{equation}\nU(t)=\\exp(-\\mathrm{i} \\sum_j c_j(t) H_j t)\n\\end{equation}\nIn this example, we define a single-qubit quantum device with $\\sigma_z$ and $\\sigma_y$ pulses.", "processor = Processor(N=1)\nprocessor.add_control(0.5 * sigmaz(), targets=0, label=\"sigmaz\")\nprocessor.add_control(0.5 * sigmay(), targets=0, label=\"sigmay\")", "The list of defined pulses are saved in an attribute Processor.pulses. We can see the pulse that we just defined by", "for pulse in processor.pulses:\n pulse.print_info()", "We can see that the pulse strength coeff and time sequence tlist still remain undefined. To fully characterize the evolution, we need to define them both.\nThe pulse strength and time are both given as a NumPy array. For discrete pulses, tlist specifies the start and the end time of each pulse coefficient, and thus is one element longer than coeff. (This is different from the usual requirement in QuTiP solver where tlist and coeff needs to have the same length.) The definition below means that we turn on the $\\sigma_y$ pulse for $t=\\pi$ and with strength 1. 
(Notice that the Hamiltonian is $H=\\frac{1}{2} \\sigma_z$)", "processor.pulses[1].coeff = np.array([1.])\nprocessor.pulses[1].tlist = np.array([0., pi])\nfor pulse in processor.pulses:\n pulse.print_info()", "This pulse is a $\\pi$ pulse that flips the qubit from $\\left |0 \\right\\rangle$ to $\\left |1 \\right\\rangle$, equivalent to a rotation around y-axis of angle $\\pi$:\n$$R_y(\\theta) = \\begin{pmatrix} cos(\\theta/2) & -sin(\\theta/2) \\ sin(\\theta/2) & cos(\\theta/2) \\end{pmatrix}$$\nWe can run the simulation to see the result of the evolution starting from $\\left |0 \\right\\rangle$:", "basis0 = basis(2, 0)\nresult = processor.run_state(init_state=basis0)\nresult.states[-1].tidyup(1.e-5)", "As arbitrary single-qubit gate can be decomposed into $R_z(\\theta_1) \\cdot R_y(\\theta_2) \\cdot R_z(\\theta_3)$, it is enough to use three pulses. For demonstration purpose, we choose $\\theta_1=\\theta_2=\\theta_3=\\pi/2$", "processor.pulses[0].coeff = np.array([1., 0., 1.])\nprocessor.pulses[1].coeff = np.array([0., 1., 0.])\nprocessor.pulses[0].tlist = np.array([0., pi/2., 2*pi/2, 3*pi/2])\nprocessor.pulses[1].tlist = np.array([0., pi/2., 2*pi/2, 3*pi/2])\n\nresult = processor.run_state(init_state=basis(2, 1))\nresult.states[-1].tidyup(1.0e-5) ", "Pulse with continuous amplitude\nIf your pulse strength is generated somewhere else and is a discretization of a continuous function, you can also tell the Processor to use them with the cubic spline interpolation. In this case tlist and coeff must have the same length.", "tlist = np.linspace(0., 2*np.pi, 20)\nprocessor = Processor(N=1, spline_kind=\"step_func\")\nprocessor.add_control(sigmaz(), 0)\nprocessor.pulses[0].tlist = tlist\nprocessor.pulses[0].coeff = np.array([np.sin(t) for t in tlist])\nprocessor.plot_pulses();\n\ntlist = np.linspace(0., 2*np.pi, 20)\nprocessor = Processor(N=1, spline_kind=\"cubic\")\nprocessor.add_control(sigmaz())\nprocessor.pulses[0].tlist = tlist\nprocessor.pulses[0].coeff = np.array([np.sin(t) for t in tlist])\nprocessor.plot_pulses();", "Noisy evolution\nIn real quantum devices, noise affects the perfect execution of gate-based quantum circuits, limiting their depths. In general, we can divide quantum noise into two types: coherent and incoherent noise. The former one usually dues to the deviation of the control pulse. The noisy evolution is still unitary. Incoherent noise comes from the coupling of the quantum system with the environment. This type of noise leads to the loss of information. In QIP theory, we describe this type of noise with a noisy channel, corresponding to the collapse operators in the Lindblad equation.\nAlthough noise can, in general, be simulated with quantum channel representation, it will need some pre-analysis and approximation, which can be difficult in a large system. This simulator offers an easier, but computationally more demanding solution from the viewpoint of quantum control. Processor, as a circuit simulator, is different from the common simulator of QIP, as it simulates the evolution of the qubits under the driving Hamiltonian. The noise will be defined according to the control pulses and the evolution will be calculated using QuTiP solvers. This enables one to define more complicated noise such as cross-talk and leakage error, depending on the physical device and the problem one wants to study. On the one hand, the simulation can help one analyze the noise composition and identify the dominant noise source. 
On the other hand, together with a backend compiler, one can also use it to study if an algorithm is sensitive to a certain type of noise.\nDecoherence\nIn Processor, decoherence noise is simulated by adding collapse operator into the Lindbladian equation. For single-qubit decoherence, it is equivalent to applying random bit flip and phase flip error after applying the quantum gate. For qubit relaxation, one can simply specify the $t_1$ and $t_2$ time for the device or for each qubit. Here we assume the qubit system has a drift Hamiltonian $H_d=\\hbar \\omega \\sigma_z$, for simplicity, we let $\\hbar \\omega = 10$", "a = destroy(2)\ninitial_state = basis(2,1)\nplus_state = (basis(2,1) + basis(2,0)).unit()\ntlist = np.arange(0.00, 2.02, 0.02)\nH_d = 10.*sigmaz()", "Decay time $T_1$\nThe $T_1$ relaxation time describes the strength of amplitude damping and can be described, in a two-level system, by a collapse operator $\\frac{1}{\\sqrt{T_1}}a$, where $a$ is the annihilation operator. This leads to an exponential decay of the population of excited states proportional to $\\exp({-t/T_1})$. This amplitude damping can be simulated by specifying the attribute t1 of the processor", "from qutip.qip.pulse import Pulse\nt1 = 1.\nprocessor = Processor(1, t1=t1)\n# creat a dummpy pulse that has no Hamiltonian, but only a tlist.\nprocessor.add_pulse(Pulse(None, None, tlist=tlist, coeff=False))\nresult = processor.run_state(init_state=initial_state, e_ops=[a.dag()*a])\n\nfig, ax = plt.subplots()\nax.plot(tlist[0: 100: 10], result.expect[0][0: 100: 10], 'o', label=\"simulation\")\nax.plot(tlist, np.exp(-1./t1*tlist), label=\"theory\")\nax.set_xlabel(\"t\")\nax.set_ylabel(\"population in the excited state\")\nax.legend()\nplt.grid()", "Decay time $T_2$\nThe $T_2$ time describes the dephasing process. Here one has to be careful that the amplitude damping channel characterized by $T_1$ will also lead to a dephasing proportional to $\\exp(-t/2T_1)$. To make sure that the overall phase dampling is $exp(-t/T_2)$, the processor (internally) uses an collapse operator $\\frac{1}{\\sqrt{2*T'_2}} \\sigma_z$ with $\\frac{1}{T'_2}+\\frac{1}{2T_1}=\\frac{1}{T_2}$ to simulate the dephasing. (This also indicates that $T_2 \\leqslant 2T_1$)\nUsually, the $T_2$ time is measured by the Ramsey experiment, where the qubit starts from the excited state, undergoes a $\\pi/2$ pulse, proceeds for a time $t$, and measured after another $\\pi/2$ pulse. For simplicity, here we directly calculate the expectation value of $\\rm{H}\\circ a^\\dagger a \\circ\\rm{H}$, where $\\rm{H}$ denotes the Hadamard transformation. This is equivalent to measure the population of $\\frac{1}{\\sqrt{2}}(|0\\rangle+|1\\rangle)$. The envelope should follow an exponential decay characterized by $T_2$.", "t1 = 1. \nt2 = 0.5\nprocessor = Processor(1, t1=t1, t2=t2)\nprocessor.add_control(H_d, 0)\nprocessor.pulses[0].coeff = True\nprocessor.pulses[0].tlist = tlist\nHadamard = hadamard_transform(1)\nresult = processor.run_state(init_state=plus_state, e_ops=[Hadamard*a.dag()*a*Hadamard])\n\nfig, ax = plt.subplots()\n# detail about lenght of tlist needs to be fixed\nax.plot(tlist[:-1], result.expect[0][:-1], '.', label=\"simulation\")\nax.plot(tlist[:-1], np.exp(-1./t2*tlist[:-1])*0.5 + 0.5, label=\"theory\")\nplt.xlabel(\"t\")\nplt.ylabel(\"Ramsey signal\")\nplt.legend()\nax.grid()", "Random noise in the pulse intensity\nDespite single-qubit decoherence, Processor can also simulate coherent control noise. 
For general types of noise, one can define a noise object and add it to the processor. An example of predefined noise is the random amplitude noise, where random value is added to the pulse every dt. loc and scale are key word arguments for the random number generator np.random.normal.", "from qutip.qip.noise import RandomNoise\nprocessor = Processor(N=1)\nprocessor.add_control(0.5 * sigmaz(), targets=0, label=\"sigmaz\")\nprocessor.add_control(0.5 * sigmay(), targets=0, label=\"sigmay\")\nprocessor.coeffs = np.array([[1., 0., 1.],\n [0., 1., 0.]])\nprocessor.set_all_tlist(np.array([0., pi/2., 2*pi/2, 3*pi/2]))\nprocessor_white = copy.deepcopy(processor)\nprocessor_white.add_noise(RandomNoise(rand_gen=np.random.normal, dt=0.1, loc=-0.05, scale=0.02)) # gausian white noise", "We again compare the result of the evolution with and without noise.", "result = processor.run_state(init_state=basis(2, 1))\nresult.states[-1].tidyup(1.0e-5)\n\nresult_white = processor_white.run_state(init_state=basis(2, 1))\nresult_white.states[-1].tidyup(1.0e-4)\n\nfidelity(result.states[-1], result_white.states[-1])", "Since the result of this this noise is still a pure state, we can visualize it on a Bloch sphere", "from qutip.bloch import Bloch\nb = Bloch()\nb.add_states([result.states[-1], result_white.states[-1]])\nb.make_sphere()", "We can print the pulse information to see the noise.\nThe ideal pulses:", "for pulse in processor_white.pulses:\n pulse.print_info()", "And the noisy pulses:", "for pulse in processor_white.get_noisy_pulses():\n pulse.print_info()", "Getting a Pulse or QobjEvo representation\nIf you define a complicate Processor but don't want to run the simulation right now, you can extract an ideal/noisy Pulse representation or QobjEvo representation. The later one can be feeded directly to QuTiP sovler for the evolution.", "ideal_pulses = processor_white.pulses\n\nnoisy_pulses = processor_white.get_noisy_pulses(device_noise=True, drift=True)\n\nqobjevo = processor_white.get_qobjevo(noisy=False)\n\nnoisy_qobjevo, c_ops = processor_white.get_qobjevo(noisy=True)", "Structure inside the simulator\nThe figures below help one understanding the workflow inside the simulator. The first figure shows how the noise is processed in the circuit processor. The noise is defined separately in a class object. When called, it takes parameters and the unitary noiseless qutip.QobjEvo from the processor, generates the noisy version and sends the noisy qutip.QobjEvo together with the collapse operators to the processor.\n\nWhen calculating the evolution, the processor first creates its own qutip.QobjEvo of the noiseless evolution. It will then find all the noise objects saved in the attributes qutip.qip.device.Processor.noise and call the corresponding methods to get the qutip.QobjEvo and a list of collapse operators representing the noise. (For collapse operators, we don't want to add all the constant collapse into one time-independent operator, so we use a list). The processor then combines its own qutip.QobjEvo with those from the noise objects and give them to the solver. The figure below shows how the noiseless part and the noisy part are combined.", "from qutip.ipynbtools import version_table\nversion_table()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
awjuliani/DeepRL-Agents
Simple-Policy.ipynb
mit
[ "Simple Reinforcement Learning in Tensorflow Part 1:\nThe Multi-armed bandit\nThis tutorial contains a simple example of how to build a policy-gradient based agent that can solve the multi-armed bandit problem. For more information, see this Medium post.\nFor more Reinforcement Learning algorithms, including DQN and Model-based learning in Tensorflow, see my Github repo, DeepRL-Agents.", "import tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np", "The Bandit\nHere we define our bandit. For this example we are using a four-armed bandit. The pullBandit function generates a random number from a normal distribution with a mean of 0. The lower the bandit number, the more likely a positive reward will be returned. We want our agent to learn to always choose the arm that will give that positive reward.", "#List out our bandit arms. \n#Currently arm 4 (index #3) is set to most often provide a positive reward.\nbandit_arms = [0.2,0,-0.2,-2]\nnum_arms = len(bandit_arms)\ndef pullBandit(bandit):\n #Get a random number.\n result = np.random.randn(1)\n if result > bandit:\n #return a positive reward.\n return 1\n else:\n #return a negative reward.\n return -1", "The Agent\nThe code below established our simple neural agent. It consists of a set of values for each of the bandit arms. Each value is an estimate of the value of the return from choosing the bandit. We use a policy gradient method to update the agent by moving the value for the selected action toward the recieved reward.", "tf.reset_default_graph()\n\n#These two lines established the feed-forward part of the network. \nweights = tf.Variable(tf.ones([num_arms]))\noutput = tf.nn.softmax(weights)\n\n#The next six lines establish the training proceedure. We feed the reward and chosen action into the network\n#to compute the loss, and use it to update the network.\nreward_holder = tf.placeholder(shape=[1],dtype=tf.float32)\naction_holder = tf.placeholder(shape=[1],dtype=tf.int32)\n\nresponsible_output = tf.slice(output,action_holder,[1])\nloss = -(tf.log(responsible_output)*reward_holder)\noptimizer = tf.train.AdamOptimizer(learning_rate=1e-3)\nupdate = optimizer.minimize(loss)", "Training the Agent\nWe will train our agent by taking actions in our environment, and recieving rewards. 
Using the rewards and actions, we can know how to properly update our network in order to more often choose actions that will yield the highest rewards over time.", "total_episodes = 1000 #Set total number of episodes to train agent on.\ntotal_reward = np.zeros(num_arms) #Set scoreboard for bandit arms to 0.\n\ninit = tf.global_variables_initializer()\n\n# Launch the tensorflow graph\nwith tf.Session() as sess:\n sess.run(init)\n i = 0\n while i < total_episodes:\n \n #Choose action according to Boltzmann distribution.\n actions = sess.run(output)\n a = np.random.choice(actions,p=actions)\n action = np.argmax(actions == a)\n\n reward = pullBandit(bandit_arms[action]) #Get our reward from picking one of the bandit arms.\n \n #Update the network.\n _,resp,ww = sess.run([update,responsible_output,weights], feed_dict={reward_holder:[reward],action_holder:[action]})\n \n #Update our running tally of scores.\n total_reward[action] += reward\n if i % 50 == 0:\n print(\"Running reward for the \" + str(num_arms) + \" arms of the bandit: \" + str(total_reward))\n i+=1\nprint(\"\\nThe agent thinks arm \" + str(np.argmax(ww)+1) + \" is the most promising....\")\nif np.argmax(ww) == np.argmax(-np.array(bandit_arms)):\n print(\"...and it was right!\")\nelse:\n print(\"...and it was wrong!\")" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
agile-geoscience/striplog
docs/tutorial/10_Extract_curves_into_striplogs.ipynb
apache-2.0
[ "Extract curves into striplogs\nSometimes you'd like to summarize or otherwise extract curve data (e.g. wireline log data) into a striplog (e.g. one that represents formations).\nWe'll start by making some fake CSV text — we'll make 5 formations called A, B, C, D and E:", "data = \"\"\"Comp Formation,Depth\nA,100\nB,200\nC,250\nD,400\nE,600\"\"\"", "If you have a CSV file, you can do:\ns = Striplog.from_csv(filename=filename)\n\nBut we have text, so we do something slightly different, passing the text argument instead. We also pass a stop argument to tell Striplog to make the last unit (E) 50 m thick. (If you don't do this, it will be 1 m thick).", "from striplog import Striplog\n\ns = Striplog.from_csv(text=data, stop=650)", "Each element of the striplog is an Interval object, which has a top, base and one or more Components, which represent whatever is in the interval (maybe a rock type, or in this case a formation). There is also a data field, which we will use later.", "s[0]", "We can plot the striplog. By default, it will use a random legend for the colours:", "s.plot(aspect=3)", "Or we can plot in the 'tops' style:", "s.plot(style='tops', field='formation', aspect=1)", "Random curve data\nMake some fake data:", "from welly import Curve\nimport numpy as np\n\ndepth = np.linspace(0, 699, 700)\ndata = np.sin(depth/10)\ncurve = Curve(data=data, index=depth)", "Plot it:", "import matplotlib.pyplot as plt\n\nfig, axs = plt.subplots(ncols=2, sharey=True)\n\naxs[0] = s.plot(ax=axs[0])\naxs[1] = curve.plot(ax=axs[1])", "Extract data from the curve into the striplog", "s = s.extract(curve.values, basis=depth, name='GR')", "Now we have some the GR data from each unit stored in that unit:", "s[1]", "So we could plot a segment of curve, say:", "plt.plot(s[1].data['GR'])", "Extract and reduce data\nWe don't have to store all the data points. We can optionaly pass a function to produce anything we like, and store the result of that:", "s = s.extract(curve, basis=depth, name='GRmean', function=np.nanmean)\n\ns[1]", "Other helpful reducing functions:\n\nnp.nanmedian &mdash; median average (ignoring nans)\nnp.product &mdash; product\nnp.nansum &mdash; sum (ignoring nans)\nnp.nanmin &mdash; minimum (ignoring nans)\nnp.nanmax &mdash; maximum (ignoring nans)\nscipy.stats.mstats.mode &mdash; mode average\nscipy.stats.mstats.hmean &mdash; harmonic mean\nscipy.stats.mstats.gmean &mdash; geometric mean\n\nOr you can write your own, for example:\ndef trim_mean(a):\n \"\"\"Compute trimmed mean, trimming min and max\"\"\"\n return (np.nansum(a) - np.nanmin(a) - np.nanmax(a)) / a.size\n\nThen do:\ns.extract(curve, basis=basis, name='GRtrim', function=trim_mean)\n\nThe function doesn't have to return a single number like this, it could return anything you like, including a dictionary.\nWe can also add bits to the data dictionary manually:", "s[1].data['foo'] = 'bar'\ns[1]" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Roc-J/Python_data_science
Data_Mining/Local_outlier_factor.ipynb
apache-2.0
[ "局部异常因子方法发现异常点\n局部异常因子(Local Outlier Factor,LOF)也是一种异常检测算法,它对数据实例的局部密度和邻居进行比较,判断这个数据是否属于相似的密度的区域,它适合从那些簇个数未知,簇的密度和大小各不相同的数据中筛选出异常点。 \n从k近邻算法启发来", "from collections import defaultdict\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ninstance = np.matrix([[0,0],[0,1],[1,1],[1,0],[5,0]])\n\nx = np.squeeze(np.asarray(instance[:,0]))\ny = np.squeeze(np.asarray(instance[:,1]))\nplt.cla()\nplt.figure(1)\nplt.scatter(x,y)\nplt.show()", "局部异常因子计算出每个点的局部密度,通过它与K最近邻的点的距离来评估点的局部密度,并与邻居的密度进行比较,以此找出异常点--异常点比邻居的密度要低得多\n为了理解LOF,先了解一些术语的定义\n* 对象P的K距离:对象P与它第K个最近邻的距离,K是算法的参数\n P的K距离邻居:到P的距离小于或等于P到第K个最邻近的距离的所有对象的集合Q\n* 从P到Q的可达距离:P与它的第K个最近邻的距离和P和Q之间的距离中的最大者。\n P的局部可达密度(local Reachability Density of P):K距离邻居和K与其邻居的可达距离之和的比值\n* P的局部异常因子(Local Outlier Factor of P):P与它的K最近邻的局部可达性的比值的平均值", "# 获取点两两之间的距离pairwise_distance\ndistance = 'manhattan'\nfrom sklearn.metrics import pairwise_distances\ndist = pairwise_distances(instance,metric=distance)\n\nprint dist\n\n# 计算K距离,使用heapq来获得K最近邻\nk = 2\n# 计算K距离\nimport heapq\n# k_distance的值是tuple\nk_distance = defaultdict(tuple)\n# 对每个点计算\nfor i in range(instance.shape[0]):\n # 获取它与所有其点之间的距离\n distances = dist[i].tolist()\n # 获得K最近邻\n ksmallest = heapq.nsmallest(k+1,distances)[1:][k-1]\n # 获取索引号\n ksmallest_idx = distances.index(ksmallest)\n # 记录下每个点到第K个最近邻以及到它的距离\n k_distance[i]=(ksmallest,ksmallest_idx)\n\n# 计算K距离邻居\ndef all_indices(value,inlist):\n out_indices = []\n idx = -1\n while True:\n try:\n idx = inlist.index(value,idx+1)\n out_indices.append(idx)\n except ValueError:\n break\n return out_indices\n\n# 计算K距离邻居\nk_distance_neig = defaultdict(list)\nfor i in range(instance.shape[0]):\n # 获得它到所有邻居点的距离\n distances = dist[i].tolist()\n print 'k distance neighbourhood',i\n print distances\n # 获得从第1到第k的最近邻\n ksmallest = heapq.nsmallest(k+1,distances)[1:]\n print ksmallest\n ksmallest_set = set(ksmallest)\n print ksmallest_set\n ksmallest_idx = []\n # 获取k里最小的元素的索引号\n for x in ksmallest_set:\n ksmallest_idx.append(all_indices(x,distances))\n # 将列表的列表转换为列表\n ksmallest_idx = [item for sublist in ksmallest_idx for item in sublist]\n # 对每个点保存\n k_distance_neig[i].extend(zip(ksmallest,ksmallest_idx))\n\nprint k_distance_neig\n\n# 计算可达距离和LRD\n# 局部可达密度\nlocal_reach_density = defaultdict(float)\nfor i in range(instance.shape[0]):\n # LRD分子,k距离邻居的个数\n no_neighbours = len(k_distance_neig[i])\n denom_sum = 0\n # 可达距离求和\n for neigh in k_distance_neig[i]:\n denom_sum += max(k_distance[neigh[1]][0],neigh[0])\n local_reach_density[i] = no_neighbours/(1.0*denom_sum)\n\n# 计算LOF\nlof_list = []\nfor i in range(instance.shape[0]):\n lrd_sum = 0\n rdist_sum = 0\n for neigh in k_distance_neig[i]:\n lrd_sum +=local_reach_density[neigh[1]]\n rdist_sum += max(k_distance[neigh[1]][0],neigh[0])\n lof_list.append((i,lrd_sum*rdist_sum))\nprint lof_list", "一个点的LOF很高,则认为它是一个异常点" ]
[ "markdown", "code", "markdown", "code", "markdown" ]
napjon/ds-nd
p2-introds/nyc_subway/project.ipynb
mit
[ "Overview\nNYC Subway contains regular number of ridership across different conditions. It also contains time series. In this analysis, I investigate whether there is difference between raining vs not raining, and other statistical method to build the model, predicting number of ridership.", "import pandas as pd\nimport numpy as np\nimport scipy.stats as sp\n# %matplotlib notebook\n%matplotlib inline\nimport seaborn as sns; sns; sns.set_style('dark')\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('turnstile_data_master_with_weather.csv')\ndf.index = pd.to_datetime(df.pop('DATEn') +' '+ df.pop('TIMEn'))\ndf.sort_index(inplace=True)\ndel df['Unnamed: 0']\n\ndf.head()", "References\n\nhttps://class.coursera.org/statistics-003\nhttps://www.udacity.com/course/intro-to-data-science--ud359\nhttp://blog.minitab.com/blog/adventures-in-statistics/multiple-regession-analysis-use-adjusted-r-squared-and-predicted-r-squared-to-include-the-correct-number-of-variables\nhttps://en.wikipedia.org/wiki/Coefficient_of_determination\nhttp://napitupulu-jon.appspot.com/posts/inference-diagnostic-mlr-coursera-statistics.html\n\nStatistical Test", "df.groupby('rain',as_index=False).ENTRIESn_hourly.mean()", "In this data, we can see summary statistic of number of ridership hourly, represented by ENTRIESn_hourly variable between rainy days and non-rainy days. So the independent variable is rain that represented as non-rainy day in control group, and non-rainy in experiment group. How rainy days affect the number of ridership, so the dependent variable is ENTRIESn_hourly. \nWe can see that means of number ridership hourly of non-rainy days is 1090, where the means with rainy days is 1105. Such small difference, and we're going to test whether the difference is significantly higher, using independence test with one-tail p-value. I'm using 0.05 as p-critical value.\n\nH0 $ P_\\mathbf{(rain > non-rain)} = 0.5$ : Population number of ridership in rainy days and non-rainy days is equal.\nHA $ P_\\mathbf{(rain > non-rain)} \\gt 0.5$ : Population number of ridership in rainy days is higher than non-rainy days.\n\nThe conditions within groups have validated. The sample size in this data is more than 30, and less than 10% population.\nNon-parametric test used as statistical test that doesn't assume any underlying probability distribution. Mann Whittney U test is one of non-parametric test that I will be using in this case. Since we see that the distribution of both rainy and non-rainy is very right skewed in the Visualization section, we can't use any statistical test that assume normal distribution. So instead we can use non-parametric test.", "df.groupby('rain',as_index=False).ENTRIESn_hourly.mean()\n\nsp.mannwhitneyu(df.ix[df.rain==0,'ENTRIESn_hourly'],\n df.ix[df.rain==1,'ENTRIESn_hourly'])", "We're using Mann-Whitney U test with average 1090 hourly ridership on non-rainy days and 1105 hourly ridership on rainy days. 
Because p-value is 0.025 less than 0.05 p-critical, we reject the null hypothesis, and conclude that the data provide convincing evidence that average number of hourly ridership in rainy days is higher than those of non-rainy days.\nLinear Regression\n\nOLS using Statsmodels or Scikit Learn\nGradient descent using Scikit Learn\nOr something different?*\n\nI'm going to use linear regression with multiple predictor, hence multiple linear regression with OLS.\nI use all numerical variables in my data plus additional variable isBusinessDay, except exits, since it will be expected that number of ridership between entries and exits will be similar. I use UNIT and Hour as dummy variables. I don't test dummy features, since it's computationally expensive. I also subset the data since it's also computationally expensive learn from dummy features. Moreover I know that UNIT and Hour features improve the model when I try it at the Udacity website.", "length = df.shape[0]\nsubset = df.take(np.random.permutation(length)[:int(length*0.1)]).reset_index()\n\ndummy_hours = pd.get_dummies(subset['Hour'], prefix='hour')\ndummy_units = pd.get_dummies(subset['UNIT'], prefix='unit')\n\n# features = subset.join(dummy_units).join(dummy_hours)\nfeatures = subset\nbanned = ['ENTRIESn_hourly','UNIT','Hour','DESCn','EXITSn_hourly','index']\ncandidates = [e for e in features.columns if e not in banned]", "R squared is not a significant measures for testing our model. Since every time we're adding a variable, R-squared will keep increasing. We're going to use adjusted R-squared, since it will incorporate penalty everytime we're adding a variable.", "def test_adjusted_R_squared(col):\n \"\"\"Testing one variable with already approved predictors\"\"\"\n \n reg = sm.OLS(features['ENTRIESn_hourly'],features[predictors + [col]])\n result = reg.fit()\n return result.rsquared_adj", "I'm going to choose forward selection, where I add one variable at a time based on highest adjusted R squared. And I will stop adding a variable if there's isnt anymore increase compared to previous adjusted R squared.", "predictors = []\ntopr2 = 0\nfor i in xrange(len(candidates)):\n \n filtered = filter(lambda x: x not in predictors, candidates)\n list_r2 = map(test_adjusted_R_squared,filtered)\n highest,curr_topr2 = max(zip(filtered,list_r2),key=lambda x: x[1])\n \n if curr_topr2 > topr2:\n topr2 = round(curr_topr2,10)\n else:\n print(\"Adjusted R Squared can't go any higher. Stopping\")\n break\n \n predictors.append(highest)\n print('Step {}: Adjusted R-squared = {} + {}'.format(i,topr2,highest))", "These are non dummy features after I perform forward selection", "predictors", "To test collinearity that may happen in my numerical features, I use scatter matrix.", "print('Scatter Matrix of features and predictors to test collinearity');\npd.scatter_matrix(features[numerics],figsize=(10,10));", "I can see that there are no collinearity among the predictors.\nNext I join non-dummy features and dummy features to features_dummy and create the model.", "features_dummy = features[predictors].join(dummy_units).join(dummy_hours)\nmodel = sm.OLS(features['ENTRIESn_hourly'],features_dummy).fit()\n\nfilter_cols = lambda col: not col.startswith('unit') and not col.startswith('hour')\nmodel.params[model.params.index.map(filter_cols)]\n\nmodel.rsquared", "R2 is often interpreted as the proportion of response variation \"explained\" by the regressors in the model. 
So we can say 61.67% of the variability in the % number of ridership subway hourly can be explained by the model.\nVisualization\nAt the time of this writing, pandas has grown mature, and ggplot for python,which relies on pandas, is not being updated. So I will not use ggplot in this section, and use pandas plotting.", "fig,axes = plt.subplots(nrows=1,ncols=2,sharex=True,sharey=True,squeeze=False)\n\nfiltered = df.ix[df.ENTRIESn_hourly < 10000]\n\nfor i in xrange(1):\n axes[0][i].set_xlabel('Number of ridership hourly')\n axes[0][i].set_ylabel('Frequency')\n\nfiltered.ix[filtered.rain == 0,'ENTRIESn_hourly'].hist(ax=axes[0][0],bins=50)\naxes[0][0].set_title('Non-rainy days')\nfiltered.ix[filtered.rain == 1,'ENTRIESn_hourly'].hist(ax=axes[0][1],bins=50)\naxes[0][1].set_title('Rainy days')\n\nfig.set_size_inches((15,5))", "In this plot, we can see that more people is riding the subway. But we want to know whether the difference is significance, using hypothesis test. The frequency is indeed higher for non-rainy days compared to non-rainy days.", "(df\n .resample('1D',how='mean')\n .groupby(lambda x : 1 if pd.datetools.isBusinessDay(x) else 0)\n .ENTRIESn_hourly\n .plot(legend=True))\nplt.legend(['Not Business Day', 'Business Day'])\nplt.xlabel('By day in May 2011')\nplt.ylabel('Average number of ridership hourly')\nplt.title('Average number of ridership every day at in May 2011');", "We can see that the difference is likely siginificant of ridership from the time of day. We can create a new variable to turn this into categorical variable.", "df['BusinessDay'] = df.index.map(lambda x : 0 if pd.datetools.isBusinessDay(x) else 1)\n\ndf.resample('1D').rain.value_counts()", "Conclusion\nSince the data is observation and not controlled experiment, we can't make causation. However there is likely to be no different for average number of ridership hourly of non-rainy days and rainy days. We know that the dataset is taken from NYC data subway, but because the data is not random sampled in this observation, we can't generalize to all people who use subway in NYC. So pretty much we can't make any causal statement that whether or not there is a difference of average number ridership hourly between rainy days and non rainy days. Moreover, the data also doesn't provide convincing evidence that the number people ride NYC subway is significantly different between rainy days and not rainy days.\nUsing Statistical Test, If in fact there's no different of average number of ridership hourly of non-rainy days and rainy days, the probability of getting a sample with size 44104 for rainy days and 87847 sample size for non-rainy days with average difference of 15 ridership, is 0.025. Such a small probability could means that rain is a significant predictor, and the difference it's not due to chance.\nUsing Linear Regression, we can say that all else held constant, the model predicts number of ridership in one hour for non-rainy days is 117 people higher than rainy days, on average.\nReflection\nSo where will this lead us? Well we could see that in average day, number of ridership still following some pattern. But it's not clear how this affect through season since we only have limited data. The data itself could expand to through one year. As we can see that this data only include May 2011, and we have no idea how winter, autumn, summer, and spring affecting the number of ridership.\nThat's more analysis that can be done. With statistical test, I just analyze how rain is not significant different. 
The difference may just be due to chance, or it could be some factor other than the rain. Fog may make a significant difference, and you can also see in the Visualization section that the number of ridership differs between business days and non-business days.\nWe have also seen that the distribution of hourly entries (ENTRIESn_hourly) is right skewed, so we could apply a transformation to make it more normal. The relationship between ridership on business days and non-business days is also not linear; it follows what seems to be a cyclical pattern.\nThe model's predictions are not really linear either. To test the performance of our model we can check the following diagnostics:\n\nlinear relationship between every numerical explanatory variable and the response\nNearly normal residuals with mean 0\nConstant variability of residuals\nIndependent residuals\n\nOur model is not a good fit if at least one of these diagnostics fails, which it does.\nlinear relationship between every numerical explanatory variable and the response\nTo test whether the model is good we can plot all the numerical features against the residuals and see whether every plot is a random scatter around zero. This checks whether there is a linear relationship between the residuals and the numerical features, to make sure the model doesn't miss any other dependent variables.", "fig,axes = plt.subplots(nrows=1,ncols=3,sharey=True,squeeze=False)\nnumerics = ['maxpressurei', 'mintempi', 'precipi']\nfor i in xrange(len(numerics)):\n axes[0][i].scatter(x=features[numerics[i]],y=model.resid,alpha=0.1)\n axes[0][i].set_xlabel(numerics[i])\n \n\naxes[0][0].set_ylabel('final model residuals')\naxes[0][1].set_title('linear relationships between features and residual, alpha 0.1')\nfig.set_size_inches(12,5);", "We see that even though maxpressurei and mintempi look categorical, their residuals are randomly scattered. But precipi is not a good candidate for a linear relationship in the model: it does not seem to be randomly scattered.\nNearly normal residuals with mean 0", "fig,axes = plt.subplots(nrows=1,ncols=2,squeeze=False)\n\nsp.probplot(model.resid,plot=axes[0][0])\nmodel.resid.hist(bins=20,ax=axes[0][1]);\n\naxes[0][1].set_title('Histogram of residuals')\naxes[0][1].set_xlabel('Residuals')\naxes[0][1].set_ylabel('Frequency');", "Next, we check with a histogram that the residuals are normally distributed. The histogram shows that they are fairly normal and centered around zero. The quantile plot checks whether the residuals are randomly scattered around zero. We can see that our model fails this test: the residuals are very skewed, as shown by the large number of points deviating from the mean line in the tail areas. This suggests that linear regression is not a good model for this case.\nConstant variability of residuals", "fig,axes = plt.subplots(nrows=1,ncols=2,squeeze=False)\naxes[0][0].scatter(x=model.fittedvalues, y=model.resid, alpha=0.1)\naxes[0][1].scatter(x=model.fittedvalues, y=abs(model.resid), alpha=0.1);\n\naxes[0][0].set_xlabel('fitted_values')\naxes[0][1].set_xlabel('fitted_values')\n\naxes[0][0].set_ylabel('residuals')\naxes[0][1].set_ylabel('Abs(residuals)');\n\nfig.set_size_inches(13,5)", "The model also fails this diagnostic. In the first plot, the fitted values and residuals should be randomly scattered around zero, without forming any kind of fan shape. For the plot on the left, we see some kind of boundary that keeps the points from being randomly scattered, and a fan shape appears. This could mean there are other dependent variables that we haven't found yet.
Some fan shape also ocurring where we plot in the right with absolute value of residuals.\nIndependent residuals", "resids = pd.DataFrame(model.resid.copy())\nresids.columns = ['residuals']\nresids.index = pd.to_datetime(features['index'])\nresids.sort_index(inplace=True)\n\nplt.plot_date(x=resids.resample('1H',how='mean').index,\n y=resids.resample('1H',how='mean').residuals);\nplt.xlabel('Time Series')\nplt.ylabel('residuals')\nplt.title('Residuals Variability across time');", "Finally, our model should be independent across time. We can plot this by residuals through time series, checking whether the residuals is constant variability, randomly scatter around zero. In this plot, it's pretty constant across May 2011. But since we only have limited data, 1 month and 1 year, we can't be sure whether the model predict accurately in any other month and year.\nAs linear regression is not a good model, there's could be another model, and some additional dependent variables, that can be used to better fit for this problem." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
rubensfernando/mba-analytics-big-data
Python/2016-07-22/aula2-parte1-funcoes.ipynb
mit
[ "Funções\n\nAté agora, vimos diversos tipos de dados, atribuições, comparações e estruturas de controle.\nA ideia da função é dividir para conquistar, onde:\nUm problema é dividido em diversos subproblemas\nAs soluções dos subproblemas são combinadas numa solução do problema maior.\n\n\n\nEsses subproblemas têm o nome de funções.\n\n\nFunções possibilitam abstrair, ou seja permite capturar a computação realiza e tratá-la como primitiva.\n\nSuponha que queremos que a variável z seja o máximo de dois números (x e y).\n\nUm programa simples seria:\nif x &gt; y:\n z = x\nelse:\n z = y\n\n\nA ideia é encapsular essa computação dentro de um escopo que pode ser tratado como primitiva.\n\nÉ utilizado simplesmente chamando o nome e fornecendo uma entrada.\nOs detalhes internos sendo escondidos dos usuários.\n\n\n\nUma função tem 3 partes importantes:\ndef &lt;nome&gt; ( &lt;parametros&gt; ):\n &lt;corpo da função&gt;\n\n\ndef é uma palavra chave\n\n&lt;nome&gt; é qualquer nome aceito pelo Python\n&lt;parametros&gt; é a quantidade de parâmetros que será passado para a função (pode ser nenhum).\n&lt;corpo da função&gt; contém o código da função.\n\nVoltando ao exemplo:", "def maximo(x, y):\n if x > y:\n z = x\n else:\n z = y", "Ótimo temos uma função e podemos reaproveita-la. Porém, para de fato reaproveita-la temos que utilizar o comando return.", "def maximo(x, y):\n if x > y:\n return x\n else:\n return y", "Pronto agora sim! Já podemos reaproveitar nossa função!\nE como fazer isso?", "z = maximo(3, 4)", "Quando chamamos a função maximo(3, 4) estamos definindo que x = 3 e y = 4. Após, as expressões são avaliadas até que não se tenha mais expressões, e nesse caso é retornado None. Ou até que encontre a palavra especial return, retornando como valor da chamada da função.", "print(z)", "Já entendemos o que é e como criar funções. \nPara testar vamos criar uma função que irá realizar uma conta.", "def economias (dinheiro, conta, gastos):\n total = (dinheiro + conta) - gastos\n return (total)\n\neco = economias(10, 20, 10)\nprint(eco)", "Também podemos definir um valor padrão para um ou mais argumentos\nVamos reescrever a função economias para que os gastos sejam fixados em 150, caso não seja passado nenhum valor por padrão.", "def economias(dinheiro, conta, gastos=150):\n total = (dinheiro + conta) - gastos\n return(total)\n\nprint(economias(100, 60))\n\nprint(economias(100, 60, 10))", "É importante notar que uma variável que está dentro de uma função, não pode ser utilizada novamente enquanto a função não terminar de ser executada. \nNo mundo da programação, isso é chamado de escopo. Vamos tentar imprimir o valor da variável dinheiro.", "print(dinheiro)", "<span style=\"color:blue;\">Por que isso aconteceu?</span>\nEsse erro acontece pois a variável dinheiro somente existe dentro da função economias, ou seja, ela existe apenas no contexto local dessa função.\nVamos modificar novamente a função economias:", "def economias(dinheiro, conta, gastos=150):\n total = (dinheiro + conta) - gastos\n total = total + eco\n return(total)\n\nprint(economias(100,60))", "<span style=\"color:blue;\">Por que não deu problema?</span>\nQuando utilizamos uma variável que está fora da função dentro de uma função estamos utilizando a ideia de variáveis globais, onde dentro do contexto geral essa variável existe e pode ser utilizada dentro da função.\n<span style=\"color:red;\">Isso não é recomendado! 
O correto seria ter um novo argumento!</span>\nExercício de Funções\nCrie uma função que receba dois argumentos.\n* O primeiro argumento é o valor de um determinado serviço\n* O segundo é a porcentagem da multa por atraso do pagamento. O valor padrão da porcentagem, se não passado, é de 7%. A função deve retornar o valor final da conta com o juros. Lembre-se de converter 7%.", "def conta(valor, multa=7):\n # Seu código aqui", "Funções embutidas\nPython tem um número de funções embutidas que sempre estão presentes. Uma lista completa pode ser encontrada em https://docs.python.org/3/library/functions.html.\n<span style=\"color:blue;\">Já utilizamos algumas delas! Quais?</span>\ninput\nUma outra função que é bem interessante, é a input. Essa função permite que o usuário digite uma entrada, por exemplo:", "idade = input('Digite sua idade:')\nprint(idade)\n\nnome = input('Digite seu nome:')\nprint(nome)\n\nprint(type(idade))\n\nprint(type(nome))", "Note que ambas as variáveis são strings. Portanto precisamos converter para inteiro a idade.", "idade = int(input(\"Digite sua idade:\"))\n\nprint(type(idade))", "open\nA função open, permite abrir um arquivo para leitura e escrita.\nopen(nome_do_arquivo, modo)\n\nModos:\n * r - abre o arquivo para leitura.\n * w - abre o arquivo para escrita.\n * a - abre o arquivo para escrita acrescentando os dados no final do arquivo.\n * + - pode ser lido e escrito simultaneamente.", "import os\nos.remove(\"arquivo.txt\")\n\narq = open(\"arquivo.txt\", \"w\")\n\nfor i in range(1, 5):\n arq.write('{}. Escrevendo em arquivo\\n'.format(i))\n\narq.close()", "Métodos\n\nread() - retorna uma string única com todo o conteúdo do arquivo.\nreadlines() - todo o conteúdo do arquivo é salvo em uma lista, onde cada linha do arquivo será um elemento da lista.", "f = open(\"arquivo.txt\", \"r\")\nprint(f, '\\n')\ntexto = f.read()\nprint(texto)\nf.close()\n\nf = open(\"arquivo.txt\", \"r\")\ntexto = f.readlines()\nprint(texto)\nf.close()\n\n#help(f.readlines)", "Para remover o \\n podemos utilizar o método read que irá gerar uma única string e depois aplicamos o método splitlines.", "f = open(\"arquivo.txt\", \"r\")\ntexto = f.read().splitlines()\nprint(texto)\nf.close()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
jsharpna/DavisSML
lectures/lecture5/lecture5.ipynb
mit
[ "The Lasso\nStatML: Lecture 5\nProf. James Sharpnack\n\nSome content and images are from \"The Elements of Statistical Learning\" by Hastie, Tibshirani, Friedman\nReading ESL Chapter 3\n\nRecall Convex Optimization\nDef A function $f : \\mathbb R^p \\to \\mathbb R$ is convex if for any $0 \\le \\alpha \\le 1$, $x_0, x_1 \\in \\mathbb R^p$,\n$$\nf(\\alpha x_0 + (1 - \\alpha) x_1) \\le \\alpha f(x_0) + (1 - \\alpha) f(x_1).\n$$\n\nFor convex functions, local minima are global minima\n\nRecall 1st Order Condition. If f is differentiable then it is convex if \n$$\nf(x) \\ge f(x_0) + \\nabla f(x_0)^\\top (x - x_0), \\forall x,x_0\n$$\nand when $\\nabla f(x_0) = 0$ then \n$$\nf(x) \\ge f(x_0), \\forall x\n$$\nso any fixed point of gradient descent is a global min (for convex, differentiable f)\nSubdifferential\nDef. $g(x_0) \\in \\mathbb R^p$ is a subgradient of $f$ at $x_0$ if\n$$\nf(x) \\ge f(x_0) + g(x_0)^\\top (x - x_0), \\forall x.\n$$\nThe set of all subgradients at $x_0$ is call the subdifferential, denoted $\\partial f(x_0)$.\n\nFor any global optima, $0 \\in \\partial f(x_0)$.\n\nWavelet denoising\nSoft thresholding is commonly used for orthonormal bases.\n- Suppose that we have a vector $y_1,\\ldots, y_T$ (like a time series).\n- And we want to reconstruct $y$ with $W \\beta$ where $\\beta$ has a small sum of absolute values $\\sum_i |\\beta_i|$ \n- $W$ is $T \\times T$ and $W W^\\top = W^\\top W = I$ (orthonormal full rank design)\nWant to minimize \n$$\n\\frac 12 \\sum_{i=1}^T (y - W \\beta)i^2 + \\lambda \\sum{i=1}^T |\\beta_i|.\n$$", "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n## Explore Turkish stock exchange dataset\n\ntse = pd.read_excel('../../data/data_akbilgic.xlsx',skiprows=1)\n\ntse = tse.rename(columns={'ISE':'TLISE','ISE.1':'USDISE'})\n\ndef const_wave(T,a,b):\n wave = np.zeros(T)\n s1 = (b-a) // 2\n s2 = (b-a) - s1\n norm_C = (s1*s2 / (s1+s2))**0.5\n wave[a:a+s1] = norm_C / s1\n wave[a+s1:b] = -norm_C / s2\n return wave\n\ndef _const_wave_basis(T,a,b):\n if b-a < 2:\n return []\n wave_basis = []\n wave_basis.append(const_wave(T,a,b))\n mid_pt = a + (b-a)//2\n wave_basis += _const_wave_basis(T,a,mid_pt)\n wave_basis += _const_wave_basis(T,mid_pt,b)\n return wave_basis\n\ndef const_wave_basis(T,a,b):\n father = np.ones(T) / T**0.5\n return [father] + _const_wave_basis(T,a,b)\n\n# Construct discrete Haar wavelet basis\nT,p = tse.shape\nwave_basis = const_wave_basis(T,0,T)\nW = np.array(wave_basis).T\n\n_ = plt.plot(W[:,:3])\n\ndef soft(y,lamb):\n pos_part = (y - lamb) * (y > lamb)\n neg_part = (y + lamb) * (y < -lamb)\n return pos_part + neg_part\n\n## Volatility seems most interesting\n## will construct local measure of volatility\n## remove rolling window estimate (local centering)\n## square the residuals\n\ntse = tse.set_index('date')\ntse_trem = tse - tse.rolling(\"7D\").mean()\ntse_vol = tse_trem**2.\n\n## Make wavelet transformation and soft threshold\n\ntse_wave = W.T @ tse_vol.values\nlamb = .001\ntse_soft = soft(tse_wave,lamb)\ntse_rec = W @ tse_soft\ntse_den = tse_vol.copy()\ntse_den.iloc[:,:] = tse_rec\n\n_ = tse_vol.plot(subplots=True,figsize=(10,10))\n\n_ = tse_den.plot(subplots=True,figsize=(10,10))", "Wavelet reconstruction\nCan reconstruct the sequence by\n$$\n\\hat y = W \\hat \\beta.\n$$\nThe objective is likelihood term + L1 penalty term,\n$$\n\\frac 12 \\sum_{i=1}^T (y - W \\beta)i^2 + \\lambda \\sum{i=1}^T |\\beta_i|.\n$$\n\nThe L1 penalty \"forces\" some $\\beta_i = 0$, inducing sparsity", 
"plt.plot(tse_soft[:,4])\nhigh_idx = np.where(np.abs(tse_soft[:,4]) > .0001)[0]\nprint(high_idx)\n\nfig, axs = plt.subplots(len(high_idx) + 1,1)\nfor i, idx in enumerate(high_idx):\n axs[i].plot(W[:,idx])\nplt.plot(tse_den['FTSE'],c='r')", "Non-orthogonal design\nThe objective is likelihood term + L1 penalty term,\n$$\n\\frac 12 \\sum_{i=1}^T (y - X \\beta)i^2 + \\lambda \\sum{i=1}^T |\\beta_i|.\n$$\ndoes not have closed form for $X$ that is non-orthogonal.\n\nit is convex\nit is non-smooth (recall $|x|$)\nhas tuning parameter $\\lambda$\n\nCompare to best subset selection (NP-hard):\n$$\n\\min \\frac 12 \\sum_{i=1}^T (y - X \\beta)_i^2.\n$$\nfor\n$$\n\\| \\beta \\|_0 = |{\\rm supp}(\\beta)| < s.\n$$\nImage of Lasso solution\n<img src=\"lasso_soln.PNG\" width=100%>\nSolving the Lasso\nThe lasso can be written in regularized form,\n$$\n\\min \\frac 12 \\sum_{i=1}^T (y - X \\beta)i^2 + \\lambda \\sum{i=1}^T |\\beta_i|,\n$$\nor in constrained form,\n$$\n\\min \\frac 12 \\sum_{i=1}^T (y - X \\beta)i^2, \\quad \\textrm{s.t.} \\sum{i=1}^T |\\beta_i| \\le C,\n$$\n\nFor every $\\lambda$ there is a $C$ such that the regularized form and constrained form have the same argmin\nThis correspondence is data dependent\n\nExercise 5.1. Solving the Lasso\nA quadratic program (QP) is any convex optimization of the form \n$$\n\\min \\beta^\\top Q \\beta + \\beta^\\top a \\quad \\textrm{ s.t. } A\\beta \\le c\n$$\nwhere $Q$ is positive semi-definite.\nShow that the lasso in constrained form is a QP. (Hint: write $\\beta = \\beta_+ - \\beta_-$ where $\\beta_{+,j} = \\beta_{j} 1{ \\beta_j > 0}$ and $\\beta_{-,j} = - \\beta_{j} 1{ \\beta_j < 0}$). \nSolution to 5.1\nThe objective is certainly quadratic...\n$$\n\\frac 12 \\sum_{i=1}^T (y - X \\beta)_i^2 = \\frac 12 \\beta^\\top (X^\\top X) \\beta - \\beta^\\top (X^\\top y) + C\n$$\nand we know that $X^\\top X$ is PSD because $a^\\top X^\\top X a = \\| X a\\|^2 \\ge 0$.\nWhat about $\\| \\beta \\|_1$?\n\nSolving the lasso\nFor a single $\\lambda$ (or $C$ in constrained form) can solve the lasso with many specialized methods\n- quadratic program solver\n- proximal gradient\n- alternating direction method of multipliers\nbut $\\lambda$ is a tuning parameter. Options\n1. Construct a grid of $\\lambda$ and solve each lasso\n2. Solve for all $\\lambda$ values - path algorithm\nActive sets and why lasso works better\n\nLet $\\hat \\beta_\\lambda$ be the $\\hat \\beta$ at tuning parameter $\\lambda$.\nDefine $\\mathcal A_\\lambda = {\\rm supp}(\\hat \\beta_\\lambda)$ the non-zero elements of $\\hat \\beta_\\lambda$.\nFor large $\\lambda \\rightarrow \\infty$, $|\\mathcal A_\\lambda| = 0$\nFor small $\\lambda = 0$, $|\\mathcal A_\\lambda| = p$ (when OLS solution has full support)\n\nForward greedy selection only adds elements to the active set, does not remove elements.\nExercise 5.2.1\nVerify 1 and 2 above.\n\nLasso Path\n\nStart at $\\lambda = +\\infty, \\hat \\beta = 0$.\nDecrease $\\lambda$ until $\\hat \\beta_{j_1} \\ne 0$, $\\mathcal A \\gets {j_1}$. 
(Hitting event)\n\nContinue decreasing $\\lambda$ updating $\\mathcal A$ with hitting and leaving events\n\n\n$x_{j_1}$ is the predictor variable most correlated with $y$\n\nHitting events are when element is added to $\\mathcal A$\nLeaving events are when element is removed from $\\mathcal A$\n$\\hat \\beta_{\\lambda,j}$ is piecewise linear, continuous, as a function of $\\lambda$\nknots are at \"hitting\" and \"leaving\" events\n\n\nfrom sklearn.org\nLeast Angle Regression (LAR)\n\nStandardize predictors and start with residual $r = y - \\bar y$, $\\hat \\beta = 0$\nFind $x_j$ most correlated with $r$\nMove $\\beta_j$ in the direction of $x_j^\\top r$ until the residual is more correlated with another $x_k$\nMove $\\beta_j,\\beta_k$ in the direction of their joint OLS coefficients of $r$ on $(x_j,x_k)$ until some other competitor $x_l$ has as much correlation with the current residual\nContinue until all predictors have been entered.\n\nExercise 5.2.2\nHow do we know that LAR does not give us the Lasso solution?\nLasso modification\n4.5 If a non-zero coefficient drops to 0 then remove it from the active set and recompute the restricted OLS.\n\nfrom ESL", "# %load ../standard_import.txt\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing, model_selection, linear_model\n\n%matplotlib inline\n\n## Modified from the github repo: https://github.com/JWarmenhoven/ISLR-python \n## which is based on the book by James et al. Intro to Statistical Learning.\n\ndf = pd.read_csv('../../data/Hitters.csv', index_col=0).dropna()\ndf.index.name = 'Player'\ndf.info()\n\n## Simulate a dataset for lasso\n\nn=100\np=1000\nX = np.random.randn(n,p)\nX = preprocessing.scale(X)\n\n## Subselect true active set\n\nsprob = 0.02\nSbool = np.random.rand(p) < sprob\ns = np.sum(Sbool)\nprint(\"Number of non-zero's: {}\".format(s))\n\n## Construct beta and y\n\nmu = 100.\nbeta = np.zeros(p)\nbeta[Sbool] = mu * np.random.randn(s)\n\neps = np.random.randn(n)\ny = X.dot(beta) + eps", "Exercise 5.3\n\nRun the lasso using linear_model.lars_path with the lasso modification (see docstring with ?linear_model.lars_path) \nPlot the lasso coefficients that are learned as a function of lambda. You should have a plot with the x-axis being lambda and the y-axis being the coefficient value, with $p=1000$ lines plotted. 
Highlight the $s$ coefficients that are truly non-zero by plotting them in red.", "?linear_model.lars_path\n\n## Answer to exercise 5.3\n## Run lars with lasso mod, find active set\n\nlarper = linear_model.lars_path(X,y,method=\"lasso\")\nS = set(np.where(Sbool)[0])\n\ndef plot_it():\n for j in S:\n _ = plt.plot(larper[0],larper[2][j,:],'r')\n for j in set(range(p)) - S:\n _ = plt.plot(larper[0],larper[2][j,:],'k',linewidth=.75)\n _ = plt.title('Lasso path for simulated data')\n _ = plt.xlabel('lambda')\n _ = plt.ylabel('Coef')\n\nplot_it()\n\n## Hitters dataset\n\ndf = pd.read_csv('../../data/Hitters.csv', index_col=0).dropna()\ndf.index.name = 'Player'\ndf.info()\n\ndf.head()\n\ndummies = pd.get_dummies(df[['League', 'Division', 'NewLeague']])\ndummies.info()\nprint(dummies.head())\n\ny = df.Salary\n\n# Drop the column with the independent variable (Salary), and columns for which we created dummy variables\nX_ = df.drop(['Salary', 'League', 'Division', 'NewLeague'], axis=1).astype('float64')\n# Define the feature set X.\nX = pd.concat([X_, dummies[['League_N', 'Division_W', 'NewLeague_N']]], axis=1)\nX.info()\n\nX.head(5)", "Exercise 5.4\nYou should cross-validate to select the lambda just like any other tuning parameter. Sklearn gives you the option of using their fast cross-validation script via linear_model.LassoCV, see the documentation. You can create a leave-one-out cross validator with model_selection.LeaveOneOut then pass this to LassoCV with the cv argument. Do this, and see what the returned fit and selected lambda are.", "## Answer to 5.4\n## Fit the lasso and cross-validate, increased max_iter to achieve convergence\nloo = model_selection.LeaveOneOut()\nlooiter = loo.split(X)\nhitlasso = linear_model.LassoCV(cv=looiter,max_iter=2000) \nhitlasso.fit(X,y)\n\nprint(\"The selected lambda value is {:.2f}\".format(hitlasso.alpha_))\n\nhitlasso.coef_", "We can also compare this to the selected model from forward stagewise regression:\n[-0.21830515, 0.38154135, 0. , 0. , 0. ,\n 0.16139123, 0. , 0. , 0. , 0. ,\n 0.09994524, 0.56696569, -0.16872682, 0.16924078, 0. ,\n 0. , 0. , -0.19429699, 0. ]\nThis is not exactly the same model with differences in the inclusion or exclusion of AtBat, HmRun, Runs, RBI, Years, CHmRun, Errors, League_N, Division_W, NewLeague_N", "bforw = [-0.21830515, 0.38154135, 0. , 0. , 0. ,\n 0.16139123, 0. , 0. , 0. , 0. ,\n 0.09994524, 0.56696569, -0.16872682, 0.16924078, 0. ,\n 0. , 0. , -0.19429699, 0. ]\n\nprint(\", \".join(X.columns[(hitlasso.coef_ != 0.) != (bforw != 0.)]))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
sdpython/ensae_teaching_cs
_doc/notebooks/td2a_ml/seasonal_timeseries.ipynb
mit
[ "Timeseries\nCe notebook présente quelques étapes simples pour une série temporelle. La plupart utilise le module statsmodels.tsa.", "from jyquickhelper import add_notebook_menu\nadd_notebook_menu()\n\n%matplotlib inline", "Données\nLes données sont artificielles mais simulent ce que pourraient être le chiffre d'affaires d'un magasin de quartier, des samedi très forts, une semaine morne, un Noël chargé, un été plat.", "from ensae_teaching_cs.data import generate_sells\nimport pandas\ndf = pandas.DataFrame(generate_sells())\ndf.head()", "Premiers graphiques\nLa série a deux saisonnalités, hebdomadaire, mensuelle.", "import matplotlib.pyplot as plt\nfig, ax = plt.subplots(1, 2, figsize=(14, 4))\ndf.iloc[-30:].set_index('date').plot(ax=ax[0])\ndf.set_index('date').plot(ax=ax[1])\nax[0].set_title(\"chiffre d'affaire sur le dernier mois\")\nax[1].set_title(\"chiffre d'affaire sur deux ans\");", "Elle a une vague tendance, on peut calculer un tendance à l'ordre 1, 2, ...", "from statsmodels.tsa.tsatools import detrend\nnotrend = detrend(df.value, order=1)\ndf[\"notrend\"] = notrend\ndf[\"trend\"] = df['value'] - notrend\nax = df.plot(x=\"date\", y=[\"value\", \"trend\"], figsize=(14,4))\nax.set_title('tendance');", "Autocorrélations...", "from statsmodels.tsa.stattools import acf\ncor = acf(df.value)\ncor\n\nfig, ax = plt.subplots(1, 1, figsize=(14,2))\nax.plot(cor)\nax.set_title(\"Autocorrélogramme\");", "La première saisonalité apparaît, 7, 14, 21... Les autocorrélations partielles confirment cela, plutôt 7 jours.", "from statsmodels.tsa.stattools import pacf\nfrom statsmodels.graphics.tsaplots import plot_pacf\nplot_pacf(df.value, lags=50);", "Comme il n'y a rien le dimanche, il vaut mieux les enlever. Garder des zéros nous priverait de modèles multiplicatifs.", "df[\"weekday\"] = df.date.dt.weekday\ndf.head()\n\ndf_nosunday = df[df.weekday != 6]\ndf_nosunday.head(n=10)\n\nfig, ax = plt.subplots(1, 1, figsize=(14,2))\ncor = acf(df_nosunday.value)\nax.plot(cor)\nax.set_title(\"Autocorrélogramme\");\n\nplot_pacf(df_nosunday.value, lags=50);", "On décompose la série en tendance + saisonnalité. Les étés et Noël apparaissent.", "from statsmodels.tsa.seasonal import seasonal_decompose\nres = seasonal_decompose(df_nosunday.value, freq=7)\nres.plot();\n\nplt.plot(res.seasonal[-30:])\nplt.title(\"Saisonnalité\");\n\ncor = acf(res.trend[5:-5]);\nplt.plot(cor);", "On cherche maintenant la saisonnalité de la série débarrassée de sa tendance herbdomadaire. On retrouve la saisonnalité mensuelle.", "res_year = seasonal_decompose(res.trend[5:-5], freq=25)\nres_year.plot();", "Test de stationnarité\nLe test KPSS permet de tester la stationnarité d'une série.", "from statsmodels.tsa.stattools import kpss\nkpss(res.trend[5:-5])", "Comme ce n'est pas toujours facile à interpréter, on simule une variable aléatoire gaussienne donc sans tendance.", "from numpy.random import randn\nbruit = randn(1000)\nkpss(bruit)", "Et puis une série avec une tendance forte.", "from numpy.random import randn\nfrom numpy import arange\nbruit = randn(1000) * 100 + arange(1000) / 10\nkpss(bruit)", "Une valeur forte indique une tendance et la série en a clairement une.\nPrédiction\nLes modèles AR, ARMA, ARIMA se concentrent sur une série à une dimension. En machine learning, il y a la série et plein d'autres informations. 
On construit une matrice avec des séries décalées.", "from statsmodels.tsa.tsatools import lagmat\nlag = 8\nX = lagmat(df_nosunday[\"value\"], lag)\nlagged = df_nosunday.copy()\nfor c in range(1,lag+1):\n lagged[\"lag%d\" % c] = X[:, c-1]\nlagged.tail()", "On ajoute ou on réécrit le jour de la semaine qu'on utilise comme variable supplémentaire.", "lagged[\"weekday\"] = lagged.date.dt.weekday\n\nX = lagged.drop([\"date\", \"value\", \"notrend\", \"trend\"], axis=1)\nY = lagged[\"value\"]\nX.shape, Y.shape\n\nfrom numpy import corrcoef\ncorrcoef(X)", "Etrange autant de grandes valeurs, cela veut dire que la tendance est trop forte pour calculer des corrélations, il vaudrait mieux tout recommencer avec la série $\\Delta Y_t = Y_t - Y_{t-1}$. Bref, passons...", "X.columns", "Une régression linéaire car les modèles linéaires sont toujours de bonnes baseline et pour connaître le modèle simulé, on ne fera pas beaucoup mieux.", "from sklearn.linear_model import LinearRegression\nclr = LinearRegression()\nclr.fit(X, Y)\n\nfrom sklearn.metrics import r2_score\nr2_score(Y, clr.predict(X))\n\nclr.coef_", "On retrouve la saisonnalité, $Y_t$ et $Y_{t-6}$ sont de mèches.", "for i in range(1, X.shape[1]):\n print(\"X(t-%d)\" % (i), r2_score(Y, X.iloc[:, i]))", "Auparavant (l'année dernière en fait), je construisais deux bases, apprentissage et tests, comme ceci :", "n = X.shape[0]\nX_train = X.iloc[:n * 2//3]\nX_test = X.iloc[n * 2//3:]\nY_train = Y[:n * 2//3]\nY_test = Y[n * 2//3:]", "Et puis scikit-learn est arrivée avec TimeSeriesSplit.", "from sklearn.model_selection import TimeSeriesSplit\ntscv = TimeSeriesSplit(n_splits=5)\nfor train_index, test_index in tscv.split(lagged):\n data_train, data_test = lagged.iloc[train_index, :], lagged.iloc[test_index, :]\n print(\"TRAIN:\", data_train.shape, \"TEST:\", data_test.shape)", "Et on calé une forêt aléatoire...", "import warnings\nfrom sklearn.ensemble import RandomForestRegressor\nclr = RandomForestRegressor()\n\ndef train_test(clr, train_index, test_index):\n data_train = lagged.iloc[train_index, :]\n data_test = lagged.iloc[test_index, :]\n clr.fit(data_train.drop([\"value\", \"date\", \"notrend\", \"trend\"], \n axis=1), \n data_train.value)\n r2 = r2_score(data_test.value,\n clr.predict(data_test.drop([\"value\", \"date\", \"notrend\",\n \"trend\"], axis=1).values))\n return r2\n\nwarnings.simplefilter(\"ignore\")\nlast_test_index = None\nfor train_index, test_index in tscv.split(lagged):\n r2 = train_test(clr, train_index, test_index) \n if last_test_index is not None:\n r2_prime = train_test(clr, last_test_index, test_index) \n print(r2, r2_prime)\n else:\n print(r2)\n last_test_index = test_index", "2 ans coupé en 5, soit tous les 5 mois, ça veut dire que ce découpage inclut parfois Noël, parfois l'été et que les performances y seront très sensibles.", "from sklearn.metrics import r2_score\nr2 = r2_score(data_test.value,\n clr.predict(data_test.drop([\"value\", \"date\", \"notrend\",\n \"trend\"], axis=1).values))\nr2", "On compare avec le $r_2$ avec le même $r_2$ obtenu en utilisant $Y_{t-1}$, $Y_{t-2}$, ... 
$Y_{t-d}$ comme prédiction.", "for i in range(1, 9):\n print(i, \":\", r2_score(data_test.value, data_test[\"lag%d\" % i]))\n\nlagged[:5]", "En fait le jour de la semaine est une variable catégorielle, on crée une colonne par jour.", "from sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\n\ncols = ['lag1', 'lag2', 'lag3',\n 'lag4', 'lag5', 'lag6', 'lag7', 'lag8']\nct = ColumnTransformer(\n [('pass', \"passthrough\", cols),\n (\"dummies\", OneHotEncoder(), [\"weekday\"])])\npred = ct.fit(lagged).transform(lagged[:5])\npred", "On met tout dans un pipeline parce que c'est plus joli, plus pratique aussi.", "from sklearn.pipeline import make_pipeline\nfrom sklearn.decomposition import PCA, TruncatedSVD \ncols = ['lag1', 'lag2', 'lag3',\n 'lag4', 'lag5', 'lag6', 'lag7', 'lag8']\nmodel = make_pipeline(\n make_pipeline(\n ColumnTransformer(\n [('pass', \"passthrough\", cols),\n (\"dummies\", make_pipeline(OneHotEncoder(), \n TruncatedSVD(n_components=2)), [\"weekday\"])]),\n LinearRegression()))\nmodel.fit(lagged, lagged[\"value\"])", "C'est plus facile à voir visuellement.", "from mlinsights.plotting import pipeline2dot\ndot = pipeline2dot(model, lagged)\nfrom jyquickhelper import RenderJsDot\nRenderJsDot(dot)\n\nr2_score(lagged['value'], model.predict(lagged))", "Templating\nComplètement hors sujet mais utile.", "from jinja2 import Template\ntemplate = Template('Hello {{ name }}!')\ntemplate.render(name='John Doe')\n\ntemplate = Template(\"\"\"\n{{ name }}\n{{ \"-\" * len(name) }}\nPossède :\n{% for i in range(len(meubles)) %}\n- {{meubles[i]}}{% endfor %}\n\"\"\")\nmeubles = ['table', \"tabouret\"]\nprint(template.render(name='John Doe Doe', len=len,\n meubles=meubles))" ]
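Returning to the time-series part of the notebook (the templating digression above is unrelated): the same lag-feature idea can be built without statsmodels' lagmat, using pandas shift. The sketch below is self-contained on a synthetic weekly-seasonal series; the column names and generated data are illustrative, not the generate_sells data, and the model is scored fold by fold with TimeSeriesSplit.

python
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import TimeSeriesSplit
from sklearn.metrics import r2_score

rng = np.random.RandomState(0)
dates = pd.date_range("2020-01-01", periods=400, freq="D")
value = 10 + 3 * np.sin(2 * np.pi * dates.dayofweek / 7) + 0.5 * rng.randn(len(dates))
df = pd.DataFrame({"date": dates, "value": value})

lag = 8
for c in range(1, lag + 1):
    df["lag%d" % c] = df["value"].shift(c)   # lagged copies of the series
df["weekday"] = df["date"].dt.weekday
df = df.dropna()                             # the first `lag` rows have no history

X = df.drop(columns=["date", "value"])
y = df["value"]

tscv = TimeSeriesSplit(n_splits=5)
for train_idx, test_idx in tscv.split(X):
    model = LinearRegression().fit(X.iloc[train_idx], y.iloc[train_idx])
    print(round(r2_score(y.iloc[test_idx], model.predict(X.iloc[test_idx])), 3))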
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
bbalasub1/glmnet_python
docs/glmnet_vignette.ipynb
gpl-3.0
[ "Glmnet Vignette (for python)\nJuly 12, 2017\nAuthors\nTrevor Hastie, B. J. Balakumar\nIntroduction\nGlmnet is a package that fits a generalized linear model via penalized maximum likelihood. The regularization path is computed for the lasso or elasticnet penalty at a grid of values for the regularization parameter lambda. The algorithm is extremely fast, and can exploit sparsity in the input matrix x. It fits linear, logistic and multinomial, poisson, and Cox regression models. A variety of predictions can be made from the fitted models. It can also fit multi-response linear regression.\nThe authors of glmnet are Jerome Friedman, Trevor Hastie, Rob Tibshirani and Noah Simon. The Python package is maintained by B. J. Balakumar. The R package is maintained by Trevor Hastie. The matlab version of glmnet is maintained by Junyang Qian. This vignette describes the usage of glmnet in Python.\nglmnet solves the following problem:\n$$\n \\min_{\\beta_0, \\beta}\\frac{1}{N} \\sum_{i=1}^N w_i l(y_i, \\beta_0+ \\beta^T x_i)^2+\\lambda \\left[ (1-\\alpha)||\\beta||_2^2/2 + \\alpha||\\beta||_1\\right],\n$$\nover a grid of values of $\\lambda$ covering the entire range. Here $l(y, \\eta)$ is the negative log-likelihood contribution for observation $i$; e.g. for the Gaussian case it is $\\frac{1}{2} l(y-\\eta)^2$. The elastic-net penalty is controlled by $\\alpha$, and bridges the gap between lasso ($\\alpha=1$, the default) and ridge ($\\alpha=0$). The tuning parameter $\\lambda$ controls the overall strength of the penalty.\nIt is known that the ridge penalty shrinks the coefficients of correlated predictors towards each other while the lasso tends to pick one of them and discard the others. The elastic-net penalty mixes these two; if predictors are correlated in groups, an $\\alpha=0.5$ tends to select the groups in or out together. This is a higher level parameter, and users might pick a value upfront, else experiment with a few different values. One use of $\\alpha$ is for numerical stability; for example, the elastic net with $\\alpha = 1-\\varepsilon$ for some small $\\varepsilon>0$ performs much like the lasso, but removes any degeneracies and wild behavior caused by extreme correlations.\nThe glmnet algorithms use cyclical coordinate descent, which successively optimizes the objective function over each parameter with others fixed, and cycles repeatedly until convergence. The package also makes use of the strong rules for efficient restriction of the active set. Due to highly efficient updates and techniques such as warm starts and active-set convergence, our algorithms can compute the solution path very fast.\nThe code can handle sparse input-matrix formats, as well as range constraints on coefficients. 
The core of glmnet is a set of fortran subroutines, which make for very fast execution.\nThe package also includes methods for prediction and plotting, and a function that performs K-fold cross-validation.\nInstallation\nUsing pip (recommended, courtesy: Han Fan)\npip install glmnet_py\n\nComplied from source\ngit clone https://github.com/bbalasub1/glmnet_python.git\ncd glmnet_python\npython setup.py install\n\nRequirement\nPython 3, Linux\nCurrently, the checked-in version of GLMnet.so is compiled for the following config:\nLinux: Linux version 2.6.32-573.26.1.el6.x86_64 (gcc version 4.4.7 20120313 (Red Hat 4.4.7-16) (GCC) ) \n OS: CentOS 6.7 (Final) \n Hardware: 8-core Intel(R) Core(TM) i7-2630QM \n gfortran: version 4.4.7 20120313 (Red Hat 4.4.7-17) (GCC)\nUsage\nimport glmnet_python\nfrom glmnet import glmnet\n\nLinear Regression\nLinear regression here refers to two families of models. One is gaussian, the Gaussian family, and the other is mgaussian, the multiresponse Gaussian family. We first discuss the ordinary Gaussian and the multiresponse one after that.\nLinear Regression - Gaussian family\ngaussian is the default family option in the function glmnet. Suppose we have observations $x_i \\in \\mathbb{R}^p$ and the responses $y_i \\in \\mathbb{R}, i = 1, \\ldots, N$. The objective function for the Gaussian family is\n$$\n\\min_{(\\beta_0, \\beta) \\in \\mathbb{R}^{p+1}}\\frac{1}{2N} \\sum_{i=1}^N (y_i -\\beta_0-x_i^T \\beta)^2+\\lambda \\left[ (1-\\alpha)||\\beta||_2^2/2 + \\alpha||\\beta||_1\\right],\n$$\nwhere \n$\\lambda \\geq 0$ is a complexity parameter and $0 \\leq \\alpha \\leq 1$ is a compromise between ridge ($\\alpha = 0$) and lasso ($\\alpha = 1$).\nCoordinate descent is applied to solve the problem. Specifically, suppose we have current estimates $\\tilde{\\beta_0}$ and $\\tilde{\\beta}\\ell$ $\\forall j\\in 1,\\ldots,p$. By computing the gradient at $\\beta_j = \\tilde{\\beta}_j$ and simple calculus, the update is\n$$\n\\tilde{\\beta}_j \\leftarrow \\frac{S(\\frac{1}{N}\\sum{i=1}^N x_{ij}(y_i-\\tilde{y}_i^{(j)}),\\lambda \\alpha)}{1+\\lambda(1-\\alpha)},\n$$\nwhere \n$\\tilde{y}i^{(j)} = \\tilde{\\beta}_0 + \\sum{\\ell \\neq j} x_{i\\ell} \\tilde{\\beta}\\ell$, and $S(z, \\gamma)$ is the soft-thresholding operator with value $\\text{sign}(z)(|z|-\\gamma)+$.\nThis formula above applies when the x variables are standardized to have unit variance (the default); it is slightly more complicated when they are not. Note that for \"family=gaussian\", glmnet standardizes $y$ to have unit variance before computing its lambda sequence (and then unstandardizes the resulting coefficients); if you wish to reproduce/compare results with other software, best to supply a standardized $y$ first (Using the \"1/N\" variance formula).\nglmnet provides various options for users to customize the fit. We introduce some commonly used options here and they can be specified in the glmnet function.\n\n\nalpha is for the elastic-net mixing parameter $\\alpha$, with range $\\alpha \\in [0,1]$. $\\alpha = 1$ is the lasso (default) and $\\alpha = 0$ is the ridge.\n\n\nweights is for the observation weights. Default is 1 for each observation. (Note: glmnet rescales the weights to sum to N, the sample size.)\n\n\nnlambda is the number of $\\lambda$ values in the sequence. Default is 100.\n\n\nlambda can be provided, but is typically not and the program constructs a sequence. When automatically generated, the $\\lambda$ sequence is determined by lambda.max and lambda.min.ratio. 
The latter is the ratio of smallest value of the generated $\\lambda$ sequence (say lambda.min) to lambda.max. The program then generated nlambda values linear on the log scale from lambda.max down to lambda.min. lambda.max is not given, but easily computed from the input $x$ and $y$; it is the smallest value for lambda such that all the coefficients are zero. For alpha=0 (ridge) lambda.max would be $\\infty$; hence for this case we pick a value corresponding to a small value for alpha close to zero.)\n\n\nstandardize is a logical flag for x variable standardization, prior to fitting the model sequence. The coefficients are always returned on the original scale. Default is standardize=TRUE.\n\n\nFor more information, type help(glmnet) or simply ?glmnet. Let us start by loading the data:", "# Jupyter setup to expand cell display to 100% width on your screen (optional)\nfrom IPython.core.display import display, HTML\ndisplay(HTML(\"<style>.container { width:100% !important; }</style>\"))\n\n# Import relevant modules and setup for calling glmnet\n%reset -f\n%matplotlib inline\n\nimport sys\nsys.path.append('../test')\nsys.path.append('../lib')\nimport scipy, importlib, pprint, matplotlib.pyplot as plt, warnings\nfrom glmnet import glmnet; from glmnetPlot import glmnetPlot \nfrom glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict\nfrom cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef\nfrom cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict\n\n# parameters\nbaseDataDir= '../data/'\n\n# load data\nx = scipy.loadtxt(baseDataDir + 'QuickStartExampleX.dat', dtype = scipy.float64)\ny = scipy.loadtxt(baseDataDir + 'QuickStartExampleY.dat', dtype = scipy.float64)\n\n# create weights\nt = scipy.ones((50, 1), dtype = scipy.float64)\nwts = scipy.row_stack((t, 2*t))", "As an example, we set $\\alpha = 0.2$ (more like a ridge regression), and give double weights to the latter half of the observations. To avoid too long a display here, we set nlambda to 20. In practice, however, the number of values of $\\lambda$ is recommended to be 100 (default) or more. In most cases, it does not come with extra cost because of the warm-starts used in the algorithm, and for nonlinear models leads to better convergence properties.", "# call glmnet\nfit = glmnet(x = x.copy(), y = y.copy(), family = 'gaussian', \\\n weights = wts, \\\n alpha = 0.2, nlambda = 20\n )", "We can then print the glmnet object.", "glmnetPrint(fit)", "This displays the call that produced the object fit and a three-column matrix with columns Df (the number of nonzero coefficients), %dev (the percent deviance explained) and Lambda (the corresponding value of $\\lambda$).\n(Note that the digits option can used to specify significant digits in the printout.)\nHere the actual number of $\\lambda$'s here is less than specified in the call. The reason lies in the stopping criteria of the algorithm. According to the default internal settings, the computations stop if either the fractional change in deviance down the path is less than $10^{-5}$ or the fraction of explained deviance reaches $0.999$. From the last few lines , we see the fraction of deviance does not change much and therefore the computation ends when meeting the stopping criteria. We can change such internal parameters. For details, see the Appendix section or type help(glmnet.control).\nWe can plot the fitted object as in the previous section. 
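The lambda.max rule described in the option list above can be written down directly for the Gaussian elastic-net case. The sketch below is an assumption-level illustration (synthetic standardized predictors, centered response, alpha > 0), not glmnet's internal code: it computes the smallest lambda giving an all-zero solution from the KKT condition and builds the log-linear grid down to lambda_max * lambda_min_ratio.

python
import numpy as np

rng = np.random.RandomState(0)
n, p = 100, 20
alpha, nlambda, lambda_min_ratio = 0.2, 20, 1e-4

x = rng.randn(n, p)
x = (x - x.mean(axis=0)) / x.std(axis=0)     # standardized predictors
y = x[:, 0] - 2.0 * x[:, 1] + rng.randn(n)
y = y - y.mean()

# KKT condition: beta = 0 solves the problem iff max_j |x_j'y| / n <= lambda * alpha
lambda_max = np.max(np.abs(x.T.dot(y))) / (n * alpha)
lambdas = np.logspace(np.log10(lambda_max),
                      np.log10(lambda_max * lambda_min_ratio), num=nlambda)
print(lambdas[:5])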
There are more options in the plot function.\nUsers can decide what is on the X-axis. xvar allows three measures: \"norm\" for the $\\ell_1$-norm of the coefficients (default), \"lambda\" for the log-lambda value and \"dev\" for %deviance explained.\nUsers can also label the curves with variable sequence numbers simply by setting label = TRUE. Let's plot \"fit\" against the log-lambda value and with each curve labeled.", "glmnetPlot(fit, xvar = 'lambda', label = True);", "Now when we plot against %deviance we get a very different picture. This is percent deviance explained on the training data. What we see here is that toward the end of the path this value are not changing much, but the coefficients are \"blowing up\" a bit. This lets us focus attention on the parts of the fit that matter. This will especially be true for other models, such as logistic regression.", "glmnetPlot(fit, xvar = 'dev', label = True);", "We can extract the coefficients and make predictions at certain values of $\\lambda$. Two commonly used options are:\n\n\ns specifies the value(s) of $\\lambda$ at which extraction is made.\n\n\nexact indicates whether the exact values of coefficients are desired or not. That is, if exact = TRUE, and predictions are to be made at values of s not included in the original fit, these values of s are merged with object$lambda, and the model is refit before predictions are made. If exact=FALSE (default), then the predict function uses linear interpolation to make predictions for values of s that do not coincide with lambdas used in the fitting algorithm.\n\n\nA simple example is:", "any(fit['lambdau'] == 0.5)\n\nglmnetCoef(fit, s = scipy.float64([0.5]), exact = False)", "The output is for False.(TBD) The exact = 'True' option is not yet implemented. \nUsers can make predictions from the fitted object. In addition to the options in coef, the primary argument is newx, a matrix of new values for x. The type option allows users to choose the type of prediction:\n* \"link\" gives the fitted values\n\n\n\"response\" the sames as \"link\" for \"gaussian\" family.\n\n\n\"coefficients\" computes the coefficients at values of s\n\n\n\"nonzero\" retuns a list of the indices of the nonzero coefficients for each value of s.\n\n\nFor example,", "fc = glmnetPredict(fit, x[0:5,:], ptype = 'response', \\\n s = scipy.float64([0.05]))\nprint(fc)", "gives the fitted values for the first 5 observations at $\\lambda = 0.05$. If multiple values of s are supplied, a matrix of predictions is produced.\nUsers can customize K-fold cross-validation. In addition to all the glmnet parameters, cvglmnet has its special parameters including nfolds (the number of folds), foldid (user-supplied folds), ptype(the loss used for cross-validation):\n\n\n\"deviance\" or \"mse\" uses squared loss\n\n\n\"mae\" uses mean absolute error\n\n\nAs an example,", "warnings.filterwarnings('ignore') \ncvfit = cvglmnet(x = x.copy(), y = y.copy(), ptype = 'mse', nfolds = 20)\nwarnings.filterwarnings('default')", "does 20-fold cross-validation, based on mean squared error criterion (default though).\nParallel computing is also supported by cvglmnet. Parallel processing is turned off by default. It can be turned on using parallel=True in the cvglmnet call. \nParallel computing can significantly speed up the computation process, especially for large-scale problems. But for smaller problems, it could result in a reduction in speed due to the additional overhead. 
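As an aside on the exact = False behaviour described above: when the requested s falls between two grid lambdas, the returned coefficients come from linear interpolation of the stored path solutions. The numbers below are purely hypothetical and only illustrate the interpolation step itself, not glmnet's implementation.

python
import numpy as np

lambdas = np.array([1.0, 0.5, 0.25])      # hypothetical grid values, decreasing
coefs = np.array([[0.0, 0.3, 0.45],       # one row per predictor,
                  [0.0, 0.0, 0.20]])      # one column per lambda
s = 0.4                                   # requested value, not on the grid

i = np.searchsorted(-lambdas, -s)         # locate s in the decreasing grid (no boundary handling)
w = (lambdas[i - 1] - s) / (lambdas[i - 1] - lambdas[i])
print((1 - w) * coefs[:, i - 1] + w * coefs[:, i])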
User discretion is advised.\nFunctions coef and predict on cv.glmnet object are similar to those for a glmnet object, except that two special strings are also supported by s (the values of $\\lambda$ requested):\n\n\n\"lambda.1se\": the largest $\\lambda$ at which the MSE is within one standard error of the minimal MSE.\n\n\n\"lambda.min\": the $\\lambda$ at which the minimal MSE is achieved.", "cvfit['lambda_min']\n\ncvglmnetCoef(cvfit, s = 'lambda_min')\n\ncvglmnetPredict(cvfit, newx = x[0:5,], s='lambda_min')", "Users can control the folds used. Here we use the same folds so we can also select a value for $\\alpha$.", "foldid = scipy.random.choice(10, size = y.shape[0], replace = True)\ncv1=cvglmnet(x = x.copy(),y = y.copy(),foldid=foldid,alpha=1)\ncv0p5=cvglmnet(x = x.copy(),y = y.copy(),foldid=foldid,alpha=0.5)\ncv0=cvglmnet(x = x.copy(),y = y.copy(),foldid=foldid,alpha=0)", "There are no built-in plot functions to put them all on the same plot, so we are on our own here:", "f = plt.figure()\nf.add_subplot(2,2,1)\ncvglmnetPlot(cv1)\nf.add_subplot(2,2,2)\ncvglmnetPlot(cv0p5)\nf.add_subplot(2,2,3)\ncvglmnetPlot(cv0)\nf.add_subplot(2,2,4)\nplt.plot( scipy.log(cv1['lambdau']), cv1['cvm'], 'r.')\nplt.hold(True)\nplt.plot( scipy.log(cv0p5['lambdau']), cv0p5['cvm'], 'g.')\nplt.plot( scipy.log(cv0['lambdau']), cv0['cvm'], 'b.')\nplt.xlabel('log(Lambda)')\nplt.ylabel(cv1['name'])\nplt.xlim(-6, 4)\nplt.ylim(0, 9)\nplt.legend( ('alpha = 1', 'alpha = 0.5', 'alpha = 0'), loc = 'upper left', prop={'size':6});", "We see that lasso (alpha=1) does about the best here. We also see that the range of lambdas used differs with alpha.\nCoefficient upper and lower bounds\nThese are recently added features that enhance the scope of the models. Suppose we want to fit our model, but limit the coefficients to be bigger than -0.7 and less than 0.5. This is easily achieved via the upper.limits and lower.limits arguments:", "cl = scipy.array([[-0.7], [0.5]], dtype = scipy.float64)\ntfit=glmnet(x = x.copy(),y= y.copy(), cl = cl)\nglmnetPlot(tfit);", "These are rather arbitrary limits; often we want the coefficients to be positive, so we can set only lower.limit to be 0.\n(Note, the lower limit must be no bigger than zero, and the upper limit no smaller than zero.)\nThese bounds can be a vector, with different values for each coefficient. If given as a scalar, the same number gets recycled for all.\nPenalty factors\nThis argument allows users to apply separate penalty factors to each coefficient. Its default is 1 for each parameter, but other values can be specified. In particular, any variable with penalty.factor equal to zero is not penalized at all! Let $v_j$ denote the penalty factor for $j$ th variable. The penalty term becomes\n$$\n\\lambda \\sum_{j=1}^p \\boldsymbol{v_j} P_\\alpha(\\beta_j) = \\lambda \\sum_{j=1}^p \\boldsymbol{v_j} \\left[ (1-\\alpha)\\frac{1}{2} \\beta_j^2 + \\alpha |\\beta_j| \\right].\n$$\nNote the penalty factors are internally rescaled to sum to nvars.\nThis is very useful when people have prior knowledge or preference over the variables. 
In many cases, some variables may be so important that one wants to keep them all the time, which can be achieved by setting corresponding penalty factors to 0:", "pfac = scipy.ones([1, 20])\npfac[0, 4] = 0; pfac[0, 9] = 0; pfac[0, 14] = 0\npfit = glmnet(x = x.copy(), y = y.copy(), penalty_factor = pfac)\nglmnetPlot(pfit, label = True);", "We see from the labels that the three variables with 0 penalty factors always stay in the model, while the others follow typical regularization paths and shrunken to 0 eventually.\nSome other useful arguments. exclude allows one to block certain variables from being the model at all. Of course, one could simply subset these out of x, but sometimes exclude is more useful, since it returns a full vector of coefficients, just with the excluded ones set to zero. There is also an intercept argument which defaults to True; if False the intercept is forced to be zero.\nCustomizing plots\nSometimes, especially when the number of variables is small, we want to add variable labels to a plot. Since glmnet is intended primarily for wide data, this is not supprted in plot.glmnet. However, it is easy to do, as the following little toy example shows.\nWe first generate some data, with 10 variables, and for lack of imagination and ease we give them simple character names. We then fit a glmnet model, and make the standard plot.", "scipy.random.seed(101)\nx = scipy.random.rand(100,10)\ny = scipy.random.rand(100,1)\nfit = glmnet(x = x, y = y)\nglmnetPlot(fit);", "We wish to label the curves with the variable names. Here's a simple way to do this, using the matplotlib library in python (and a little research into how to customize it). We need to have the positions of the coefficients at the end of the path.", "%%capture\n# Output from this sample code has been suppressed due to (possible) Jupyter limitations\n# The code works just fine from ipython (tested on spyder)\nc = glmnetCoef(fit)\nc = c[1:, -1] # remove intercept and get the coefficients at the end of the path \nh = glmnetPlot(fit)\nax1 = h['ax1']\nxloc = plt.xlim()\nxloc = xloc[1]\nfor i in range(len(c)):\n ax1.text(xloc, c[i], 'var' + str(i)); ", "We have done nothing here to avoid overwriting of labels, in the event that they are close together. This would be a bit more work, but perhaps best left alone, anyway.\nLinear Regression - Multiresponse Gaussian Family\nThe multiresponse Gaussian family is obtained using family = \"mgaussian\" option in glmnet. It is very similar to the single-response case above. This is useful when there are a number of (correlated) responses - the so-called \"multi-task learning\" problem. Here the sharing involves which variables are selected, since when a variable is selected, a coefficient is fit for each response. Most of the options are the same, so we focus here on the differences with the single response model.\nObviously, as the name suggests, $y$ is not a vector, but a matrix of quantitative responses in this section. 
The coefficients at each value of lambda are also a matrix as a result.\nHere we solve the following problem:\n$$\n\\min_{(\\beta_0, \\beta) \\in \\mathbb{R}^{(p+1)\\times K}}\\frac{1}{2N} \\sum_{i=1}^N ||y_i -\\beta_0-\\beta^T x_i||^2_F+\\lambda \\left[ (1-\\alpha)||\\beta||F^2/2 + \\alpha\\sum{j=1}^p||\\beta_j||_2\\right].\n$$\nHere, $\\beta_j$ is the jth row of the $p\\times K$ coefficient matrix $\\beta$, and we replace the absolute penalty on each single coefficient by a group-lasso penalty on each coefficient K-vector $\\beta_j$ for a single predictor $x_j$.\nWe use a set of data generated beforehand for illustration.", "# Import relevant modules and setup for calling glmnet\n%reset -f\n%matplotlib inline\n\nimport sys\nsys.path.append('../test')\nsys.path.append('../lib')\nimport scipy, importlib, pprint, matplotlib.pyplot as plt, warnings\nfrom glmnet import glmnet; from glmnetPlot import glmnetPlot \nfrom glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict\nfrom cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef\nfrom cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict\n\n# parameters\nbaseDataDir= '../data/'\n\n# load data\nx = scipy.loadtxt(baseDataDir + 'MultiGaussianExampleX.dat', dtype = scipy.float64, delimiter = ',')\ny = scipy.loadtxt(baseDataDir + 'MultiGaussianExampleY.dat', dtype = scipy.float64, delimiter = ',')", "We fit the data, with an object \"mfit\" returned.", "mfit = glmnet(x = x.copy(), y = y.copy(), family = 'mgaussian')", "For multiresponse Gaussian, the options in glmnet are almost the same as the single-response case, such as alpha, weights, nlambda, standardize. A exception to be noticed is that standardize.response is only for mgaussian family. The default value is FALSE. If standardize.response = TRUE, it standardizes the response variables.\nTo visualize the coefficients, we use the plot function.", "glmnetPlot(mfit, xvar = 'lambda', label = True, ptype = '2norm');", "Note that we set type.coef = \"2norm\". Under this setting, a single curve is plotted per variable, with value equal to the $\\ell_2$ norm. The default setting is type.coef = \"coef\", where a coefficient plot is created for each response (multiple figures).\nxvar and label are two other options besides ordinary graphical parameters. They are the same as the single-response case.\nWe can extract the coefficients at requested values of $\\lambda$ by using the function coef and make predictions by predict. The usage is similar and we only provide an example of predict here.", "f = glmnetPredict(mfit, x[0:5,:], s = scipy.float64([0.1, 0.01]))\nprint(f[:,:,0], '\\n')\nprint(f[:,:,1])", "The prediction result is saved in a three-dimensional array with the first two dimensions being the prediction matrix for each response variable and the third indicating the response variables.\nWe can also do k-fold cross-validation. 
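The group penalty in the multiresponse objective above acts on the rows of the p x K coefficient matrix. The minimal sketch below simply evaluates that penalty term for an arbitrary matrix (random numbers, illustration only) to make the grouping explicit: a predictor is dropped only when its whole row is zero.

python
import numpy as np

rng = np.random.RandomState(0)
p, K = 5, 3
beta = rng.randn(p, K)
beta[3:] = 0.0                              # two predictors dropped as a group

row_norms = np.linalg.norm(beta, axis=1)    # ||beta_j||_2 for j = 1..p
print(row_norms, row_norms.sum())           # the sum enters the penalty term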
The options are almost the same as the ordinary Gaussian family and we do not expand here.", "warnings.filterwarnings('ignore')\ncvmfit = cvglmnet(x = x.copy(), y = y.copy(), family = \"mgaussian\")\nwarnings.filterwarnings('default')", "We plot the resulting cv.glmnet object \"cvmfit\".", "cvglmnetPlot(cvmfit)", "To show explicitly the selected optimal values of $\\lambda$, type", "cvmfit['lambda_min']\n\ncvmfit['lambda_1se']", "As before, the first one is the value at which the minimal mean squared error is achieved and the second is for the most regularized model whose mean squared error is within one standard error of the minimal.\nPrediction for cvglmnet object works almost the same as for glmnet object. We omit the details here.\nLogistic Regression\nLogistic regression is another widely-used model when the response is categorical. If there are two possible outcomes, we use the binomial distribution, else we use the multinomial.\nLogistic Regression: Binomial Models\nFor the binomial model, suppose the response variable takes value in $\\mathcal{G}={1,2}$. Denote $y_i = I(g_i=1)$. We model\n$$\n\\mbox{Pr}(G=2|X=x)+\\frac{e^{\\beta_0+\\beta^Tx}}{1+e^{\\beta_0+\\beta^Tx}},\n$$\nwhich can be written in the following form\n$$\n\\log\\frac{\\mbox{Pr}(G=2|X=x)}{\\mbox{Pr}(G=1|X=x)}=\\beta_0+\\beta^Tx,\n$$\nthe so-called \"logistic\" or log-odds transformation.\nThe objective function for the penalized logistic regression uses the negative binomial log-likelihood, and is\n$$\n\\min_{(\\beta_0, \\beta) \\in \\mathbb{R}^{p+1}} -\\left[\\frac{1}{N} \\sum_{i=1}^N y_i \\cdot (\\beta_0 + x_i^T \\beta) - \\log (1+e^{(\\beta_0+x_i^T \\beta)})\\right] + \\lambda \\big[ (1-\\alpha)||\\beta||_2^2/2 + \\alpha||\\beta||_1\\big].\n$$\nLogistic regression is often plagued with degeneracies when $p > N$ and exhibits wild behavior even when $N$ is close to $p$;\nthe elastic-net penalty alleviates these issues, and regularizes and selects variables as well.\nOur algorithm uses a quadratic approximation to the log-likelihood, and then coordinate descent on the resulting penalized weighted least-squares problem. These constitute an outer and inner loop.\nFor illustration purpose, we load pre-generated input matrix x and the response vector y from the data file.", "# Import relevant modules and setup for calling glmnet\n%reset -f\n%matplotlib inline\n\nimport sys\nsys.path.append('../test')\nsys.path.append('../lib')\nimport scipy, importlib, pprint, matplotlib.pyplot as plt, warnings\nfrom glmnet import glmnet; from glmnetPlot import glmnetPlot \nfrom glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict\nfrom cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef\nfrom cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict\n\n# parameters\nbaseDataDir= '../data/'\n\n# load data\nx = scipy.loadtxt(baseDataDir + 'BinomialExampleX.dat', dtype = scipy.float64, delimiter = ',')\ny = scipy.loadtxt(baseDataDir + 'BinomialExampleY.dat', dtype = scipy.float64)", "The input matrix $x$ is the same as other families. For binomial logistic regression, the response variable $y$ should be either a factor with two levels, or a two-column matrix of counts or proportions.\nOther optional arguments of glmnet for binomial regression are almost same as those for Gaussian family. 
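For readers who want to see the binomial objective above as code, here is a small NumPy evaluation of the penalized negative log-likelihood for a given coefficient vector. The data are synthetic and this is only an illustration of the criterion; glmnet minimizes it with its own quadratic-approximation and coordinate-descent scheme.

python
import numpy as np

def penalized_binomial_loss(beta0, beta, X, y, lam, alpha):
    eta = beta0 + X.dot(beta)
    loglik = np.mean(y * eta - np.log1p(np.exp(eta)))
    penalty = lam * ((1 - alpha) * 0.5 * np.sum(beta ** 2) + alpha * np.sum(np.abs(beta)))
    return -loglik + penalty

rng = np.random.RandomState(0)
X = rng.randn(80, 5)
y = (rng.rand(80) < 0.5).astype(float)
print(penalized_binomial_loss(0.0, np.zeros(5), X, y, lam=0.1, alpha=1.0))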
Don't forget to set family option to \"binomial\".", "fit = glmnet(x = x.copy(), y = y.copy(), family = 'binomial')", "Like before, we can print and plot the fitted object, extract the coefficients at specific $\\lambda$'s and also make predictions. For plotting, the optional arguments such as xvar and label are similar to the Gaussian. We plot against the deviance explained and show the labels.", "glmnetPlot(fit, xvar = 'dev', label = True);", "Prediction is a little different for logistic from Gaussian, mainly in the option type. \"link\" and \"response\" are never equivalent and \"class\" is only available for logistic regression. In summary,\n* \"link\" gives the linear predictors\n\n\n\"response\" gives the fitted probabilities\n\n\n\"class\" produces the class label corresponding to the maximum probability.\n\n\n\"coefficients\" computes the coefficients at values of s\n\n\n\"nonzero\" retuns a list of the indices of the nonzero coefficients for each value of s.\n\n\nFor \"binomial\" models, results (\"link\", \"response\", \"coefficients\", \"nonzero\") are returned only for the class corresponding to the second level of the factor response.\nIn the following example, we make prediction of the class labels at $\\lambda = 0.05, 0.01$.", "glmnetPredict(fit, newx = x[0:5,], ptype='class', s = scipy.array([0.05, 0.01]))", "For logistic regression, cvglmnet has similar arguments and usage as Gaussian. nfolds, weights, lambda, parallel are all available to users. There are some differences in ptype: \"deviance\" and \"mse\" do not both mean squared loss and \"class\" is enabled. Hence,\n* \"mse\" uses squared loss.\n\n\n\"deviance\" uses actual deviance.\n\n\n\"mae\" uses mean absolute error.\n\n\n\"class\" gives misclassification error.\n\n\n\"auc\" (for two-class logistic regression ONLY) gives area under the ROC curve.\n\n\nFor example,", "warnings.filterwarnings('ignore')\ncvfit = cvglmnet(x = x.copy(), y = y.copy(), family = 'binomial', ptype = 'class')\nwarnings.filterwarnings('default')", "It uses misclassification error as the criterion for 10-fold cross-validation.\nWe plot the object and show the optimal values of $\\lambda$.", "cvglmnetPlot(cvfit)\n\ncvfit['lambda_min']\n\ncvfit['lambda_1se']", "coef and predict are simliar to the Gaussian case and we omit the details. We review by some examples.", "cvglmnetCoef(cvfit, s = 'lambda_min')", "As mentioned previously, the results returned here are only for the second level of the factor response.", "cvglmnetPredict(cvfit, newx = x[0:10, ], s = 'lambda_min', ptype = 'class')", "Like other GLMs, glmnet allows for an \"offset\". This is a fixed vector of N numbers that is added into the linear predictor.\nFor example, you may have fitted some other logistic regression using other variables (and data), and now you want to see if the present variables can add anything. So you use the predicted logit from the other model as an offset in.\nLike other GLMs, glmnet allows for an \"offset\". This is a fixed vector of N numbers that is added into the linear predictor.\nFor example, you may have fitted some other logistic regression using other variables (and data), and now you want to see if the present variables can add anything. So you use the predicted logit from the other model as an offset in.\nLogistic Regression - Multinomial Models\nFor the multinomial model, suppose the response variable has $K$ levels ${\\cal G}={1,2,\\ldots,K}$. 
Here we model\n$$\\mbox{Pr}(G=k|X=x)=\\frac{e^{\\beta_{0k}+\\beta_k^Tx}}{\\sum_{\\ell=1}^Ke^{\\beta_{0\\ell}+\\beta_\\ell^Tx}}.$$\nLet ${Y}$ be the $N \\times K$ indicator response matrix, with elements $y_{i\\ell} = I(g_i=\\ell)$. Then the elastic-net penalized negative log-likelihood function becomes\n$$\n\\ell({\\beta_{0k},\\beta_{k}}1^K) = -\\left[\\frac{1}{N} \\sum{i=1}^N \\Big(\\sum_{k=1}^Ky_{il} (\\beta_{0k} + x_i^T \\beta_k)- \\log \\big(\\sum_{k=1}^K e^{\\beta_{0k}+x_i^T \\beta_k}\\big)\\Big)\\right] +\\lambda \\left[ (1-\\alpha)||\\beta||F^2/2 + \\alpha\\sum{j=1}^p||\\beta_j||_q\\right].\n$$\nHere we really abuse notation! $\\beta$ is a $p\\times K$ matrix of coefficients. $\\beta_k$ refers to the kth column (for outcome category k), and $\\beta_j$ the jth row (vector of K coefficients for variable j).\nThe last penalty term is $||\\beta_j||_q$, we have two options for q: $q\\in {1,2}$.\nWhen q=1, this is a lasso penalty on each of the parameters. When q=2, this is a grouped-lasso penalty on all the K coefficients for a particular variables, which makes them all be zero or nonzero together.\nThe standard Newton algorithm can be tedious here. Instead, we use a so-called partial Newton algorithm by making a partial quadratic approximation to the log-likelihood, allowing only $(\\beta_{0k}, \\beta_k)$ to vary for a single class at a time.\nFor each value of $\\lambda$, we first cycle over all classes indexed by $k$, computing each time a partial quadratic approximation about the parameters of the current class. Then the inner procedure is almost the same as for the binomial case.\nThis is the case for lasso (q=1). When q=2, we use a different approach, which we wont dwell on here.\nFor the multinomial case, the usage is similar to logistic regression, and we mainly illustrate by examples and address any differences. We load a set of generated data.", "# Import relevant modules and setup for calling glmnet\n%reset -f\n%matplotlib inline\n\nimport sys\nsys.path.append('../test')\nsys.path.append('../lib')\nimport scipy, importlib, pprint, matplotlib.pyplot as plt, warnings\nfrom glmnet import glmnet; from glmnetPlot import glmnetPlot \nfrom glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict\nfrom cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef\nfrom cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict\n\n# parameters\nbaseDataDir= '../data/'\n\n# load data\nx = scipy.loadtxt(baseDataDir + 'MultinomialExampleX.dat', dtype = scipy.float64, delimiter = ',')\ny = scipy.loadtxt(baseDataDir + 'MultinomialExampleY.dat', dtype = scipy.float64)", "The optional arguments in glmnet for multinomial logistic regression are mostly similar to binomial regression except for a few cases.\nThe response variable can be a nc &gt;= 2 level factor, or a nc-column matrix of counts or proportions.\nInternally glmnet will make the rows of this matrix sum to 1, and absorb the total mass into the weight for that observation.\noffset should be a nobs x nc matrix if there is one.\nA special option for multinomial regression is mtype, which allows the usage of a grouped lasso penalty if mtype = 'grouped'. 
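A quick numerical illustration of the multinomial model above: given an intercept vector and a p x K coefficient matrix (random values here, purely for illustration), the class probabilities are a softmax over the K linear predictors.

python
import numpy as np

rng = np.random.RandomState(0)
p, K = 4, 3
beta0 = rng.randn(K)
beta = rng.randn(p, K)
x = rng.randn(p)

eta = beta0 + x.dot(beta)          # K linear predictors
prob = np.exp(eta - eta.max())     # subtract the max for numerical stability
prob /= prob.sum()
print(prob, prob.sum())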
This will ensure that the multinomial coefficients for a variable are all in or out together, just like for the multi-response Gaussian.", "fit = glmnet(x = x.copy(), y = y.copy(), family = 'multinomial', mtype = 'grouped')", "We plot the resulting object \"fit\".", "glmnetPlot(fit, xvar = 'lambda', label = True, ptype = '2norm');", "The options are xvar, label and ptype, in addition to other ordinary graphical parameters.\nxvar and label are the same as other families while ptype is only for multinomial regression and multiresponse Gaussian model. It can produce a figure of coefficients for each response variable if ptype = \"coef\" or a figure showing the $\\ell_2$-norm in one figure if ptype = \"2norm\"\nWe can also do cross-validation and plot the returned object.", "warnings.filterwarnings('ignore')\ncvfit=cvglmnet(x = x.copy(), y = y.copy(), family='multinomial', mtype = 'grouped');\nwarnings.filterwarnings('default')\ncvglmnetPlot(cvfit)", "Note that although mtype is not a typical argument in cvglmnet, in fact any argument that can be passed to glmnet is valid in the argument list of cvglmnet. We also use parallel computing to accelerate the calculation.\nUsers may wish to predict at the optimally selected $\\lambda$:", "cvglmnetPredict(cvfit, newx = x[0:10, :], s = 'lambda_min', ptype = 'class')", "Poisson Models\nPoisson regression is used to model count data under the assumption of Poisson error, or otherwise non-negative data where the mean and variance are proportional. Like the Gaussian and binomial model, the Poisson is a member of the exponential family of distributions. We usually model its positive mean on the log scale: $\\log \\mu(x) = \\beta_0+\\beta' x$.\nThe log-likelihood for observations ${x_i,y_i}1^N$ is given my\n$$\nl(\\beta|X, Y) = \\sum{i=1}^N (y_i (\\beta_0+\\beta' x_i) - e^{\\beta_0+\\beta^Tx_i}.\n$$\nAs before, we optimize the penalized log-likelihood:\n$$\n\\min_{\\beta_0,\\beta} -\\frac1N l(\\beta|X, Y) + \\lambda \\left((1-\\alpha) \\sum_{i=1}^N \\beta_i^2/2) +\\alpha \\sum_{i=1}^N |\\beta_i|\\right).\n$$\nGlmnet uses an outer Newton loop, and an inner weighted least-squares loop (as in logistic regression) to optimize this criterion.\nFirst, we load a pre-generated set of Poisson data.", "# Import relevant modules and setup for calling glmnet\n%reset -f\n%matplotlib inline\n\nimport sys\nsys.path.append('../test')\nsys.path.append('../lib')\nimport scipy, importlib, pprint, matplotlib.pyplot as plt, warnings\nfrom glmnet import glmnet; from glmnetPlot import glmnetPlot \nfrom glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict\nfrom cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef\nfrom cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict\n\n# parameters\nbaseDataDir= '../data/'\n\n# load data\nx = scipy.loadtxt(baseDataDir + 'PoissonExampleX.dat', dtype = scipy.float64, delimiter = ',')\ny = scipy.loadtxt(baseDataDir + 'PoissonExampleY.dat', dtype = scipy.float64, delimiter = ',')", "We apply the function glmnet with the \"poisson\" option.", "fit = glmnet(x = x.copy(), y = y.copy(), family = 'poisson')", "The optional input arguments of glmnet for \"poisson\" family are similar to those for others.\noffset is a useful argument particularly in Poisson models.\nWhen dealing with rate data in Poisson models, the counts collected are often based on different exposures, such as length of time observed, area and years. 
A poisson rate $\\mu(x)$ is relative to a unit exposure time, so if an observation $y_i$ was exposed for $E_i$ units of time, then the expected count would be $E_i\\mu(x)$, and the log mean would be $\\log(E_i)+\\log(\\mu(x)$. In a case like this, we would supply an offset $\\log(E_i)$ for each observation.\nHence offset is a vector of length nobs that is included in the linear predictor. Other families can also use options, typically for different reasons.\n(Warning: if offset is supplied in glmnet, offsets must also also be supplied to predict to make reasonable predictions.)\nAgain, we plot the coefficients to have a first sense of the result.", "glmnetPlot(fit);", "Like before, we can extract the coefficients and make predictions at certain $\\lambda$'s by using coef and predict respectively. The optional input arguments are similar to those for other families. In function predict, the option type, which is the type of prediction required, has its own specialties for Poisson family. That is,\n* \"link\" (default) gives the linear predictors like others\n* \"response\" gives the fitted mean\n* \"coefficients\" computes the coefficients at the requested values for s, which can also be realized by coef function\n* \"nonzero\" returns a a list of the indices of the nonzero coefficients for each value of s.\nFor example, we can do as follows:", "glmnetCoef(fit, s = scipy.float64([1.0]))\n\nglmnetPredict(fit, x[0:5,:], ptype = 'response', s = scipy.float64([0.1, 0.01]))", "We may also use cross-validation to find the optimal $\\lambda$'s and thus make inferences.", "warnings.filterwarnings('ignore')\ncvfit = cvglmnet(x.copy(), y.copy(), family = 'poisson')\nwarnings.filterwarnings('default')", "Options are almost the same as the Gaussian family except that for type.measure,\n* \"deviance\" (default) gives the deviance\n* \"mse\" stands for mean squared error\n* \"mae\" is for mean absolute error.\nWe can plot the cvglmnet object.", "cvglmnetPlot(cvfit)", "We can also show the optimal $\\lambda$'s and the corresponding coefficients.", "optlam = scipy.array([cvfit['lambda_min'], cvfit['lambda_1se']]).reshape([2,])\ncvglmnetCoef(cvfit, s = optlam)", "The predict method is similar and we do not repeat it here.\nCox Models\nThe Cox proportional hazards model is commonly used for the study of the relationship beteween predictor variables and survival time. In the usual survival analysis framework, we have data of the form $(y_1, x_1, \\delta_1), \\ldots, (y_n, x_n, \\delta_n)$ where $y_i$, the observed time, is a time of failure if $\\delta_i$ is 1 or right-censoring if $\\delta_i$ is 0. We also let $t_1 < t_2 < \\ldots < t_m$ be the increasing list of unique failure times, and $j(i)$ denote the index of the observation failing at time $t_i$.\nThe Cox model assumes a semi-parametric form for the hazard\n$$\nh_i(t) = h_0(t) e^{x_i^T \\beta},\n$$\nwhere $h_i(t)$ is the hazard for patient $i$ at time $t$, $h_0(t)$ is a shared baseline hazard, and $\\beta$ is a fixed, length $p$ vector. 
In the classic setting $n \\geq p$, inference is made via the partial likelihood\n$$\nL(\\beta) = \\prod_{i=1}^m \\frac{e^{x_{j(i)}^T \\beta}}{\\sum_{j \\in R_i} e^{x_j^T \\beta}},\n$$\nwhere $R_i$ is the set of indices $j$ with $y_j \\geq t_i$ (those at risk at time $t_i$).\nNote there is no intercept in the Cox mode (its built into the baseline hazard, and like it, would cancel in the partial likelihood.)\nWe penalize the negative log of the partial likelihood, just like the other models, with an elastic-net penalty.\nWe use a pre-generated set of sample data and response. Users can load their own data and follow a similar procedure. In this case $x$ must be an $n\\times p$ matrix of covariate values — each row corresponds to a patient and each column a covariate. $y$ is an $n \\times 2$ matrix, with a column \"time\" of failure/censoring times, and \"status\" a 0/1 indicator, with 1 meaning the time is a failure time, and zero a censoring time.", "# Import relevant modules and setup for calling glmnet\n%reset -f\n%matplotlib inline\n\nimport sys\nsys.path.append('../test')\nsys.path.append('../lib')\nimport scipy, importlib, pprint, matplotlib.pyplot as plt, warnings\nfrom glmnet import glmnet; from glmnetPlot import glmnetPlot \nfrom glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict\nfrom cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef\nfrom cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict\n\n# parameters\nbaseDataDir= '../data/'\n\n# load data\nx = scipy.loadtxt(baseDataDir + 'CoxExampleX.dat', dtype = scipy.float64, delimiter = ',')\ny = scipy.loadtxt(baseDataDir + 'CoxExampleY.dat', dtype = scipy.float64, delimiter = ',')", "The Surv function in the package survival can create such a matrix. Note, however, that the coxph and related linear models can handle interval and other fors of censoring, while glmnet can only handle right censoring in its present form.\nWe apply the glmnet function to compute the solution path under default settings.", "fit = glmnet(x = x.copy(), y = y.copy(), family = 'cox')", "All the standard options are available such as alpha, weights, nlambda and standardize. Their usage is similar as in the Gaussian case and we omit the details here. Users can also refer to the help file help(glmnet).\nWe can plot the coefficients.", "glmnetPlot(fit);", "As before, we can extract the coefficients at certain values of $\\lambda$.", "glmnetCoef(fit, s = scipy.float64([0.05]))", "Since the Cox Model is not commonly used for prediction, we do not give an illustrative example on prediction. If needed, users can refer to the help file by typing help(predict.glmnet).\nCurrently, cross-validation is not implemented for cox case. But this is not difficult to do using the existing glmnet calls that work perfectly well for this case. (TBD: cvglmnet to be implemented for cox).\nReferences\n<p>Jerome Friedman, Trevor Hastie and Rob Tibshirani. (2008). <br>\n<a href=\"http://www.jstatsoft.org/v33/i01/\">Regularization Paths for Generalized Linear Models via Coordinate Descent</a><br>\n<em>Journal of Statistical Software</em>, Vol. 33(1), 1-22 Feb 2010.</p>\n<p>Noah Simon, Jerome Friedman, Trevor Hastie and Rob Tibshirani. (2011).<br>\n<a href=\"http://www.jstatsoft.org/v39/i05/\">Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent</a><br>\n<em>Journal of Statistical Software</em>, Vol. 
39(5), 1-13.</p>\n<p>Robert Tibshirani, Jacob Bien, Jerome Friedman, Trevor Hastie, Noah Simon, Jonathan Taylor, Ryan J. Tibshirani. (2012).<br>\n<a href=\"http://www-stat.stanford.edu/~tibs/ftp/strong.pdf\">Strong Rules for Discarding Predictors in Lasso-type Problems</a><br>\n<em>Journal of the Royal Statistical Society: Series B (Statistical Methodology)</em>, Vol. 74(2), 245-266.</p>\n<p>Noah Simon, Jerome Friedman and Trevor Hastie. (2013).<br>\n<a href=\"http://www.stanford.edu/~hastie/Papers/multi_response.pdf\">A Blockwise Descent Algorithm for Group-penalized Multiresponse and Multinomial Regression</a><br>" ]
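Tying back to the Cox section above: the partial likelihood is easy to evaluate directly, which is handy for, e.g., hand-rolled cross-validation while cvglmnet support for family = 'cox' is still missing. The sketch below (synthetic data, no tied failure times, right censoring only) computes the partial log-likelihood for a given coefficient vector.

python
import numpy as np

def cox_partial_loglik(beta, X, time, status):
    # log partial likelihood, summing over observed failures only
    eta = X.dot(beta)
    ll = 0.0
    for i in np.where(status == 1)[0]:
        at_risk = time >= time[i]                    # risk set R_i
        ll += eta[i] - np.log(np.sum(np.exp(eta[at_risk])))
    return ll

rng = np.random.RandomState(0)
X = rng.randn(30, 4)
time = rng.exponential(size=30)
status = (rng.rand(30) < 0.7).astype(int)            # 1 = failure, 0 = censored
print(cox_partial_loglik(np.zeros(4), X, time, status))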
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
waltervh/BornAgain-tutorial
old/python/tutorial.ipynb
gpl-3.0
[ "Introduction to Python\nUseful links\n\nBornAgain: http://bornagainproject.org\nBornAgain tutorial: https://github.com/scgmlz/BornAgain-tutorial\nPython official tutorial: https://docs.python.org/3/tutorial/\nAnaconda Python: https://www.continuum.io/\nPyCharm IDE: https://www.jetbrains.com/pycharm/\n\nNote that BornAgain Win/Mac requires Python 2.7\nClone the BornAgain Tutorial Repository\nFrom command line:\nbash\ngit clone https://github.com/scgmlz/BornAgain-tutorial.git\nWindows/Mac: can also use Github Desktop.\nPower Users: Build BornAgain with Python 3 Support\nbash\ngit clone https://github.com/scgmlz/BornAgain.git\nmkdir build; cd build\ncmake .. -DCMAKE_BUILD_TYPE=Release -DBORNAGAIN_USE_PYTHON3=ON\nmake &amp;&amp; make install\nVerify your Python Environment\nCheck the Python version:\n$ python --version\nPython 3.5.2\nCheck for numpy and matplotlib\n$ python -c \"import numpy\"\n$ python -c \"import matplotlib\"\nCheck for BornAgain Python module\n$ python -c \"import bornagain\"\nRunning Python\n\n\nDirect from command line:\n$ python -c \"print 'hello, world'\"\nhello, world\n\n\nRun script from command line:\n$ echo \"print 'hello, world'\" &gt; hello.py\n$ python hello.py\nhello, world\n\n\nDefault interactive interpreter:\n```\n$ python\n\n\n\nx = 5\nx * x\n25\n```\n\n\n\n\n\nIPython interactive interpreter:\n$ ipython\nIn [1]: x = 5\nIn [2]: x * x\nOut [2]: 25\n\n\nIPython interactive notebook:\n$ ipython notebook\n\n\nJupyter interactive notebook:\n$ jupyter notebook\n\nRun within PyCharm IDE\n\nNotebook support is included by default with Anaconda, and can be installed as an optional package on most Linux distros.\nPython 2.7 vs. 3.5 Compatibility\nThere are a few important differences between Python 2.7 and 3.5. The line below is used to ensure that this notebook remains compatible with both. There is a list of differences between Python 2.7 and 3.5 near the end of this notebook.", "from __future__ import print_function", "Basic Data Types\nPython has many data types, e.g.\n* numeric: int, float, complex\n* string\n* boolean values, i.e. 
true and false\n* sequences: list, tuple\n* dict\nVariables are declared via assignment:\npython\nx = 5", "# scratch area", "Numeric Types\nPython numeric types are similar to those in other languages such as C/C++.\npython\nx = 5 # int\nx = 10**100 # long (2.7) or int (3.5)\nx = 3.141592 # float\nx = 1.0j # complex\nNote: ordinary machine types can be accessed/manipulated through the ctypes module.", "# scratch area", "Arithmetic Operations\npython\n3 + 2 # addition\n3 - 2 # subtraction\n3 * 2 # multiplication\n3 ** 2 # exponentiation\n3 / 2 # division (warning: int (2.7) or float (3.5))\n3 % 2 # modulus", "# scratch area", "Exercise\nUse the Python interpreter to perform some basic arithemetic.\nStrings\npython\nx = \"hello\" # string enclosed with double quotes\ny = 'world' # string enclosed with single quotes\nx + ' ' + y # string concatenation via +\n\"{} + {} = {}\".format(5 , 6, 5+6) # string formatting", "# scratch area", "Lists\npython\nx = [1, 2, 3] # initialize list\nx[1] = 0 # modify element\nx.append(4) # append to end\nx.extend([5, 6]) # extend\nx[3:5] # slice", "# scratch area", "Tuples\nTuples are similar to lists, but are immutable:\npython\nx = (1, 2, 3) # initialize a tuple with ()\nx[0] = 4 # will result in error", "# scratch area", "List Comprehension\nComprehension provides a convenient way to create new lists:\npython\n[ i for i in range (5) ] # result: [0, 1, 2, 3, 4]\n[ i**2 for i in range (5) ] # result: [0, 1, 4, 9, 16]\nthe_list = [5, 2, 6, 1] \n[ i**2 for i in the_list ] # result [25, 4, 36, 1]", "# scratch area", "Exercise\nCreate a list of floating point numbers and then create a second list which contains the squares of the entries of teh fist list\nBoolean Values and Comparisons\nBoolean types take the values True or False. The result of a comparison operator is boolean.\npython\n5 &lt; 6 # evalutes to True\n5 &gt;= 6 # evaluates to False\n5 == 6 # evaluates to False\nLogical operations:\npython\nTrue and False # False\nTrue or False # True\nnot True # False\nTrue ^ False # True (exclusive or)", "# scratch area", "Functions\nFunctions are defined with def:\npython\ndef hello():\n print 'hello, world'\nNote: Python uses indentation to denote blocks of code, rather than braces {} as in many other languages. It is common to use either 4 spaces or 2 spaces to indent. It doesn't matter, as long as you are consistent.\nUse the return keyword for a function which returns a value:\npython\ndef square(x):\n return x**2", "# scratch area", "Loops and Flow Control\nFor loop:", "for i in range(10):\n print(i**2)", "It is also possible to use for..in to iterate through elements of a list:", "for i in ['hello', 'world']:\n print(i)", "While loops have the form while condition:", "i = 0\nwhile i < 10:\n print(i**2)\n i = i + 1", "The keywords break and continue can be used for flow control inside a loop\n* continue: skip to the next iteration of the loop\n* break: jump out of the loop entirely", "for i in range(10):\n if i == 3:\n continue\n if i == 7:\n break\n print(i)", "Use the keywords if, elif, else for branching\npython\nif 5 &gt; 6:\n # never reached\n pass\nelif 1 &gt; 2:\n # reached\n pass\nelse:\n # never reached\n pass", "# scratch area", "Exercise\nWrite a function fib(n) which returns the nth Fibonacci number. The Fibonacci numbers are defined by \n* fib(0) = fib(1) = 1\n* fib(n) = fib(n-1) + fib(n-2) for n &gt;= 2.\nExercise\n\n”Write a program that prints the numbers from 1 to 100. 
But for\nmultiples of three print Fizz instead of the number and for the\nmultiples of five print Buzz. For numbers which are multiples of\nboth three and five print FizzBuzz.”\n\nhttp://wiki.c2.com/?FizzBuzzTest", "# scratch area", "Modules\nLoad external modules (built-in or user-defined) via import:", "import math\nprint(math.pi)\nprint(math.sin(math.pi/2.0))", "Rename modules with as:", "import math as m\nprint(m.pi)", "Load specific functions or submodules:", "from math import pi, sin\nprint(sin(pi/2.0))\n\n# scratch area", "User-defined Modules\nAny code written in a separate file (with .py extension) can be imported as a module. Suppose we have a script my_module.py which defines a function do_something(). Then we can call it as", "import my_module\nmy_module.do_something()", "Exercise\nImplement your FizzBuzz solution as a function called FizzBuzz() in a module called fizzbuzz. Check that it works by importing it and calling FizzBuzz() in a separate script.", "# scratch area", "numpy\nnumpy is a module used for numerical calculation. The main data type is numpy.array, which is a multidimensional array of numbers (integer, float, complex).", "import numpy as np\nx = np.array([1, 2, 3, 4])\nprint(x.sum())\nprint(x.mean())", "The basic arithmetic operations work elementwise on numpy arrays:", "x = np.array([1, 2, 3, 4])\ny = np.array([5, 6, 7, 8])\n\nprint(x + y)\nprint(x * y)\nprint(x / y)", "It is also possible to call functions on numpy arrays:", "x = np.array([1, 2, 3, 4])\nprint(np.sin(x))\nprint(np.log(x))\n\n# scratch area", "Generating numpy Arrays\nnumpy arrays can be generated with zeros, ones, linspace, and rand:", "print(np.zeros(4))\nprint(np.ones(3))\nprint(np.linspace(-1, 1, num=4))\nprint(np.random.rand(2))\n\n# scratch area", "Plotting with matplotlib\nWe use matplotlib.pyplot for plotting:", "import numpy as np\nfrom matplotlib import pyplot as plt\n\nx = np.linspace(-3.14, 3.14, num=100)\ny = np.sin(x)\n\nplt.plot(x, y)\nplt.xlabel('x values')\nplt.ylabel('y')\nplt.title('y=sin(x)')\nplt.show()", "Exercise\nCreate plots the following functions\n* f(x) = log(x)\n* f(x) = sqrt(x)\n* f(x) = x**2\n* f(x) = log(1 + x**2)\n* anything else you might find interesting or challenging\nCombining Plots\nPlots can be combined using addition:", "x = np.linspace(-10, 10, num=100)\n\ny1 = np.sin(x)\ny2 = np.cos(x)\ny3 = np.arctan(x)\n\n\nplt.plot(x, y1, x, y2, x, y3)\nplt.show()", "todo\narray manipulation routines\nnumpy.flipud, fliplr, transpose, rot90, flatten, ravel\nColormap Plots\nPlot color maps with pcolormesh:", "x = np.linspace (-1, 1, num =100)\ny = np.linspace (-1, 1, num =100)\nxx, yy = np.meshgrid (x, y)\nz = np.sin(xx**2 + yy**2 + yy)\nplt.pcolormesh(x, y, z, shading = 'gouraud')\nplt.show()", "Or with imshow:", "plt.imshow(z, aspect='auto')\nplt.show()", "Note that the image is flipped because images start from top left and go to bottom right. 
We can fix this with flipud:", "plt.imshow(np.flipud(z), aspect='auto')\nplt.show()\n\n# scratch area", "3D Plots", "from mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n\n%matplotlib inline\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.plot_surface(xx, yy, z, rstride=5, cstride=5, cmap=cm.coolwarm, linewidth=1, antialiased=True)\nplt.show()", "3D Wireframe Plot", "%matplotlib inline\nfig = plt.figure()\nax = fig.gca(projection='3d')\nax.plot_wireframe(xx, yy, z, rstride=5, cstride=5, antialiased=True)\nplt.show()", "Gallery of matplotlib Plots\nSee http://matplotlib.org/gallery.html\nPlotting Exercise\nConsider the function f(x, y) = exp(x + 1.0j*y) for −4 ≤ x, y ≤ 4. Create colormap and 3d plots of the magnitude, real,\nand imaginary parts of f.", "# scratch", "Plotting Images", "x = np.linspace(-2, 2, num=100)\ny = np.linspace(-2, 2, num=100)\n\nresult = np.flipud(np.array([[u*v for u in x] for v in y]))\n\nfig = plt.figure()\nplt.imshow(result, extent=[x.min(), x.max(), y.min(), y.max()], aspect='auto')\nplt.show()", "Classes\nClasses can be used to package data and methods together:", "class SomeClass:\n def __init__ (self, x):\n self.x = x\n\n def doSomething(self):\n print(\"my x value is {}\".format(self.x))\n \nobj = SomeClass(5)\nobj.doSomething()\n\n# scratch area", "Inheritance\nClasses can be derived from others:", "class SomeOtherClass (SomeClass):\n def __init__ (self, x, y):\n SomeClass.__init__ (self, x) \n self.y = y\n\n def doSomethingElse(self):\n print(\"my y value is {}\".format(self.y))\n \nother_obj = SomeOtherClass(5, 6)\nother_obj.doSomething()\nother_obj.doSomethingElse()", "Polymorphism\nAn instance of a derived class is automatically an instance of its base class:", "print('The type of obj is {}'.format(type(obj)))\nprint('The type of other_obj is {}'.format(type(other_obj)))\n\nprint('obj is instance of SomeClass? {}'.format(isinstance(obj, SomeClass)))\nprint('obj is instance of SomeOtherClass? {}'.format(isinstance(obj, SomeOtherClass)))\n\nprint('other_obj is instance of SomeClass? {}'.format(isinstance(obj, SomeClass)))\nprint('other_obj is instance of SomeOtherClass? {}'.format( isinstance(obj, SomeOtherClass)))\n\n# scratch area", "Exercise\ntodo", "# todo", "todo\nPython 2 vs. 3\nKey differences include:\nprint statement\ninteger division\nint vs. long\nnew style classes\nsome standard modules/functions have been moved/renamed\nThe “ future ” module can be used to write code compatible\nwith both Python 2 and 3." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
PYPIT/PYPIT
doc/nb/LRIS_blue_notes.ipynb
gpl-3.0
[ "Notes on the LRIS Blue reduction", "# imports\nsys.path.append(os.path.abspath('/Users/xavier/local/Python/PYPIT/src'))\nimport arload as pyp_arload\nimport ario as pyp_ario", "Detectors\nNote: LRISb has employed different detectors. We may need to\nmake PYPIT backwards compatible.\nFITS file", "fil = '/Users/xavier/PYPIT/LRIS_blue/Raw/b150910_2033.fits.gz'\nhdu = fits.open(fil)\nhdu.info()\n\nhead0['OBSTYPE']\n\nhead0 = hdu[0].header\nhead0\n#head0['DATE']\n\nplt.clf()\nplt.imshow(hdu[1].data)\nplt.show()", "Display Raw LRIS image in Ginga", "### Need to port readmhdufits\n\nhead0\n\nreload(pyp_ario)\nimg, head = pyp_ario.read_lris('/Users/xavier/PYPIT/LRIS_blue/Raw/b150910_2070.fits',TRIM=True)\n\nxdb.ximshow(img)\n\nimport subprocess\n\nsubprocess.call([\"touch\", \"dum.fil\"])\n\nb = 'as'\n\n'{1:s}'.format(b)\n\nrange(1,5)\n\ntmp = np.ones((10,20))\n\ntmp[0:1,:].shape" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
madsenmj/ml-introduction-course
Class03/Class03.ipynb
apache-2.0
[ "Class 03\nBig Data Cleaning: Data Transformations\nAlthough machine learning is the exciting part of this course, most data scientists spend the vast majority of their time doing data clearning and data wrangling. Some put the figure at as high as 90% of their time! There is a good reason for this: most of the data out there is not in a format needed for the machine learning algorithms. So, in order to do machine learning, the data must be reorganized, cleaned, rearranged, normalized, enriched, and filtered. We'll begin this process today and continue working on it through the course.\nFeature Types\nWe start with an overview of some of the types of features we could potentially use. In the end, all of the data are represented as bits in the computer (ones and zeros), but we can organize those bits in a bunch of different ways in the pandas dataframes. We'll build a \"fake\" dataframe with the different types in them.\nIntegers\nIntegers are counting numbers and other whole numbers (including negatives): ...,-4,-3,-2,-1,0,1,2,3,4,... They are somewhat special because they can be stored very efficiently and the computer can operate on them very efficiently (positive integers especially). Pandas stores these using a data type called int64 where the 64 means they are 64-bit integers (capable of storing any number between -9,223,372,036,854,775,807 and 9,223,372,036,854,775,807)\nWe'll use a sample dataset to look at the different types of data as we go.", "import pandas as pd\n\nsampledata = pd.read_csv('Class03_sample_dataframe.csv')\n\n# This will let us look at the data type of each column. Note that the first column is an \"int64\".\nprint(sampledata.dtypes)\n\n# These are the values stored in this column.\nprint(\"\\nInteger Values\")\nprint(sampledata['IntCol'].values)", "Floating point numbers\nFloating point numbers, or decimal numbers are just that: any number with a decimal place in it such as 4.566642 and -156.986714. Pandas stores these as a float64. They could also be stored in scientific notation like this: 4.509013e+14. This means \"4.509013 times 10 raised to the +14\". These are still floating point numbers and are treated like any other decimal number.", "print(\"Float Values\")\nprint(sampledata['FloatCol'].values)", "Before we move on, I'd like to take a quick look at the data graphically.", "sampledata.plot(kind='scatter', x='IntCol',y='FloatCol')", "Because this is \"fake\" data, I put in a functional dependence here. The float column looks like it is some function of the integer column. It is almost always a good idea to visualize your data early on to see what it looks like graphically!\nText\nPandas can store text in its columns. Because there are a number of different types of text objects, by default pandas will store text as an object which just means it doesn't know which of the types it really is. Text can, in principle, be anything you want it to be, so it is both the most flexible and the most challenging data type.", "print(\"Text Values\")\nprint(sampledata['TextCol'].values)", "Categorical\nA categorical data type is a finite set of different objects. These objects are represented internally as integers but may be displayed as text or other generic objects. To make things simple, we'll start with a categorical object that has three possible values: \"yes\", \"no\", and \"maybe\". Internally, pandas will represent these as integers 0,1, and 2. 
But it knows that this is a categorical data type, so it keeps track of the text value associated with the integer and displays that for the user.", "print(\"Categorical Values\")\nprint(sampledata['CatCol'].values)", "When we loaded the data, it actually loaded this column as an object, which means it doesn't know that it is supposed to be a categorical column. We will tell pandas to do that. We will use the astype() command that will tell pandas to change the data type of that column. We check to make sure it worked, too. Note that the \"CatCol2\" column is now a 'category' type.\nData Processing Tip\nA quick aside here: there are a couple of ways of doing this kind of transformation on the data. We'll see this a little later when we do more column-wise processing. We could either change the original column or we could create a new column. The second method doesn't overwrite the original data and will be what we typically do. That way if something goes wrong or we want to change how we are processing the data, we still have the original data column to work with.", "sampledata[\"CatCol2\"] = sampledata[\"CatCol\"].astype('category')\nsampledata.dtypes", "We can now look at how the data are stored as categorical data. We can get thi internal codes for each of the entries like this:", "sampledata[\"CatCol2\"].cat.codes", "We can also get a list of the categories that pandas found when converting the column. These are in order- the first entry corresponds to 0, the second to 1, etc.", "sampledata[\"CatCol2\"].cat.categories", "We may encounter situations where we want to plot the data and visualize each category as its own color. We saw how to do this back in Class01.", "import seaborn as sns\nsns.set_style('white')\nsns.lmplot(x='IntCol', y='FloatCol', data=sampledata, hue='CatCol2', fit_reg=False)", "Date/Times\nWe will frequently encounter date/time values in working with data. There are many different ways that these values get stored, but mostly we'll find that they start as a text object. We need to know how they are stored (in what order are the year-month-day-hour-minute-second values are stored). There are utilities to convert any type of date/time string to a datetime object in pandas. We will start with the ISO 8601 datetime standard, since it is both the most logical and the easiest to work with. Dates are stored like this: 2017-01-23 where we use a four-digit year, then a two-digit month and a two-digit day, all separated by dashes. If we want to add a time, it is appended to the date like this: 2017-01-23T03:13:42. The \"T\" tells the computer that we've added a time. Then it is followed by a two-digit hour (using 00 as midnight and 23 as 11pm) a colon, a two-digit minute, a colon, and a two-digit second. There are other variations of this that can include a time-zone, but we will leave those for later.", "print(\"Date/Time Values\")\nprint(sampledata['DateCol'].values)", "They are currently stored as objects, not as datetimes. We need to convert this column as well, but we'll use a special pandas function to do that. Take a quick look at the reference page for this function to see what else it can do. Note that the new column has type datetime64[ns]. That means that the date format is capable of counting nanoseconds. 
We won't use all of that capability, but pandas used that format because our dates are accurate to the second.", "sampledata[\"DateCol2\"] = pd.to_datetime(sampledata[\"DateCol\"])\nsampledata.dtypes\n\n#We print out the column to see what it looks like\nsampledata[\"DateCol2\"]", "Now that we have the datetime column, I'd like to plot the data as a function of date. This is often a useful thing to do with time series data. We'll need to import the matplotlib library and use a trick to format the data by date. Here's the code that makes it work.", "import matplotlib.pyplot as plt\n%matplotlib inline\n# We will plot the data values and set the linestyle to 'None' which will not plot the line. We also want to show the individual data points, so we set the marker.\nplt.plot(sampledata['DateCol2'].values, sampledata['FloatCol'].values, linestyle='None', marker='o')\n# autofmt_xdate() tells the computer that it should treat the x-values as dates and format them appropriately. This is a figure function, so we use gcf() to \"get current figure\"\nplt.gcf().autofmt_xdate()", "Geographical\nAlthough this is not typically a single data type, you may encounter geographical data. These are typically in a Latitude-Longitude format where both Latitude and Longitude are floating point numbers like this: (32.1545, -138.5532). There are a number of tools we can use to work with and plot this type of data, so I wanted to cover it now. For now, we will treat these as separate entities and work with geographical data as we encounter it.", "print(\"Latitude Values\")\nprint(sampledata['LatCol'].values)\nprint(\"Longitude Values\")\nprint(sampledata['LonCol'].values)", "It is also useful to plot the geographical data. There are python libraries that make this easy to do.", "from mpl_toolkits.basemap import Basemap\nimport numpy as np\n\n# Draw the base map of the world\nm = Basemap(projection='robin',lon_0=0,resolution='c')\n# Draw the continent coast lines\nm.drawcoastlines()\n# Color in the water and the land masses\nm.fillcontinents(color='red',lake_color='aqua')\n# draw parallels and meridians.\nm.drawparallels(np.arange(-90.,120.,30.))\nm.drawmeridians(np.arange(0.,360.,60.))\n#m.drawmapboundary(fill_color='aqua')\n\n# Prep the data for plotting on the map\nx,y = m(sampledata['LonCol'].values, sampledata['LatCol'].values)\n# Plot the data points on the map\nm.plot(x,y, 'bo', markersize=10)", "Column-wise processing\nNow that we have data columns, we've already seen a couple of examples of column-wise processing. When we created the categorical column and the datetime column we took the data from one column and operated on it all at the same time creating the new columns with the different data types. There are other ways to manipulate the columns.\napply\nThe apply function takes each entry in a column and applies whatever function you want to the entry. For example, we are interested in whether the entry is greater than 4. We will simplify the code by using what is called a lambda function. So, inside the apply() function we have: lambda x: x&gt;4. This is shorthand notation for the following:\n\"Treat x as if it were each entry in the column. Apply whatever follows the colon (:) to each entry and create a new column based on the output\". The use of x was arbitrary: we could choose any variable. For example if we chose w, the code would read: lambda w: w&gt;4. 
This would do exactly the same thing.", "sampledata['GTfour'] = sampledata['FloatCol'].apply(lambda x: x > 4.0)\nprint(sampledata[['FloatCol','GTfour']])", "Common functions\nThere are a number of common functions that we could use inside the apply. For example, if we wanted to get the square root of each entry, this is what it would look like. We are using the function np.sqrt from the numpy library. We already imported this library, but if we didn't, we'd need to import numpy as np before running this function.", "sampledata['FloatSQRT'] = sampledata['FloatCol'].apply(np.sqrt)\nprint(sampledata[['FloatCol','FloatSQRT']])", "Another useful function is adding up columns. Note that we need to tell pandas to run through each row by adding the argument axis=1 to the apply function. Otherwise it tries to add up each column. This might be something you might want to do, too, though the easiest way to do that is to use the pandas sum function for the column.", "sampledata['IntSUM'] = sampledata[['IntCol','FloatCol']].apply(np.sum,axis=1)\nprint(sampledata[['IntCol','FloatCol','IntSUM']])\n\nsampledata['IntCol'].sum()", "Custom functions\nWe will now create our first custom function and use it to process the data. We will make a short function that will look to see if a value in the TextCol feature matches an item on a list we create.", "\n# We first tell the computer that we are writing a function by starting with \"def\"\n# The next text is the name of the function. We name this one \"isMammal\" meaning it will tell us if an animal is in our list of mammals\n# The final text in the parenthesis is an input to the function. This is another \"dummy\" variable - we could give it any name we want. \n# In this case we call it \"animal\" to remind ourselves that we expect an animal type in text form.\ndef isMammal(animal):\n # We create a list of text objects that will be our \"inclusive\" list. If the item is on this list, the function will return True. Otherwise it returns false.\n mammallist = ['cat','dog','horse','cow','elephant','giraffe','wolf','prairie dog', 'whale', 'dolphin']\n # This is our first \"if\" statement. What this particular version does is look at the list \"mammallist\". \n # If the text passed into the variable \"animal\" matches any item in the list, it jumps into this next block of code\n # Otherwise it jumps into block of code following the \"else\" statement\n if animal in mammallist:\n # the \"return\" code word tells the computer we are done and to send back to the apply function the value following \"return\". In this case, send back \"True\"\n return 'mammal'\n else:\n # The other case will send back \"false\".\n return 'notmammal'\n \nsampledata['IsMammal'] = sampledata['TextCol'].apply(isMammal)\nprint(sampledata[['TextCol', 'IsMammal']])\n\n# We'll now operate on an entire row of data at once and do a more complicated operation. We'll return only mammals where the 'FloatCol' is smaller than 2.\n\ndef isMammalFloat(row):\n # We create a list of text objects that will be our \"inclusive\" list. If the item is on this list, the function will return True. Otherwise it returns false.\n mammallist = ['cat','dog','horse','cow','elephant','giraffe','wolf','prairie dog', 'whale', 'dolphin']\n \n # We need to identify the animal from the row - it can be addressed using the column name\n animal = row['TextCol']\n \n if animal in mammallist:\n # the \"return\" code word tells the computer we are done and to send back to the apply function the value following \"return\". 
\n # In this case it returns True if the float value is less than 2 and false otherwise.\n return row['FloatCol'] < 2\n else:\n # If it isn't a mammal, return false\n return False\n\n# Note that we need to tell `apply` to send one row at a time by adding the `axis=1` argument\nsampledata['IsSmallMammal'] = sampledata.apply(isMammalFloat, axis=1)\nprint(sampledata[['TextCol', 'FloatCol','IsSmallMammal']])\n\nsampledata['TextCol'][ sampledata['FloatCol']<2 ]", "Feature extraction\nWe can often pull additional features from what we currently have. This involves doing a column-wise processing step, but with the additional component of doing a transformation or extraction from the data. We'll look at a couple of techniques to do this.\nDate/day/week features\nWe already saw how to take a text column that is a date and turn it into a datetime data type. The to_datetime() function has the capability of parsing many different string formats. I recommend looking at the documentation for the function to learn how to do parsing of more specific date time formats. \nOnce we have a datetime data type, we can use other functions to get, for example, the day of the week or the week of the year for any given date. This may be useful for looking at weekly patterns or yearly patterns. The full list of features we can easily extract is found in the documentation. We use the apply function with the simple in-line lambda function to get the date or time features. Another use for this might be to identify holidays- for example, Memorial day is always on the same relative day of the year (last Monday in May). We could use these functions to identify which days are national or bank holidays.", "# Get the day of the week for each of the data features. We can get either a numerical value (0-6) or the names\nsampledata['DayofWeek'] = sampledata['DateCol2'].apply(lambda x: x.weekday_name)\n# Or the week number in the year\nsampledata['WeekofYear'] = sampledata['DateCol2'].apply(lambda x: x.week)\n\nprint(sampledata[['DayofWeek', 'WeekofYear']])", "Unique values\nSometimes it is helpful to know what unique values are in a column. Especially when there are many rows (millions), it is impractical to manually scan through the columns to look for unique values. However, we can use a pandas function unique() to do just that. We will see this is particularly helpful in doing data cleaning to identify rows with problems in the data.", "sampledata['CatCol'].unique()", "Text regex features\nAnother type of text feature extraction using a regex or regular expression pattern recognition code. The date/time conversion uses one form of this, but we can be more general in identifying patterns. There are some very useful tools for testing your pattern. I like the tester at https://regex101.com/. I use it whenever I build a pattern recognition string.", "# This simple text pattern gathers all the letters up to (but not including) the last 'e' in the text entry. There are lots of other pattern recognition tools to extract features from text.\n# Note that it returns \"NaN\" if there are no 'e's in the text string. We could use that to find all the strings without an 'e' in them.\nsampledata['TextCol'].str.extract(\"(.*)e\", expand=True)", "Converting to categorical\nWe already saw how to convert text columns to categorical columns. We can also covert other data types to categorical columns. 
For example, we could bin a float column into regularly sized bins, then create a categorical column from those bins.\nWord/Text cleaning\nFinally, it is often useful to clean up text entries before trying to turn them into features. For example, we may want to remove all punctuation, capital letters, or other special characters. We may also want to consider all of the forms of a word as the same word. For example, we may want to have both \"dog\" and \"dogs\" as the same feature. Or we may want \"wonder\" and \"wonderful\" as the same feature. There are a couple of text processing tools in python that simplify this work considerably.\nI created a small dataset to work with. We'll use one of the rows to test our text cleaning process.", "textDF = pd.read_csv('Class03_text.tsv',sep='\\t')\ntestcase = textDF['review'][3]\ntestcase", "The first thing we notice is that there are hypertext bits in the text (the &lt;br /&gt; items). We want to clean all of those out. The BeautifulSoup function does this for us.", "from bs4 import BeautifulSoup\ncleantext = BeautifulSoup(testcase,\"html5lib\").text\ncleantext", "We now want to get rid of everything that isn't an alphabetical letter. That will clean up all punctuation and get rid of all numbers. We'll use a regex substitution function to do this. It looks for everything that is not an alphabetical character and replaces it with a blank space.", "import re\nonlyletters = re.sub(\"[^a-zA-Z]\",\" \",cleantext)\nonlyletters", "We'll get rid of upper-case letters to only look at the words themselves.", "lowercase = onlyletters.lower()\nlowercase", "The next two steps we'll do at once because we need to split up the text into individual words to do them. The split() function breaks up the string into an array of words. We will then eliminate any words that are stopwords in English. These are words like \"and\", \"or\", \"the\" that don't communciate any information but are necessary for language.\nThe other thing we'll do is cut the words down to their root stems. This will get rid of plurals or other modifications of words.", "import nltk\nfrom nltk.corpus import stopwords # Import the stop word list\n\nwords = lowercase.split() \nmeaningfulwords = [w for w in words if not w in stopwords.words(\"english\")]\n\nfrom nltk.stem import SnowballStemmer\nsnowball_stemmer = SnowballStemmer(\"english\")\n\nstemmedwords = [snowball_stemmer.stem(w) for w in meaningfulwords ]\n\nprint(\" \".join(meaningfulwords))\nprint(\"\\n\")\nprint(\" \".join(stemmedwords))\n\n# Now we make a function that we can apply to every entry in the dataframe\n\ndef cleantext(textinput):\n \n # First Pass: remove any html tags\n from bs4 import BeautifulSoup\n cleantext = BeautifulSoup(textinput,\"html5lib\").text\n \n # Second pass: remove non-letters and make everything lower case\n import re\n testcase = re.sub(\"[^a-zA-Z]\",\" \",cleantext)\n lowercase = testcase.lower()\n \n # Third pass: remove all stop words (non-essential words)\n from nltk.corpus import stopwords # Import the stop word list\n words = lowercase.split() \n meaningfulwords = [w for w in words if not w in stopwords.words(\"english\")]\n\n # Fourth pass: get the word stems so that plurals, etc. 
are reduced\n from nltk.stem import SnowballStemmer\n snowball_stemmer = SnowballStemmer(\"english\")\n stemmedwords = [snowball_stemmer.stem(w) for w in meaningfulwords ]\n\n # Put the words back together again with a single space beteen them\n return \" \".join(stemmedwords)\n\ntextDF['cleaned'] = textDF['review'].apply(cleantext)\ntextDF", "Data Cleaning Example In-class Activity\nThe tutorial on cleaning messy data is located here: http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%207%20-%20Cleaning%20up%20messy%20data.ipynb\nFollow the tutorial, looking at the data and how to do a preliminary clean to eliminate entries that aren't correct or don't help. The data file can be loaded from the SageMath folder. I've reduced the number of column features in the data set to make it a bit easier to work with.", "requests = pd.read_csv(\"Class03_311_data.csv\")", "Assignment\nYour assignment is to do data processing and cleaning on your own dataset. I want documentation of what you've done and why you chose to do those things to your data. \nI would also like you to try redoing your regression from last week, using the new features that you create through the data processing steps. See if you can improve the quality of your regression." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
Hvass-Labs/TensorFlow-Tutorials
13B_Visual_Analysis_MNIST.ipynb
mit
[ "TensorFlow Tutorial #13-B\nVisual Analysis (MNIST)\nby Magnus Erik Hvass Pedersen\n/ GitHub / Videos on YouTube\nIntroduction\nTutorial #13 showed how to find input images that maximized the response of individual neurons inside the Inception model, so as to find the images that the neuron liked to see. But because the Inception model is so large and complex the images were just complex wavy patterns.\nThis tutorial uses a much simpler Convolutional Neural Network with the MNIST data-set for recognizing hand-written digits. The code is spliced together from Tutorial #03-B for constructing the neural network and Tutorial #13 for finding input images that maximize individual neuron responses inside the neural network, so a lot of this code may look familiar to you.\nFlowchart\nThe following chart shows roughly how the data flows in the Convolutional Neural Network that is implemented below. Note that there are two separate optimization loops here:\nFirst the weights of the neural network are optimized by inputting images and their true classes to the network so as to improve the classification accuracy.\nAfterwards a second optimization is performed which finds the input image that maximizes a given feature or neuron inside the network. This finds an image that the network likes to see.\n\nTensorFlow 2\nThis tutorial was developed using TensorFlow v.1 back in the year 2016. There have been significant API changes in TensorFlow v.2. This tutorial uses TF2 in \"v.1 compatibility mode\", which is still useful for learning how TensorFlow works, but you would have to implement it slightly differently in TF2 (see Tutorial 03C on the Keras API). It would be too big a job for me to keep updating these tutorials every time Google's engineers update the TensorFlow API, so this tutorial may eventually stop working.\nImports", "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport math\n\n# Use TensorFlow v.2 with this old v.1 code.\n# E.g. placeholder variables and sessions have changed in TF2.\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()", "This was developed using Python 3.6 (Anaconda) and TensorFlow version:", "tf.__version__", "Load Data\nThe MNIST data-set is about 12 MB and will be downloaded automatically if it is not located in the given path.", "from mnist import MNIST\ndata = MNIST(data_dir=\"data/MNIST/\")", "The MNIST data-set has now been loaded and consists of 70.000 images and class-numbers for the images. The data-set is split into 3 mutually exclusive sub-sets. 
We will only use the training and test-sets in this tutorial.", "print(\"Size of:\")\nprint(\"- Training-set:\\t\\t{}\".format(data.num_train))\nprint(\"- Validation-set:\\t{}\".format(data.num_val))\nprint(\"- Test-set:\\t\\t{}\".format(data.num_test))", "Copy some of the data-dimensions for convenience.", "# The number of pixels in each dimension of an image.\nimg_size = data.img_size\n\n# The images are stored in one-dimensional arrays of this length.\nimg_size_flat = data.img_size_flat\n\n# Tuple with height and width of images used to reshape arrays.\nimg_shape = data.img_shape\n\n# Number of classes, one class for each of 10 digits.\nnum_classes = data.num_classes\n\n# Number of colour channels for the images: 1 channel for gray-scale.\nnum_channels = data.num_channels", "Helper-functions for plotting images\nFunction used to plot 9 images in a 3x3 grid, and writing the true and predicted classes below each image.", "def plot_images(images, cls_true, cls_pred=None):\n assert len(images) == len(cls_true) == 9\n \n # Create figure with 3x3 sub-plots.\n fig, axes = plt.subplots(3, 3)\n fig.subplots_adjust(hspace=0.3, wspace=0.3)\n\n for i, ax in enumerate(axes.flat):\n # Plot image.\n ax.imshow(images[i].reshape(img_shape), cmap='binary')\n\n # Show true and predicted classes.\n if cls_pred is None:\n xlabel = \"True: {0}\".format(cls_true[i])\n else:\n xlabel = \"True: {0}, Pred: {1}\".format(cls_true[i], cls_pred[i])\n\n # Show the classes as the label on the x-axis.\n ax.set_xlabel(xlabel)\n \n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n \n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()", "Function used to plot 10 images in a 2x5 grid.", "def plot_images10(images, smooth=True):\n # Interpolation type.\n if smooth:\n interpolation = 'spline16'\n else:\n interpolation = 'nearest'\n\n # Create figure with sub-plots.\n fig, axes = plt.subplots(2, 5)\n\n # Adjust vertical spacing.\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n\n # For each entry in the grid.\n for i, ax in enumerate(axes.flat):\n # Get the i'th image and only use the desired pixels.\n img = images[i, :, :]\n \n # Plot the image.\n ax.imshow(img, interpolation=interpolation, cmap='binary')\n\n # Remove ticks.\n ax.set_xticks([])\n ax.set_yticks([])\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show() ", "Function used to plot a single image.", "def plot_image(image):\n plt.imshow(image, interpolation='nearest', cmap='binary')\n plt.xticks([])\n plt.yticks([])", "Plot a few images to see if data is correct", "# Get the first images from the test-set.\nimages = data.x_test[0:9]\n\n# Get the true classes for those images.\ncls_true = data.y_test_cls[0:9]\n\n# Plot the images and labels using our helper-function above.\nplot_images(images=images, cls_true=cls_true)", "TensorFlow Graph\nThe neural network is constructed as a computational graph in TensorFlow using the tf.layers API, which is described in detail in Tutorial #03-B.\nPlaceholder variables\nPlaceholder variables serve as the input to the TensorFlow computational graph that we may change each time we execute the graph.\nFirst we define the placeholder variable for the input images. This allows us to change the images that are input to the TensorFlow graph. This is a so-called tensor, which just means that it is a multi-dimensional array. 
The data-type is set to float32 and the shape is set to [None, img_size_flat], where None means that the tensor may hold an arbitrary number of images with each image being a vector of length img_size_flat.", "x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')", "The convolutional layers expect x to be encoded as a 4-rank tensor so we have to reshape it so its shape is instead [num_images, img_height, img_width, num_channels]. Note that img_height == img_width == img_size and num_images can be inferred automatically by using -1 for the size of the first dimension. So the reshape operation is:", "x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])", "Next we have the placeholder variable for the true labels associated with the images that were input in the placeholder variable x. The shape of this placeholder variable is [None, num_classes] which means it may hold an arbitrary number of labels and each label is a vector of length num_classes which is 10 in this case.", "y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')", "We could also have a placeholder variable for the class-number, but we will instead calculate it using argmax. Note that this is a TensorFlow operator so nothing is calculated at this point.", "y_true_cls = tf.argmax(y_true, axis=1)", "Neural Network\nWe now implement the Convolutional Neural Network using the Layers API. We use the net-variable to refer to the last layer while building the neural network. This makes it easy to add or remove layers in the code if you want to experiment. First we set the net-variable to the reshaped input image.", "net = x_image", "The input image is then input to the first convolutional layer, which has 16 filters each of size 5x5 pixels. The activation-function is the Rectified Linear Unit (ReLU) described in more detail in Tutorial #02.", "net = tf.layers.conv2d(inputs=net, name='layer_conv1', padding='same',\n filters=16, kernel_size=5, activation=tf.nn.relu)", "After the convolution we do a max-pooling which is also described in Tutorial #02.", "net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2)", "Then we make a second convolutional layer, also with max-pooling.", "net = tf.layers.conv2d(inputs=net, name='layer_conv2', padding='same',\n filters=36, kernel_size=5, activation=tf.nn.relu)\n\nnet = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2)", "The output then needs to be flattened so it can be used in fully-connected (aka. dense) layers.", "net = tf.layers.flatten(net)\n\n# This should eventually be replaced by:\n# net = tf.layers.flatten(net)", "We can now add fully-connected (or dense) layers to the neural network.", "net = tf.layers.dense(inputs=net, name='layer_fc1',\n units=128, activation=tf.nn.relu)", "We need the neural network to classify the input images into 10 different classes. So the final fully-connected layer has num_classes=10 output neurons.", "net = tf.layers.dense(inputs=net, name='layer_fc_out',\n units=num_classes, activation=None)", "The outputs of the final fully-connected layer are sometimes called logits, so we have a convenience variable with that name which we will also use further below.", "logits = net", "We use the softmax function to 'squash' the outputs so they are between zero and one, and so they sum to one.", "y_pred = tf.nn.softmax(logits=logits)", "This tells us how likely the neural network thinks the input image is of each possible class. 
The one that has the highest value is considered the most likely so its index is taken to be the class-number.", "y_pred_cls = tf.argmax(y_pred, axis=1)", "Loss-Function to be Optimized\nTo make the model better at classifying the input images, we must somehow change the variables of the neural network.\nThe cross-entropy is a performance measure used in classification. The cross-entropy is a continuous function that is always positive and if the predicted output of the model exactly matches the desired output then the cross-entropy equals zero. The goal of optimization is therefore to minimize the cross-entropy so it gets as close to zero as possible by changing the variables of the model.\nTensorFlow has a function for calculating the cross-entropy, which uses the values of the logits-layer because it also calculates the softmax internally, so as to to improve numerical stability.", "cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=logits)", "We have now calculated the cross-entropy for each of the image classifications so we have a measure of how well the model performs on each image individually. But in order to use the cross-entropy to guide the optimization of the model's variables we need a single scalar value, so we simply take the average of the cross-entropy for all the image classifications.", "loss = tf.reduce_mean(cross_entropy)", "Optimization Method\nNow that we have a cost measure that must be minimized, we can then create an optimizer. In this case it is the Adam optimizer with a learning-rate of 1e-4.\nNote that optimization is not performed at this point. In fact, nothing is calculated at all, we just add the optimizer-object to the TensorFlow graph for later execution.", "optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)", "Classification Accuracy\nWe need to calculate the classification accuracy so we can report progress to the user.\nFirst we create a vector of booleans telling us whether the predicted class equals the true class of each image.", "correct_prediction = tf.equal(y_pred_cls, y_true_cls)", "The classification accuracy is calculated by first type-casting the vector of booleans to floats, so that False becomes 0 and True becomes 1, and then taking the average of these numbers.", "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))", "Optimize the Neural Network\nCreate TensorFlow session\nOnce the TensorFlow graph has been created, we have to create a TensorFlow session which is used to execute the graph.", "session = tf.Session()", "Initialize variables\nThe variables for the TensorFlow graph must be initialized before we start optimizing them.", "session.run(tf.global_variables_initializer())", "Helper-function to perform optimization iterations\nThere are 55,000 images in the training-set. It takes a long time to calculate the gradient of the model using all these images. We therefore only use a small batch of images in each iteration of the optimizer.\nIf your computer crashes or becomes very slow because you run out of RAM, then you may try and lower this number, but you may then need to do more optimization iterations.", "train_batch_size = 64", "This function performs a number of optimization iterations so as to gradually improve the variables of the neural network layers. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples. 
The progress is printed every 100 iterations.", "# Counter for total number of iterations performed so far.\ntotal_iterations = 0\n\ndef optimize(num_iterations):\n # Ensure we update the global variable rather than a local copy.\n global total_iterations\n\n for i in range(total_iterations,\n total_iterations + num_iterations):\n\n # Get a batch of training examples.\n # x_batch now holds a batch of images and\n # y_true_batch are the true labels for those images.\n x_batch, y_true_batch, _ = data.random_batch(batch_size=train_batch_size)\n\n # Put the batch into a dict with the proper names\n # for placeholder variables in the TensorFlow graph.\n feed_dict_train = {x: x_batch,\n y_true: y_true_batch}\n\n # Run the optimizer using this batch of training data.\n # TensorFlow assigns the variables in feed_dict_train\n # to the placeholder variables and then runs the optimizer.\n session.run(optimizer, feed_dict=feed_dict_train)\n\n # Print status every 100 iterations.\n if i % 100 == 0:\n # Calculate the accuracy on the training-set.\n acc = session.run(accuracy, feed_dict=feed_dict_train)\n\n # Message for printing.\n msg = \"Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}\"\n\n # Print it.\n print(msg.format(i + 1, acc))\n\n # Update the total number of iterations performed.\n total_iterations += num_iterations", "Helper-function to plot example errors\nFunction for plotting examples of images from the test-set that have been mis-classified.", "def plot_example_errors(cls_pred, correct):\n # This function is called from print_test_accuracy() below.\n\n # cls_pred is an array of the predicted class-number for\n # all images in the test-set.\n\n # correct is a boolean array whether the predicted class\n # is equal to the true class for each image in the test-set.\n\n # Negate the boolean array.\n incorrect = (correct == False)\n \n # Get the images from the test-set that have been\n # incorrectly classified.\n images = data.x_test[incorrect]\n \n # Get the predicted classes for those images.\n cls_pred = cls_pred[incorrect]\n\n # Get the true classes for those images.\n cls_true = data.y_test_cls[incorrect]\n \n # Plot the first 9 images.\n plot_images(images=images[0:9],\n cls_true=cls_true[0:9],\n cls_pred=cls_pred[0:9])", "Helper-function to plot confusion matrix", "def plot_confusion_matrix(cls_pred):\n # This is called from print_test_accuracy() below.\n\n # cls_pred is an array of the predicted class-number for\n # all images in the test-set.\n\n # Get the true classifications for the test-set.\n cls_true = data.y_test_cls\n \n # Get the confusion matrix using sklearn.\n cm = confusion_matrix(y_true=cls_true,\n y_pred=cls_pred)\n\n # Print the confusion matrix as text.\n print(cm)\n\n # Plot the confusion matrix as an image.\n plt.matshow(cm)\n\n # Make various adjustments to the plot.\n plt.colorbar()\n tick_marks = np.arange(num_classes)\n plt.xticks(tick_marks, range(num_classes))\n plt.yticks(tick_marks, range(num_classes))\n plt.xlabel('Predicted')\n plt.ylabel('True')\n\n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()", "Helper-function for showing the performance\nBelow is a function for printing the classification accuracy on the test-set.\nIt takes a while to compute the classification for all the images in the test-set, that's why the results are re-used by calling the above functions directly from this function, so the classifications don't have to be recalculated by each function.\nNote that this function 
can use a lot of computer memory, which is why the test-set is split into smaller batches. If you have little RAM in your computer and it crashes, then you can try and lower the batch-size.", "# Split the test-set into smaller batches of this size.\ntest_batch_size = 256\n\ndef print_test_accuracy(show_example_errors=False,\n show_confusion_matrix=False):\n\n # Number of images in the test-set.\n num_test = data.num_test\n\n # Allocate an array for the predicted classes which\n # will be calculated in batches and filled into this array.\n cls_pred = np.zeros(shape=num_test, dtype=np.int)\n\n # Now calculate the predicted classes for the batches.\n # We will just iterate through all the batches.\n # There might be a more clever and Pythonic way of doing this.\n\n # The starting index for the next batch is denoted i.\n i = 0\n\n while i < num_test:\n # The ending index for the next batch is denoted j.\n j = min(i + test_batch_size, num_test)\n\n # Get the images from the test-set between index i and j.\n images = data.x_test[i:j, :]\n\n # Get the associated labels.\n labels = data.y_test[i:j, :]\n\n # Create a feed-dict with these images and labels.\n feed_dict = {x: images,\n y_true: labels}\n\n # Calculate the predicted class using TensorFlow.\n cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)\n\n # Set the start-index for the next batch to the\n # end-index of the current batch.\n i = j\n\n # Convenience variable for the true class-numbers of the test-set.\n cls_true = data.y_test_cls\n\n # Create a boolean array whether each image is correctly classified.\n correct = (cls_true == cls_pred)\n\n # Calculate the number of correctly classified images.\n # When summing a boolean array, False means 0 and True means 1.\n correct_sum = correct.sum()\n\n # Classification accuracy is the number of correctly classified\n # images divided by the total number of images in the test-set.\n acc = float(correct_sum) / num_test\n\n # Print the accuracy.\n msg = \"Accuracy on Test-Set: {0:.1%} ({1} / {2})\"\n print(msg.format(acc, correct_sum, num_test))\n\n # Plot some examples of mis-classifications, if desired.\n if show_example_errors:\n print(\"Example errors:\")\n plot_example_errors(cls_pred=cls_pred, correct=correct)\n\n # Plot the confusion matrix, if desired.\n if show_confusion_matrix:\n print(\"Confusion Matrix:\")\n plot_confusion_matrix(cls_pred=cls_pred)", "Performance before any optimization\nThe accuracy on the test-set is very low because the variables for the neural network have only been initialized and not optimized at all, so it just classifies the images randomly.", "print_test_accuracy()", "Performance after 10,000 optimization iterations\nAfter 10,000 optimization iterations, the model has a classification accuracy on the test-set of about 99%.", "%%time\noptimize(num_iterations=10000)\n\nprint_test_accuracy(show_example_errors=True,\n show_confusion_matrix=True)", "Optimizing the Input Images\nNow that the neural network has been optimized so it can recognize hand-written digits with about 99% accuracy, we will then find the input images that maximize certain features inside the neural network. This will show us what images the neural network likes to see the most.\nWe will do this by creating another form of optimization for the neural network, and we need several helper functions for doing this.\nHelper-function for getting the names of convolutional layers\nFunction for getting the names of all the convolutional layers in the neural network. 
We could have made this list manually, but for larger neural networks it is easier to do this with a function.", "def get_conv_layer_names():\n graph = tf.get_default_graph()\n \n # Create a list of names for the operations in the graph\n # for the Inception model where the operator-type is 'Conv2D'.\n names = [op.name for op in graph.get_operations() if op.type=='Conv2D']\n\n return names\n\nconv_names = get_conv_layer_names()\n\nconv_names\n\nlen(conv_names)", "Helper-function for finding the input image\nThis function finds the input image that maximizes a given feature in the network. It essentially just performs optimization with gradient ascent. The image is initialized with small random values and is then iteratively updated using the gradient for the given feature with regard to the image.", "def optimize_image(conv_id=None, feature=0,\n num_iterations=30, show_progress=True):\n \"\"\"\n Find an image that maximizes the feature\n given by the conv_id and feature number.\n\n Parameters:\n conv_id: Integer identifying the convolutional layer to\n maximize. It is an index into conv_names.\n If None then use the last fully-connected layer\n before the softmax output.\n feature: Index into the layer for the feature to maximize.\n num_iteration: Number of optimization iterations to perform.\n show_progress: Boolean whether to show the progress.\n \"\"\"\n\n # Create the loss-function that must be maximized.\n if conv_id is None:\n # If we want to maximize a feature on the last layer,\n # then we use the fully-connected layer prior to the\n # softmax-classifier. The feature no. is the class-number\n # and must be an integer between 1 and 1000.\n # The loss-function is just the value of that feature.\n loss = tf.reduce_mean(logits[:, feature])\n else:\n # If instead we want to maximize a feature of a\n # convolutional layer inside the neural network.\n\n # Get the name of the convolutional operator.\n conv_name = conv_names[conv_id]\n \n # Get the default TensorFlow graph.\n graph = tf.get_default_graph()\n \n # Get a reference to the tensor that is output by the\n # operator. Note that \":0\" is added to the name for this.\n tensor = graph.get_tensor_by_name(conv_name + \":0\")\n\n # The loss-function is the average of all the\n # tensor-values for the given feature. This\n # ensures that we generate the whole input image.\n # You can try and modify this so it only uses\n # a part of the tensor.\n loss = tf.reduce_mean(tensor[:,:,:,feature])\n\n # Get the gradient for the loss-function with regard to\n # the input image. 
This creates a mathematical\n # function for calculating the gradient.\n gradient = tf.gradients(loss, x_image)\n\n # Generate a random image of the same size as the raw input.\n # Each pixel is a small random value between 0.45 and 0.55,\n # which is the middle of the valid range between 0 and 1.\n image = 0.1 * np.random.uniform(size=img_shape) + 0.45\n\n # Perform a number of optimization iterations to find\n # the image that maximizes the loss-function.\n for i in range(num_iterations):\n # Reshape the array so it is a 4-rank tensor.\n img_reshaped = image[np.newaxis,:,:,np.newaxis]\n\n # Create a feed-dict for inputting the image to the graph.\n feed_dict = {x_image: img_reshaped}\n\n # Calculate the predicted class-scores,\n # as well as the gradient and the loss-value.\n pred, grad, loss_value = session.run([y_pred, gradient, loss],\n feed_dict=feed_dict)\n \n # Squeeze the dimensionality for the gradient-array.\n grad = np.array(grad).squeeze()\n\n # The gradient now tells us how much we need to change the\n # input image in order to maximize the given feature.\n\n # Calculate the step-size for updating the image.\n # This step-size was found to give fast convergence.\n # The addition of 1e-8 is to protect from div-by-zero.\n step_size = 1.0 / (grad.std() + 1e-8)\n\n # Update the image by adding the scaled gradient\n # This is called gradient ascent.\n image += step_size * grad\n\n # Ensure all pixel-values in the image are between 0 and 1.\n image = np.clip(image, 0.0, 1.0)\n\n if show_progress:\n print(\"Iteration:\", i)\n\n # Convert the predicted class-scores to a one-dim array.\n pred = np.squeeze(pred)\n\n # The predicted class for the Inception model.\n pred_cls = np.argmax(pred)\n\n # The score (probability) for the predicted class.\n cls_score = pred[pred_cls]\n\n # Print the predicted score etc.\n msg = \"Predicted class: {0}, score: {1:>7.2%}\"\n print(msg.format(pred_cls, cls_score))\n\n # Print statistics for the gradient.\n msg = \"Gradient min: {0:>9.6f}, max: {1:>9.6f}, stepsize: {2:>9.2f}\"\n print(msg.format(grad.min(), grad.max(), step_size))\n\n # Print the loss-value.\n print(\"Loss:\", loss_value)\n\n # Newline.\n print()\n\n return image.squeeze()", "This next function finds the images that maximize the first 10 features of a layer, by calling the above function 10 times.", "def optimize_images(conv_id=None, num_iterations=30):\n \"\"\"\n Find 10 images that maximize the 10 first features in the layer\n given by the conv_id.\n \n Parameters:\n conv_id: Integer identifying the convolutional layer to\n maximize. 
It is an index into conv_names.\n If None then use the last layer before the softmax output.\n num_iterations: Number of optimization iterations to perform.\n \"\"\"\n\n # Which layer are we using?\n if conv_id is None:\n print(\"Final fully-connected layer before softmax.\")\n else:\n print(\"Layer:\", conv_names[conv_id])\n\n # Initialize the array of images.\n images = []\n\n # For each feature do the following.\n for feature in range(0,10):\n print(\"Optimizing image for feature no.\", feature)\n \n # Find the image that maximizes the given feature\n # for the network layer identified by conv_id (or None).\n image = optimize_image(conv_id=conv_id, feature=feature,\n show_progress=False,\n num_iterations=num_iterations)\n\n # Squeeze the dim of the array.\n image = image.squeeze()\n\n # Append to the list of images.\n images.append(image)\n\n # Convert to numpy-array so we can index all dimensions easily.\n images = np.array(images)\n\n # Plot the images.\n plot_images10(images=images)", "First Convolutional Layer\nThese are the input images that maximize the features in the first convolutional layer, so these are the images that it likes to see.", "optimize_images(conv_id=0)", "Note how these are very simple shapes such as lines and angles. Some of these images may be completely white, which suggests that those features of the neural network are perhaps unused, so the number of features could be reduced in this layer.\nSecond Convolutional Layer\nThis shows the images that maximize the features or neurons in the second convolutional layer, so these are the input images it likes to see. Note how these are more complex lines and patterns compared to the first convolutional layer.", "optimize_images(conv_id=1)", "Final output layer\nNow find the image for the 2nd feature of the final output of the neural network. That is, we want to find an image that makes the neural network classify that image as the digit 2. This is the image that the neural network likes to see the most for the digit 2.", "image = optimize_image(conv_id=None, feature=2,\n num_iterations=10, show_progress=True)", "Note how the predicted class indeed becomes 2 already within the first few iterations so the optimization is working as intended. Also note how the loss-measure is increasing rapidly until it apparently converges. This is because the loss-measure is actually just the value of the feature or neuron that we are trying to maximize. Because this is the logits-layer prior to the softmax, these values can potentially be infinitely high, but they are limited because we limit the image-values between 0 and 1.\nNow plot the image that was found. This is the image that the neural network believes looks most like the digit 2.", "plot_image(image)", "Although some of the curves do hint somewhat at the digit 2, it is hard for a human to see why the neural network believes this is the optimal image for the digit 2. This can only be understood when the optimal images for the remaining digits are also shown.", "optimize_images(conv_id=None)", "These images may vary each time you run the optimization. Some of the images can be seen to somewhat resemble the hand-written digits. 
But the other images are often impossible to recognize and it is hard to understand why the neural network thinks these are the optimal input images for those digits.\nThe reason is perhaps that the neural network tries to recognize all digits simultaneously, and it has found that certain pixels often determine whether the image shows one digit or another. So the neural network has learned to differentiate those pixels that it has found to be important, but not the underlying curves and shapes of the digits, in the same way that a human recognizes the digits.\nAnother possibility is that the data-set contains mis-classified digits which may confuse the neural network during training. We have previously seen how some of the digits in the data-set are very hard to read even for humans, and this may cause the neural network to become distorted and trying to recognize strange artifacts in the images.\nYet another possibility is that the optimization process has stagnated in a local optimum. One way to test this, would be to run the optimization 50 times for the digits that are unclear, and see if some of the resulting images become more clear.\nClose TensorFlow Session\nWe are now done using TensorFlow, so we close the session to release its resources.", "# This has been commented out in case you want to modify and experiment\n# with the Notebook without having to restart it.\n# session.close()", "Conclusion\nThis tutorial showed how to find the input images that maximize certain features inside a neural network. These are the images that the neural network likes to see the most in order to activate a certain feature or neuron inside the network.\nThis was tested on a simple convolutional neural network using the MNIST data-set. The neural network had clearly learned to recognize the general shape of some of the digits, while it was impossible to see how it recognized other digits.\nExercises\nThese are a few suggestions for exercises that may help improve your skills with TensorFlow. It is important to get hands-on experience with TensorFlow in order to learn how to use it properly.\nYou may want to backup this Notebook before making any changes.\n\n\nPlot the images for all features in each convolutional layer instead of just the first 10 features. How many of them appear to be unused or redundant? What happens if you lower the number of features in that layer and train the network again, does it still perform just as well?\n\n\nTry adding more convolutional layers and find the input images that maximize their features. What do the images show? Do you think it is useful to add more convolutional layers than two?\n\n\nTry adding more fully-connected layers and modify the code so it can find input images that maximize the features of the fully-connected / dense layers as well. Currently the code can only maximize the features of the convolutional layers and the final fully-connected layer.\n\n\nFor the input images that are unclear, run the optimization e.g. 50 times for each of those digits, to see if it produces more clear input images. 
It is possible that the optimization has simply become stuck in a local optimum.\n\n\nExplain to a friend how the program works.\n\n\nLicense (MIT)\nCopyright (c) 2016-2017 by Magnus Erik Hvass Pedersen\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
y2ee201/Deep-Learning-Nanodegree
sentiment_network/Sentiment Classification - How to Best Frame a Problem for a Neural Network (Lesson 5).ipynb
mit
[ "Sentiment Classification & How To \"Frame Problems\" for a Neural Network\nby Andrew Trask\n\nTwitter: @iamtrask\nBlog: http://iamtrask.github.io\n\nWhat You Should Already Know\n\nneural networks, forward and back-propagation\nstochastic gradient descent\nmean squared error\nand train/test splits\n\nWhere to Get Help if You Need it\n\nRe-watch previous Udacity Lectures\nLeverage the recommended Course Reading Material - Grokking Deep Learning (40% Off: traskud17)\nShoot me a tweet @iamtrask\n\nTutorial Outline:\n\n\nIntro: The Importance of \"Framing a Problem\"\n\n\nCurate a Dataset\n\nDeveloping a \"Predictive Theory\"\n\nPROJECT 1: Quick Theory Validation\n\n\nTransforming Text to Numbers\n\n\nPROJECT 2: Creating the Input/Output Data\n\n\nPutting it all together in a Neural Network\n\n\nPROJECT 3: Building our Neural Network\n\n\nUnderstanding Neural Noise\n\n\nPROJECT 4: Making Learning Faster by Reducing Noise\n\n\nAnalyzing Inefficiencies in our Network\n\n\nPROJECT 5: Making our Network Train and Run Faster\n\n\nFurther Noise Reduction\n\n\nPROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary\n\n\nAnalysis: What's going on in the weights?\n\n\nLesson: Curate a Dataset", "def pretty_print_review_and_label(i):\n print(labels[i] + \"\\t:\\t\" + reviews[i][:80] + \"...\")\n\ng = open('reviews.txt','r') # What we know!\nreviews = list(map(lambda x:x[:-1],g.readlines()))\ng.close()\n\ng = open('labels.txt','r') # What we WANT to know!\nlabels = list(map(lambda x:x[:-1].upper(),g.readlines()))\ng.close()\n\nlen(reviews)\n\nreviews[0]\n\nlabels[0]", "Lesson: Develop a Predictive Theory", "print(\"labels.txt \\t : \\t reviews.txt\\n\")\npretty_print_review_and_label(2137)\npretty_print_review_and_label(12816)\npretty_print_review_and_label(6267)\npretty_print_review_and_label(21934)\npretty_print_review_and_label(5297)\npretty_print_review_and_label(4998)", "Project 1: Quick Theory Validation", "from collections import Counter\nimport numpy as np\n\npositive_counts = Counter()\nnegative_counts = Counter()\ntotal_counts = Counter()\n\nfor i in range(len(reviews)):\n if(labels[i] == 'POSITIVE'):\n for word in reviews[i].split(\" \"):\n positive_counts[word] += 1\n total_counts[word] += 1\n else:\n for word in reviews[i].split(\" \"):\n negative_counts[word] += 1\n total_counts[word] += 1\n\npositive_counts.most_common()\n\npos_neg_ratios = Counter()\n\nfor term,cnt in list(total_counts.most_common()):\n if(cnt > 100):\n pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)\n pos_neg_ratios[term] = pos_neg_ratio\n\nfor word,ratio in pos_neg_ratios.most_common():\n if(ratio > 1):\n pos_neg_ratios[word] = np.log(ratio)\n else:\n pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))\n\n# words most frequently seen in a review with a \"POSITIVE\" label\npos_neg_ratios.most_common()\n\n# words most frequently seen in a review with a \"NEGATIVE\" label\nlist(reversed(pos_neg_ratios.most_common()))[0:30]", "Transforming Text into Numbers", "from IPython.display import Image\n\nreview = \"This was a horrible, terrible movie.\"\n\nImage(filename='sentiment_network.png')\n\nreview = \"The movie was excellent\"\n\nImage(filename='sentiment_network_pos.png')", "Project 2: Creating the Input/Output Data", "vocab = set(total_counts.keys())\nvocab_size = len(vocab)\nprint(vocab_size)\n\nlist(vocab)\n\nimport numpy as np\n\nlayer_0 = np.zeros((1,vocab_size))\nlayer_0\n\nfrom IPython.display import Image\nImage(filename='sentiment_network.png')\n\nword2index = {}\n\nfor i,word 
in enumerate(vocab):\n word2index[word] = i\nword2index\n\ndef update_input_layer(review):\n \n global layer_0\n \n # clear out previous state, reset the layer to be all 0s\n layer_0 *= 0\n for word in review.split(\" \"):\n layer_0[0][word2index[word]] += 1\n\nupdate_input_layer(reviews[0])\n\nlayer_0\n\ndef get_target_for_label(label):\n if(label == 'POSITIVE'):\n return 1\n else:\n return 0\n\nlabels[0]\n\nget_target_for_label(labels[0])\n\nlabels[1]\n\nget_target_for_label(labels[1])", "Project 3: Building a Neural Network\n\nStart with your neural network from the last chapter\n3 layer neural network\nno non-linearity in hidden layer\nuse our functions to create the training data\ncreate a \"pre_process_data\" function to create vocabulary for our training data generating functions\nmodify \"train\" to train over the entire corpus\n\nWhere to Get Help if You Need it\n\nRe-watch previous week's Udacity Lectures\nChapters 3-5 - Grokking Deep Learning - (40% Off: traskud17)", "import time\nimport sys\nimport numpy as np\n\n# Let's tweak our network from before to model these phenomena\nclass SentimentNetwork:\n def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):\n \n # set our random number generator \n np.random.seed(1)\n \n self.pre_process_data(reviews, labels)\n \n self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)\n \n \n def pre_process_data(self, reviews, labels):\n \n review_vocab = set()\n for review in reviews:\n for word in review.split(\" \"):\n review_vocab.add(word)\n self.review_vocab = list(review_vocab)\n \n label_vocab = set()\n for label in labels:\n label_vocab.add(label)\n \n self.label_vocab = list(label_vocab)\n \n self.review_vocab_size = len(self.review_vocab)\n self.label_vocab_size = len(self.label_vocab)\n \n self.word2index = {}\n for i, word in enumerate(self.review_vocab):\n self.word2index[word] = i\n \n self.label2index = {}\n for i, label in enumerate(self.label_vocab):\n self.label2index[label] = i\n \n \n def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))\n \n self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n \n self.learning_rate = learning_rate\n \n self.layer_0 = np.zeros((1,input_nodes))\n \n \n def update_input_layer(self,review):\n\n # clear out previous state, reset the layer to be all 0s\n self.layer_0 *= 0\n for word in review.split(\" \"):\n if(word in self.word2index.keys()):\n self.layer_0[0][self.word2index[word]] += 1\n \n def get_target_for_label(self,label):\n if(label == 'POSITIVE'):\n return 1\n else:\n return 0\n \n def sigmoid(self,x):\n return 1 / (1 + np.exp(-x))\n \n \n def sigmoid_output_2_derivative(self,output):\n return output * (1 - output)\n \n def train(self, training_reviews, training_labels):\n \n assert(len(training_reviews) == len(training_labels))\n \n correct_so_far = 0\n \n start = time.time()\n \n for i in range(len(training_reviews)):\n \n review = training_reviews[i]\n label = training_labels[i]\n \n #### Implement the forward pass here ####\n ### Forward pass ###\n\n # Input Layer\n self.update_input_layer(review)\n\n # Hidden layer\n layer_1 = self.layer_0.dot(self.weights_0_1)\n\n # Output layer\n layer_2 = 
self.sigmoid(layer_1.dot(self.weights_1_2))\n\n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # TODO: Output error\n layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.\n layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)\n\n # TODO: Backpropagated error\n layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer\n layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error\n\n # TODO: Update the weights\n self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step\n self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step\n\n if(np.abs(layer_2_error) < 0.5):\n correct_so_far += 1\n \n reviews_per_second = i / float(time.time() - start)\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(training_reviews)))[:4] + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] + \" #Correct:\" + str(correct_so_far) + \" #Trained:\" + str(i+1) + \" Training Accuracy:\" + str(correct_so_far * 100 / float(i+1))[:4] + \"%\")\n if(i % 2500 == 0):\n print(\"\")\n \n def test(self, testing_reviews, testing_labels):\n \n correct = 0\n \n start = time.time()\n \n for i in range(len(testing_reviews)):\n pred = self.run(testing_reviews[i])\n if(pred == testing_labels[i]):\n correct += 1\n \n reviews_per_second = i / float(time.time() - start)\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(testing_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \"% #Correct:\" + str(correct) + \" #Tested:\" + str(i+1) + \" Testing Accuracy:\" + str(correct * 100 / float(i+1))[:4] + \"%\")\n \n def run(self, review):\n \n # Input Layer\n self.update_input_layer(review.lower())\n\n # Hidden layer\n layer_1 = self.layer_0.dot(self.weights_0_1)\n\n # Output layer\n layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))\n \n if(layer_2[0] > 0.5):\n return \"POSITIVE\"\n else:\n return \"NEGATIVE\"\n \n\nmlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)\n\n# evaluate our model before training (just to show how horrible it is)\nmlp.test(reviews[-1000:],labels[-1000:])\n\n# train the network\nmlp.train(reviews[:-1000],labels[:-1000])\n\nmlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)\n\n# train the network\nmlp.train(reviews[:-1000],labels[:-1000])\n\nmlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)\n\n# train the network\nmlp.train(reviews[:-1000],labels[:-1000])", "Understanding Neural Noise", "from IPython.display import Image\nImage(filename='sentiment_network.png')\n\ndef update_input_layer(review):\n \n global layer_0\n \n # clear out previous state, reset the layer to be all 0s\n layer_0 *= 0\n for word in review.split(\" \"):\n layer_0[0][word2index[word]] += 1\n\nupdate_input_layer(reviews[0])\n\nlayer_0\n\nreview_counter = Counter()\n\nfor word in reviews[0].split(\" \"):\n review_counter[word] += 1\n\nreview_counter.most_common()", "Project 4: Reducing Noise in our Input Data", "import time\nimport sys\nimport numpy as np\n\n# Let's tweak our network from before to model these phenomena\nclass SentimentNetwork:\n def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):\n \n # set our random number generator 
\n np.random.seed(1)\n \n self.pre_process_data(reviews, labels)\n \n self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)\n \n \n def pre_process_data(self, reviews, labels):\n \n review_vocab = set()\n for review in reviews:\n for word in review.split(\" \"):\n review_vocab.add(word)\n self.review_vocab = list(review_vocab)\n \n label_vocab = set()\n for label in labels:\n label_vocab.add(label)\n \n self.label_vocab = list(label_vocab)\n \n self.review_vocab_size = len(self.review_vocab)\n self.label_vocab_size = len(self.label_vocab)\n \n self.word2index = {}\n for i, word in enumerate(self.review_vocab):\n self.word2index[word] = i\n \n self.label2index = {}\n for i, label in enumerate(self.label_vocab):\n self.label2index[label] = i\n \n \n def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))\n \n self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n \n self.learning_rate = learning_rate\n \n self.layer_0 = np.zeros((1,input_nodes))\n \n \n def update_input_layer(self,review):\n\n # clear out previous state, reset the layer to be all 0s\n self.layer_0 *= 0\n for word in review.split(\" \"):\n if(word in self.word2index.keys()):\n self.layer_0[0][self.word2index[word]] = 1\n \n def get_target_for_label(self,label):\n if(label == 'POSITIVE'):\n return 1\n else:\n return 0\n \n def sigmoid(self,x):\n return 1 / (1 + np.exp(-x))\n \n \n def sigmoid_output_2_derivative(self,output):\n return output * (1 - output)\n \n def train(self, training_reviews, training_labels):\n \n assert(len(training_reviews) == len(training_labels))\n \n correct_so_far = 0\n \n start = time.time()\n \n for i in range(len(training_reviews)):\n \n review = training_reviews[i]\n label = training_labels[i]\n \n #### Implement the forward pass here ####\n ### Forward pass ###\n\n # Input Layer\n self.update_input_layer(review)\n\n # Hidden layer\n layer_1 = self.layer_0.dot(self.weights_0_1)\n\n # Output layer\n layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))\n\n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # TODO: Output error\n layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.\n layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)\n\n # TODO: Backpropagated error\n layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer\n layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error\n\n # TODO: Update the weights\n self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step\n self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step\n\n if(np.abs(layer_2_error) < 0.5):\n correct_so_far += 1\n \n reviews_per_second = i / float(time.time() - start)\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(training_reviews)))[:4] + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] + \" #Correct:\" + str(correct_so_far) + \" #Trained:\" + str(i+1) + \" Training Accuracy:\" + 
str(correct_so_far * 100 / float(i+1))[:4] + \"%\")\n if(i % 2500 == 0):\n print(\"\")\n \n def test(self, testing_reviews, testing_labels):\n \n correct = 0\n \n start = time.time()\n \n for i in range(len(testing_reviews)):\n pred = self.run(testing_reviews[i])\n if(pred == testing_labels[i]):\n correct += 1\n \n reviews_per_second = i / float(time.time() - start)\n \n sys.stdout.write(\"\\rProgress:\" + str(100 * i/float(len(testing_reviews)))[:4] \\\n + \"% Speed(reviews/sec):\" + str(reviews_per_second)[0:5] \\\n + \"% #Correct:\" + str(correct) + \" #Tested:\" + str(i+1) + \" Testing Accuracy:\" + str(correct * 100 / float(i+1))[:4] + \"%\")\n \n def run(self, review):\n \n # Input Layer\n self.update_input_layer(review.lower())\n\n # Hidden layer\n layer_1 = self.layer_0.dot(self.weights_0_1)\n\n # Output layer\n layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))\n \n if(layer_2[0] > 0.5):\n return \"POSITIVE\"\n else:\n return \"NEGATIVE\"\n \n\nmlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)\n\nmlp.train(reviews[:-1000],labels[:-1000])\n\n# evaluate our model before training (just to show how horrible it is)\nmlp.test(reviews[-1000:],labels[-1000:])", "Analyzing Inefficiencies in our Network", "Image(filename='sentiment_network_sparse.png')\n\nlayer_0 = np.zeros(10)\n\nlayer_0\n\nlayer_0[4] = 1\nlayer_0[9] = 1\n\nlayer_0\n\nweights_0_1 = np.random.randn(10,5)\n\nlayer_0.dot(weights_0_1)\n\nindices = [4,9]\n\nlayer_1 = np.zeros(5)\n\nfor index in indices:\n layer_1 += (weights_0_1[index])\n\nlayer_1\n\nImage(filename='sentiment_network_sparse_2.png')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
Startupsci/data-science-notebooks
titanic-data-science-solutions.ipynb
mit
[ "Titanic Data Science Solutions\nThis notebook is companion to the book Data Science Solutions. The notebook walks us through a typical workflow for solving data science competitions at sites like Kaggle.\nThere are several excellent notebooks to study data science competition entries. However many will skip some of the explanation on how the solution is developed as these notebooks are developed by experts for experts. The objective of this notebook is to follow a step-by-step workflow, explaining each step and rationale for every decision we take during solution development.\nWorkflow stages\nThe competition solution workflow goes through seven stages described in the Data Science Solutions book's sample chapter online here.\n\nQuestion or problem definition.\nAcquire training and testing data.\nWrangle, prepare, cleanse the data.\nAnalyze, identify patterns, and explore the data.\nModel, predict and solve the problem.\nVisualize, report, and present the problem solving steps and final solution.\nSupply or submit the results.\n\nThe workflow indicates general sequence of how each stage may follow the other. However there are use cases with exceptions.\n\nWe may combine mulitple workflow stages. We may analyze by visualizing data.\nPerform a stage earlier than indicated. We may analyze data before and after wrangling.\nPerform a stage multiple times in our workflow. Visualize stage may be used multiple times.\nDrop a stage altogether. We may not need supply stage to productize or service enable our dataset for a competition.\n\nQuestion and problem definition\nCompetition sites like Kaggle define the problem to solve or questions to ask while providing the datasets for training your data science model and testing the model results against a test dataset. The question or problem definition for Titanic Survival competition is described here at Kaggle.\n\nKnowing from a training set of samples listing passengers who survived or did not survive the Titanic disaster, can our model determine based on a given test dataset not containing the survival information, if these passengers in the test dataset survived or not.\n\nWe may also want to develop some early understanding about the domain of our problem. This is described on the Kaggle competition description page here. Here are the highlights to note.\n\nOn April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. Translated 32% survival rate.\nOne of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew.\nAlthough there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class.\n\nWorkflow goals\nThe data science solutions workflow solves for seven major goals.\nClassifying. We may want to classify or categorize our samples. We may also want to understand the implications or correlation of different classes with our solution goal.\nCorrelating. One can approach the problem based on available features within the training dataset. Which features within the dataset contribute significantly to our solution goal? Statistically speaking is there a correlation among a feature and solution goal? As the feature values change does the solution state change as well, and visa-versa? This can be tested both for numerical and categorical features in the given dataset. 
We may also want to determine correlation among features other than survival for subsequent goals and workflow stages. Correlating certain features may help in creating, completing, or correcting features.\nConverting. For modeling stage, one needs to prepare the data. Depending on the choice of model algorithm one may require all features to be converted to numerical equivalent values. So for instance converting text categorical values to numeric values.\nCompleting. Data preparation may also require us to estimate any missing values within a feature. Model algorithms may work best when there are no missing values.\nCorrecting. We may also analyze the given training dataset for errors or possibly innacurate values within features and try to corrent these values or exclude the samples containing the errors. One way to do this is to detect any outliers among our samples or features. We may also completely discard a feature if it is not contribting to the analysis or may significantly skew the results.\nCreating. Can we create new features based on an existing feature or a set of features, such that the new feature follows the correlation, conversion, completeness goals.\nCharting. How to select the right visualization plots and charts depending on nature of the data and the solution goals. A good start is to read the Tableau paper on Which chart or graph is right for you?.", "# data analysis and wrangling\nimport pandas as pd\nimport numpy as np\nimport random as rnd\n\n# visualization\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# machine learning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier", "Acquire data\nThe Python Pandas packages helps us work with our datasets. We start by acquiring the training and testing datasets into Pandas DataFrames.", "# read titanic training & test csv files as a pandas DataFrame\ntrain_df = pd.read_csv('data/titanic-kaggle/train.csv')\ntest_df = pd.read_csv('data/titanic-kaggle/test.csv')", "Analyze by describing data\nPandas also helps describe the datasets answering following questions early in our project.\nWhich features are available in the dataset?\nNoting the feature names for directly manipulating or analyzing these. These feature names are described on the Kaggle data page here.", "print train_df.columns.values", "Which features are categorical?\nThese values classify the samples into sets of similar samples. Within categorical features are the values nominal, ordinal, ratio, or interval based? Among other things this helps us select the appropriate plots for visualization.\n\nCategorical: Survived, Sex, and Embarked. Ordinal: Pclass.\n\nWhich features are numerical?\nWhich features are numerical? These values change from sample to sample. Within numerical features are the values discrete, continuous, or timeseries based? Among other things this helps us select the appropriate plots for visualization.\n\nContinous: Age, Fare. Discrete: SibSp, Parch.", "# preview the data\ntrain_df.head()", "Which features are mixed data types?\nNumerical, alphanumeric data within same feature. These are candidates for correcting goal.\n\nTicket is a mix of numeric and alphanumeric data types. 
Cabin is alphanumeric.\n\nWhich features may contain errors or typos?\nThis is harder to review for a large dataset, however reviewing a few samples from a smaller dataset may just tell us outright, which features may require correcting.\n\nName feature may contain errors or typos as there are several ways used to describe a name including titles, round brackets, and quotes used for alternative or short names.", "train_df.tail()", "Which features contain blank, null or empty values?\nThese will require correcting.\n\nCabin > Age > Embarked features contain a number of null values in that order for the training dataset.\nCabin > Age are incomplete in case of test dataset.\n\nWhat are the data types for various features?\nHelping us during converting goal.\n\nSeven features are integer or floats. Six in case of test dataset.\nFive features are strings (object).", "train_df.info()\nprint('_'*40)\ntest_df.info()", "What is the distribution of numerical feature values across the samples?\nThis helps us determine, among other early insights, how representative is the training dataset of the actual problem domain.\n\nTotal samples are 891 or 40% of the actual number of passengers on board the Titanic (2,224).\nSurvived is a categorical feature with 0 or 1 values.\nAround 38% samples survived representative of the actual survival rate.\nMost passengers (> 75%) did not travel with parents or children.\nMore than 35% passengers had a sibling on board.\nFares varied significantly with few passengers (<1%) paying as high as $512.\nFew elderly passengers (<1%) within age range 65-80.", "train_df.describe(percentiles=[.25, .5, .75])\n# Review survived rate using `percentiles=[.61, .62]` knowing our problem description mentions 38% survival rate.\n# Review Parch distribution using `percentiles=[.75, .8]`\n# Sibling distribution `[.65, .7]`\n# Age and Fare `[.1, .2, .3, .4, .5, .6, .7, .8, .9, .99]`", "What is the distribution of categorical features?\n\nNames are unique across the dataset (count=unique=891)\nSex variable as two possible values with 65% male (top=male, freq=577/count=891).\nCabin values have several dupicates across samples. Alternatively several passengers shared a cabin.\nEmbarked takes three possible values. S port used by most passengers (top=S)\nTicket feature has high ratio (22%) of duplicate values (unique=681). Possibly an error as two passengers may not travel on the same ticket.", "train_df.describe(include=['O'])", "Assumtions based on data analysis\nWe arrive at following assumptions based on data analysis done so far. 
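Several of the assumptions below rest on the missing-value and duplicate counts reported by info() and describe() above; a compact way to pull those numbers out directly is sketched here (illustrative only, not a step in the original workflow).

```python
# Missing values per column, largest first, for both datasets.
print(train_df.isnull().sum().sort_values(ascending=False))
print(test_df.isnull().sum().sort_values(ascending=False))

# Share of Ticket values in the training data that are duplicates.
dup_ratio = 1.0 - train_df['Ticket'].nunique() / float(len(train_df))
print('Duplicate Ticket ratio: {:.0%}'.format(dup_ratio))
```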
We may validate these assumptions further before taking appropriate actions.\nCompleting.\n\nWe may want to complete Age feature as it is definitely correlated to survival.\nWe may want to complete the Embarked feature as it may also correlate with survival or another important feature.\n\nCorrecting.\n\nTicket feature may be dropped from our analysis as it contains high ratio of duplicates (22%) and there may not be a correlation between Ticket and survival.\nCabin feature may be dropped as it is highly incomplete or contains many null values both in training and test dataset.\nPassengerId may be dropped from training dataset as it does not contribute to survival.\nName feature is relatively non-standard, may not contribute directly to survival, so maybe dropped.\n\nCreating.\n\nWe may want to create a new feature called Family based on Parch and SibSp to get total count of family members on board.\nWe may want to engineer the Name feature to extract Title as a new feature.\nWe may want to create new feature for Age bands. This turns a continous numerical feature into an ordinal categorical feature.\nWe may also want to create a Fare range feature if it helps our analysis.\n\nCorrelating.\n\nDoes port of embarkation (Embarked) correlate with survival?\nDoes fare paid (range) correlate with survival?\n\nWe may also add to our assumptions based on the problem description noted earlier.\nClassifying.\n\nWomen (Sex=female) were more likely to have survived.\nChildren (Age<?) were more likely to have survived. \nThe upper-class passengers (Pclass=1) were more likely to have survived.\n\nAnalyze by visualizing data\nNow we can start confirming some of our assumptions using visualizations for analyzing the data.\nCorrelating numerical features\nLet us start by understanding correlations between numerical features and our solution goal (Survived).\nA histogram chart is useful for analyzing continous numerical variables like Age where banding or ranges will help identify useful patterns. The histogram can indicate distribution of samples using automatically defined bins or equally ranged bands. This helps us answer questions relating to specific bands (Did infants have better survival rate?)\nNote that x-axis in historgram visualizations represents the count of samples or passengers.\nObservations.\n\nInfants (Age <=4) had high survival rate.\nOldest passengers (Age = 80) survived.\nLarge number of 15-25 year olds did not survive.\nMost passengers are in 15-35 age range.\n\nDecisions.\nThis simple analysis confirms our assumptions as decisions for subsequent workflow stages.\n\nWe should consider Age (our assumption classifying #2) in our model training.\nComplete the Age feature for null values (completing #1).", "g = sns.FacetGrid(train_df, col='Survived')\ng.map(plt.hist, 'Age', bins=20)", "We can combine multiple features for identifying correlations using a single plot. This can be done with numerical and categorical features which have numeric values.\nObservations.\n\nPclass=3 had most passengers, however most did not survive. Confirms our classifying assumption #2.\nInfant passengers in Pclass=2 mostly survived. Further qualifies our classifying assumption #2.\nMost passengers in Pclass=1 survived. 
Confirms our classifying assumption #3.\nPclass varies in terms of Age distribution of passengers.\n\nDecisions.\n\nConsider Pclass for model training.", "grid = sns.FacetGrid(train_df, col='Pclass', hue='Survived')\ngrid.map(plt.hist, 'Age', alpha=.5, bins=20)\ngrid.add_legend();", "Correlating categorical features\nNow we can correlate categorical features with our solution goal.\nObservations.\n\nFemale passengers had much better survival rate than males. Confirms classifying (#1).\nException in Embarked=C where males had higher survival rate.\nMales had better survival rate in Pclass=3 when compared with Pclass=2 for C and Q ports. Completing (#2).\nPorts of embarkation have varying survival rates for Pclass=3 and among male passengers. Correlating (#1).\n\nDecisions.\n\nAdd Sex feature to model training.\nComplete and add Embarked feature to model training.", "grid = sns.FacetGrid(train_df, col='Embarked')\ngrid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette='deep')\ngrid.add_legend()", "Correlating categorical and numerical features\nWe may also want to correlate categorical features (with non-numeric values) and numeric features. We can consider correlating Embarked (Categorical non-numeric), Sex (Categorical non-numeric), Fare (Numeric continuous), with Survived (Categorical numeric).\nObservations.\n\nHigher fare paying passengers had better survival. Confirms our assumption for creating (#4) fare ranges.\nPort of embarkation correlates with survival rates. Confirms correlating (#1) and completing (#2).\n\nDecisions.\n\nConsider banding Fare feature.", "grid = sns.FacetGrid(train_df, col='Embarked', hue='Survived', palette={0: 'k', 1: 'w'})\ngrid.map(sns.barplot, 'Sex', 'Fare', alpha=.5, ci=None)\ngrid.add_legend()", "Wrangle data\nWe have collected several assumptions and decisions regarding our datasets and solution requirements. So far we did not have to change a single feature or value to arrive at these. Let us now execute our decisions and assumptions for correcting, creating, and completing goals.\nCorrecting by dropping features\nThis is a good starting goal to execute. By dropping features we are dealing with fewer data points. Speeds up our notebook and eases the analysis.\nBased on our assumptions and decisions we want to drop the Cabin (correcting #2) and Ticket (correcting #1) features.\nNote that where applicable we perform operations on both training and testing datasets together to stay consistent.", "train_df = train_df.drop(['Ticket', 'Cabin'], axis=1)\ntest_df = test_df.drop(['Ticket', 'Cabin'], axis=1)", "Creating new feature extracting from existing\nWe want to analyze if Name feature can be engineered to extract titles and test correlation between titles and survival, before dropping Name and PassengerId features.\nIn the following code we extract Title feature using regular expressions. The RegEx pattern (\\w+\\.) matches the first word which ends with a dot character within Name feature. The expand=False flag returns a DataFrame.\nObservations.\nWhen we plot Title, Age, and Survived, we note the following observations.\n\nMost titles band Age groups accurately. 
For example: Master title has Age mean of 5 years.\nSurvival among Title Age bands varies slightly.\nCertain titles mostly survived (Mme, Lady, Sir) or did not (Don, Rev, Jonkheer).\n\nDecision.\n\nWe decide to retain the new Title feature for model training.", "train_df['Title'] = train_df.Name.str.extract('(\\w+\\.)', expand=False)\nsns.barplot(hue=\"Survived\", x=\"Age\", y=\"Title\", data=train_df, ci=False)", "Let us extract the Title feature for the training dataset as well.\nThen we can safely drop the Name feature from training and testing datasets and the PassengerId feature from the training dataset.", "test_df['Title'] = test_df.Name.str.extract('(\\w+\\.)', expand=False)\n\ntrain_df = train_df.drop(['Name', 'PassengerId'], axis=1)\ntest_df = test_df.drop(['Name'], axis=1)\ntest_df.describe(include=['O'])", "Converting a categorical feature\nNow we can convert features which contain strings to numerical values. This is required by most model algorithms. Doing so will also help us in achieving the feature completing goal.\nLet us start by converting Sex feature to a new feature called Gender where female=1 and male=0.", "train_df['Gender'] = train_df['Sex'].map( {'female': 1, 'male': 0} ).astype(int)\ntrain_df.loc[:, ['Gender', 'Sex']].head()", "We do this both for training and test datasets.", "test_df['Gender'] = test_df['Sex'].map( {'female': 1, 'male': 0} ).astype(int)\ntest_df.loc[:, ['Gender', 'Sex']].head()", "We can now drop the Sex feature from our datasets.", "train_df = train_df.drop(['Sex'], axis=1)\ntest_df = test_df.drop(['Sex'], axis=1)\ntrain_df.head()", "Completing a numerical continuous feature\nNow we should start estimating and completing features with missing or null values. We will first do this for the Age feature.\nWe can consider three methods to complete a numerical continuous feature.\n\n\nA simple way is to generate random numbers between mean and standard deviation.\n\n\nMore accurate way of guessing missing values is to use other correlated features. In our case we note correlation among Age, Gender, and Pclass. Guess Age values using median values for Age across sets of Pclass and Gender feature combinations. So, median Age for Pclass=1 and Gender=0, Pclass=1 and Gender=1, and so on...\n\n\nCombine methods 1 and 2. So instead of guessing age values based on median, use random numbers between mean and standard deviation, based on sets of Pclass and Gender combinations.\n\n\nMethod 1 and 3 will introduce random noise into our models. The results from multiple executions might vary. 
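As an aside, method 2 can also be written very compactly with a pandas groupby, as in the sketch below; this is only an illustration (the AgeFill_alt column name is made up), and the notebook implements the same idea with an explicit loop over Gender and Pclass in the cells that follow.

```python
# Illustrative sketch of method 2: fill missing ages with the median age
# of the matching Gender x Pclass group. The loop-based version below is
# the one actually applied to the data.
median_age = train_df.groupby(['Gender', 'Pclass'])['Age'].transform('median')
train_df['AgeFill_alt'] = train_df['Age'].fillna(median_age)
```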
We will prefer method 2.", "grid = sns.FacetGrid(train_df, col='Pclass', hue='Gender')\ngrid.map(plt.hist, 'Age', alpha=.5, bins=20)\ngrid.add_legend();", "Let us start by preparing an empty array to contain guessed Age values based on Pclass x Gender combinations.", "guess_ages = np.zeros((2,3))\nguess_ages", "Now we iterate over Gender (0 or 1) and Pclass (1, 2, 3) to calculate guessed values of Age for the six combinations.\nNote that we also tried creating the AgeFill feature using method 3 and realized during model stage that the correlation coeffficient of AgeFill is better when compared with the method 2.", "for i in range(0, 2):\n for j in range(0, 3):\n guess_df = train_df[(train_df['Gender'] == i) & \\\n (train_df['Pclass'] == j+1)]['Age'].dropna()\n \n # Correlation of AgeFill is -0.014850\n # age_mean = guess_df.mean()\n # age_std = guess_df.std()\n # age_guess = rnd.uniform(age_mean - age_std, age_mean + age_std)\n \n # Correlation of AgeFill is -0.011304\n age_guess = guess_df.median()\n\n # Convert random age float to nearest .5 age\n guess_ages[i,j] = int( age_guess/0.5 + 0.5 ) * 0.5\n \nguess_ages\n\ntrain_df['AgeFill'] = train_df['Age']\n\nfor i in range(0, 2):\n for j in range(0, 3):\n train_df.loc[ (train_df.Age.isnull()) & (train_df.Gender == i) & (train_df.Pclass == j+1),\\\n 'AgeFill'] = guess_ages[i,j]\n\ntrain_df[train_df['Age'].isnull()][['Gender','Pclass','Age','AgeFill']].head(10)", "We repeat the feature completing goal for the test dataset.", "guess_ages = np.zeros((2,3))\n\nfor i in range(0, 2):\n for j in range(0, 3):\n guess_df = test_df[(test_df['Gender'] == i) & \\\n (test_df['Pclass'] == j+1)]['Age'].dropna()\n\n # Correlation of AgeFill is -0.014850\n # age_mean = guess_df.mean()\n # age_std = guess_df.std()\n # age_guess = rnd.uniform(age_mean - age_std, age_mean + age_std)\n\n # Correlation of AgeFill is -0.011304\n age_guess = guess_df.median()\n\n guess_ages[i,j] = int( age_guess/0.5 + 0.5 ) * 0.5\n\ntest_df['AgeFill'] = test_df['Age']\n\nfor i in range(0, 2):\n for j in range(0, 3):\n test_df.loc[ (test_df.Age.isnull()) & (test_df.Gender == i) & (test_df.Pclass == j+1),\\\n 'AgeFill'] = guess_ages[i,j]\n\ntest_df[test_df['Age'].isnull()][['Gender','Pclass','Age','AgeFill']].head(10)", "We can now drop the Age feature from our datasets.", "train_df = train_df.drop(['Age'], axis=1)\ntest_df = test_df.drop(['Age'], axis=1)\ntrain_df.head()", "Create new feature combining existing features\nWe can create a new feature for FamilySize which combines Parch and SibSp. This will enable us to drop Parch and SibSp from our datasets.\nNote that we commented out this code as we realized during model stage that the combined feature is reducing the confidence score of our dataset instead of improving it. 
The correlation score of separate Parch feature is also better than combined FamilySize feature.", "# Logistic Regression Score is 0.81032547699214363\n# Parch correlation is -0.065878 and SibSp correlation is -0.370618\n\n# Decision: Retain Parch and SibSp as separate features\n\n# Logistic Regression Score is 0.80808080808080807\n# FamilySize correlation is -0.233974\n\n# train_df['FamilySize'] = train_df['SibSp'] + train_df['Parch']\n# test_df['FamilySize'] = test_df['SibSp'] + test_df['Parch']\n# train_df.loc[:, ['Parch', 'SibSp', 'FamilySize']].head(10)\n\n# train_df = train_df.drop(['Parch', 'SibSp'], axis=1)\n# test_df = test_df.drop(['Parch', 'SibSp'], axis=1)\n# train_df.head()", "We can also create an artificial feature combining Pclass and AgeFill.", "test_df['Age*Class'] = test_df.AgeFill * test_df.Pclass\ntrain_df['Age*Class'] = train_df.AgeFill * train_df.Pclass\ntrain_df.loc[:, ['Age*Class', 'AgeFill', 'Pclass']].head(10)", "Completing a categorical feature\nEmbarked feature takes S, Q, C values based on port of embarkation. Our training dataset has two missing values. We simply fill these with the most common occurance.", "freq_port = train_df.Embarked.dropna().mode()[0]\nfreq_port\n\ntrain_df['EmbarkedFill'] = train_df['Embarked']\ntrain_df.loc[train_df['Embarked'].isnull(), 'EmbarkedFill'] = freq_port\ntrain_df[train_df['Embarked'].isnull()][['Embarked','EmbarkedFill']].head(10)", "We can now drop the Embarked feature from our datasets.", "test_df['EmbarkedFill'] = test_df['Embarked']\ntrain_df = train_df.drop(['Embarked'], axis=1)\ntest_df = test_df.drop(['Embarked'], axis=1)\ntrain_df.head()", "Converting categorical feature to numeric\nWe can now convert the EmbarkedFill feature by creating a new numeric Port feature.", "Ports = list(enumerate(np.unique(train_df['EmbarkedFill'])))\nPorts_dict = { name : i for i, name in Ports } \ntrain_df['Port'] = train_df.EmbarkedFill.map( lambda x: Ports_dict[x]).astype(int)\n\nPorts = list(enumerate(np.unique(test_df['EmbarkedFill'])))\nPorts_dict = { name : i for i, name in Ports }\ntest_df['Port'] = test_df.EmbarkedFill.map( lambda x: Ports_dict[x]).astype(int)\n\ntrain_df[['EmbarkedFill', 'Port']].head(10)", "Similarly we can convert the Title feature to numeric enumeration TitleBand banding age groups with titles.", "Titles = list(enumerate(np.unique(train_df['Title'])))\nTitles_dict = { name : i for i, name in Titles } \ntrain_df['TitleBand'] = train_df.Title.map( lambda x: Titles_dict[x]).astype(int)\n\nTitles = list(enumerate(np.unique(test_df['Title'])))\nTitles_dict = { name : i for i, name in Titles } \ntest_df['TitleBand'] = test_df.Title.map( lambda x: Titles_dict[x]).astype(int)\n\ntrain_df[['Title', 'TitleBand']].head(10)", "Now we can safely drop the EmbarkedFill and Title features. We this we now have a dataset that only contains numerical values, a requirement for the model stage in our workflow.", "train_df = train_df.drop(['EmbarkedFill', 'Title'], axis=1)\ntest_df = test_df.drop(['EmbarkedFill', 'Title'], axis=1)\ntrain_df.head()", "Quick completing and converting a numeric feature\nWe can now complete the Fare feature for single missing value in test dataset using mode to get the value that occurs most frequently for this feature. We do this in a single line of code.\nNote that we are not creating an intermediate new feature or doing any further analysis for correlation to guess missing feature as we are replacing only a single value. 
The completion goal achieves desired requirement for model algorithm to operate on non-null values.\nWe may also want round off the fare to two decimals as it represents currency.", "test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)\n\ntrain_df['Fare'] = train_df['Fare'].round(2)\ntest_df['Fare'] = test_df['Fare'].round(2)\n\ntest_df.head(10)", "Model, predict and solve\nNow we are ready to train a model and predict the required solution. There are 60+ predictive modelling algorithms to choose from. We must understand the type of problem and solution requirement to narrow down to a select few models which we can evaluate. Our problem is a classification and regression problem. We want to identify relationship between output (Survived or not) with other variables or features (Gender, Age, Port...). We are also perfoming a category of machine learning which is called supervised learning as we are training our model with a given dataset. With these two criteria - Supervised Learning plus Classification and Regression, we can narrow down our choice of models to a few. These include:\n\nLogistic Regression\nKNN or k-Nearest Neighbors\nSupport Vector Machines\nNaive Bayes classifier\nDecision Tree\nRandom Forrest\nPerceptron\nArtificial neural network\nRVM or Relevance Vector Machine", "X_train = train_df.drop(\"Survived\", axis=1)\nY_train = train_df[\"Survived\"]\nX_test = test_df.drop(\"PassengerId\", axis=1).copy()\nX_train.shape, Y_train.shape, X_test.shape", "Logistic Regression is a useful model to run early in the workflow. Logistic regression measures the relationship between the categorical dependent variable (feature) and one or more independent variables (features) by estimating probabilities using a logistic function, which is the cumulative logistic distribution. Reference Wikipedia.\nNote the confidence score generated by the model based on our training dataset.", "# Logistic Regression\n\nlogreg = LogisticRegression()\nlogreg.fit(X_train, Y_train)\nY_pred = logreg.predict(X_test)\nacc_log = round(logreg.score(X_train, Y_train) * 100, 2)\nacc_log", "We can use Logistic Regression to validate our assumptions and decisions for feature creating and completing goals. This can be done by calculating the correlation coefficient for all features as these relate to survival.\n\nGender as expected has the highest corrlation with Survived.\nSurprisingly Fare ranks higher than Age.\nOur decision to extract TitleBand feature from name is a good one.\nThe artificial feature Age*Class scores well against existing features.\nWe tried creating a feature combining Parch and SibSp into FamilySize. Parch ended up with better correlation coefficient and FamilySize reduced our LogisticRegression confidence score.\nAnother surprise is that Pclass contributes least to our model, even worse than Port of embarkation, or the artificial feature Age*Class.", "coeff_df = pd.DataFrame(train_df.columns.delete(0))\ncoeff_df.columns = ['Feature']\ncoeff_df[\"Correlation\"] = pd.Series(logreg.coef_[0])\n\ncoeff_df.sort_values(by='Correlation', ascending=False)", "Next we model using Support Vector Machines which are supervised learning models with associated learning algorithms that analyze data used for classification and regression analysis. Given a set of training samples, each marked as belonging to one or the other of two categories, an SVM training algorithm builds a model that assigns new test samples to one category or the other, making it a non-probabilistic binary linear classifier. 
Reference Wikipedia.\nNote that the model generates a confidence score which is higher than Logistics Regression model.", "# Support Vector Machines\n\nsvc = SVC()\nsvc.fit(X_train, Y_train)\nY_pred = svc.predict(X_test)\nacc_svc = round(svc.score(X_train, Y_train) * 100, 2)\nacc_svc", "In pattern recognition, the k-Nearest Neighbors algorithm (or k-NN for short) is a non-parametric method used for classification and regression. A sample is classified by a majority vote of its neighbors, with the sample being assigned to the class most common among its k nearest neighbors (k is a positive integer, typically small). If k = 1, then the object is simply assigned to the class of that single nearest neighbor. Reference Wikipedia.\nKNN confidence score is better than Logistics Regression but worse than SVM.", "knn = KNeighborsClassifier(n_neighbors = 3)\nknn.fit(X_train, Y_train)\nY_pred = knn.predict(X_test)\nacc_knn = round(knn.score(X_train, Y_train) * 100, 2)\nacc_knn", "In machine learning, naive Bayes classifiers are a family of simple probabilistic classifiers based on applying Bayes' theorem with strong (naive) independence assumptions between the features. Naive Bayes classifiers are highly scalable, requiring a number of parameters linear in the number of variables (features) in a learning problem. Reference Wikipedia.\nThe model generated confidence score is the lowest among the models evaluated so far.", "# Gaussian Naive Bayes\n\ngaussian = GaussianNB()\ngaussian.fit(X_train, Y_train)\nY_pred = gaussian.predict(X_test)\nacc_gaussian = round(gaussian.score(X_train, Y_train) * 100, 2)\nacc_gaussian", "The perceptron is an algorithm for supervised learning of binary classifiers (functions that can decide whether an input, represented by a vector of numbers, belongs to some specific class or not). It is a type of linear classifier, i.e. a classification algorithm that makes its predictions based on a linear predictor function combining a set of weights with the feature vector. The algorithm allows for online learning, in that it processes elements in the training set one at a time. Reference Wikipedia.", "# Perceptron\n\nperceptron = Perceptron()\nperceptron.fit(X_train, Y_train)\nY_pred = perceptron.predict(X_test)\nacc_perceptron = round(perceptron.score(X_train, Y_train) * 100, 2)\nacc_perceptron\n\n# Linear SVC\n\nlinear_svc = LinearSVC()\nlinear_svc.fit(X_train, Y_train)\nY_pred = linear_svc.predict(X_test)\nacc_linear_svc = round(linear_svc.score(X_train, Y_train) * 100, 2)\nacc_linear_svc\n\n# Stochastic Gradient Descent\n\nsgd = SGDClassifier()\nsgd.fit(X_train, Y_train)\nY_pred = sgd.predict(X_test)\nacc_sgd = round(sgd.score(X_train, Y_train) * 100, 2)\nacc_sgd", "This model uses a decision tree as a predictive model which maps features (tree branches) to conclusions about the target value (tree leaves). Tree models where the target variable can take a finite set of values are called classification trees; in these tree structures, leaves represent class labels and branches represent conjunctions of features that lead to those class labels. Decision trees where the target variable can take continuous values (typically real numbers) are called regression trees. 
Reference Wikipedia.\nThe model confidence score is the highest among models evaluated so far.", "# Decision Tree\n\ndecision_tree = DecisionTreeClassifier()\ndecision_tree.fit(X_train, Y_train)\nY_pred = decision_tree.predict(X_test)\nacc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2)\nacc_decision_tree", "The next model Random Forests is one of the most popular. Random forests or random decision forests are an ensemble learning method for classification, regression and other tasks, that operate by constructing a multitude of decision trees (n_estimators=100) at training time and outputting the class that is the mode of the classes (classification) or mean prediction (regression) of the individual trees. Reference Wikipedia.\nThe model confidence score is the highest among models evaluated so far. We decide to use this model's output (Y_pred) for creating our competition submission of results.", "# Random Forest\n\nrandom_forest = RandomForestClassifier(n_estimators=100)\nrandom_forest.fit(X_train, Y_train)\nY_pred = random_forest.predict(X_test)\nrandom_forest.score(X_train, Y_train)\nacc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)\nacc_random_forest", "Model evaluation\nWe can now rank our evaluation of all the models to choose the best one for our problem. While both Decision Tree and Random Forest score the same, we choose to use Random Forest as they correct for decision trees' habit of overfitting to their training set.", "models = pd.DataFrame({\n 'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', \n 'Random Forest', 'Naive Bayes', 'Perceptron', \n 'Stochastic Gradient Decent', 'Linear SVC', \n 'Decision Tree'],\n 'Score': [acc_svc, acc_knn, acc_log, \n acc_random_forest, acc_gaussian, acc_perceptron, \n acc_sgd, acc_linear_svc, acc_decision_tree]})\nmodels.sort_values(by='Score', ascending=False)\n\nsubmission = pd.DataFrame({\n \"PassengerId\": test_df[\"PassengerId\"],\n \"Survived\": Y_pred\n })\nsubmission.to_csv('data/titanic-kaggle/submission.csv', index=False)", "Our submission to the competition site Kaggle results in scoring 3,883 of 6,082 competition entries. This result is indicative while the competition is running. This result only accounts for part of the submission dataset. Not bad for our first attempt. Any suggestions to improve our score are most welcome.\nReferences\nThis notebook has been created based on great work done solving the Titanic competition and other sources.\n\nA journey through Titanic\nGetting Started with Pandas: Kaggle's Titanic Competition\nTitanic Best Working Classifier" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mannyfin/IRAS
Type C calibrations/TypeC calcs corrected.ipynb
bsd-3-clause
[ "The situation\nType C thermocouples are not NIST calibrated to below 273.15 K. For my research specific scenario, I need to cool my sample (Molybdenum) to cryogenic temperatures and also anneal to very high ~2000 K. There is no thermocouple with these properties. \nThe solution\nWe know that Type K thermocouples are accurate down to cryogenic temperatures. So what I've done here is to read the Type K temperature and record the corresponding Type C mV to create a calibration table. Both thermocouples were spot welded to a large mass very close to one another to ensure the temperature readings will be accurate.\nThen I will use a polynomial fit to get the low T calibration for the Type C thermocouple.", "# import a few packages\n%matplotlib notebook\nfrom thermocouples_reference import thermocouples\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sympy as sp\nfrom scipy import optimize, interpolate, signal\n\n\ntypeC=thermocouples['C']\n\n# make sure you are in the same dir as the file\n# read in the file and drop Na cols\ndf = pd.read_excel('Type C Table 4-2-18.xlsx')\ndf.dropna(axis=1, inplace=True)\ndf.head()\n\n# NIST has values calibrated for T > 273.15 K, lets find the Tref based on these points\n# I am using Kelvin for all T. The CJC is quoted in deg C.\ntempdf = df.query('T>273.15')\ntempdf.head()\n\n# Let's find the T_ref by using this function to take the TypeC mV and the T to find the Tref\ndef find_Tref(mV, T):\n x = np.arange(290, 301, 0.01)\n x = x[::-1] # lets reverse x\n i = 1\n while typeC.inverse_KmV(mV, Tref=x[i]) - T >= 0:\n i += 1\n# print(x[i])\n return x[i]\n\n# This isn't the fastest way to do things, but since its just a short amount of rows, lets iterate over the mV and T\n# to find Tref\nTreflist=[]\nfor idx in tempdf.index:\n # print(idx)\n Treflist.append(find_Tref(mV=tempdf['TypeCmV'][idx], T=tempdf['T'][idx]))\n\n\nprint( ['%0.2f'% x for x in Treflist])\n\n# now average the Trefs:\navg_Tref = np.mean(Treflist)\nprint(avg_Tref)\n# I will use this Tref for further calcs\nTref_emf = typeC.emf_mVK(avg_Tref)\nprint(Tref_emf)\n# The Tref_emf value is very close to the value in the table at 273.15 K, so we'll use this value to correct the new values\n# The value taken at 273.15 K was during the cooling process and is likely to be less accurate than the room temperature value\n# across these multiple observations\n\n# The emf correction for 273.15 K is then: calibrated_emf = raw_emf + Tref_emf\n\n# Let's add this to the df we initially imported...\n\ndf['TypeC_calib_mV'] = df['TypeCmV'] + Tref_emf\n\ndf.head()\n# Compared to the NIST table, we appear to be off at most a little less than 1 deg K\n\n\n# Had we used the CJC temperature as a proxy for room temp, we would've been even more off.\n# compare the TypeCmV using Tref = CJC vs using Tref = 294.67:\nprint(typeC.emf_mVK(291.22, Tref =(25.26+273.15)))\nprint(typeC.emf_mVK(291.22, Tref =avg_Tref))\n\n# Let's visualize these results\nplt.plot(df['T'], df['TypeC_calib_mV'], 'o', ms=0.5 )\nplt.xlabel('Temperature (K)')\nplt.ylabel('Type C calibrated emf (mV)')\n\n# Interesting. I cooled first to LN2 temperatures and then allowed the sample to heat up slowly by evaporating LN2\n# The data agrees fairly well (within ~3 K) between the heating and cooling curves. 
I didn't heat all the way back up.\n\n# Now let's fit the data to a polynomial using least squares\nfit_coeffs = np.polyfit(df['T'],df['TypeC_calib_mV'], deg = 10 , full=True)\n# print(fit_coeffs)\nfit_poly = np.poly1d(fit_coeffs[0])\nprint(fit_poly)\n\n\nfig, ax = plt.subplots()\nax.plot(df['T'], df['TypeC_calib_mV'],'o',ms='0.5')\nax.plot(df['T'], fit_poly(df['T']) , 'o', ms='0.5')\n", "The 10th degree polynomial appears to give the best fit overall. \nThe lower order polynomials don't fit the curve especially well below 100 K.\nAlso, the polynomial tracks the heating curve (the slightly higher mV points from 80-150K) a little more closely than the cooling curve (295 to 80 K). Heating occurred much more slowly than cooling, so I expect it to be more accurate anyway.", "# These mV values are also within ~0.5 K of one another\nprint(fit_poly(273.15)) # fit\nprint(typeC.emf_mVK(273.15)) # NIST value ", "It's also a good idea to check that the polynomial does not have any inflection points, at least in the area we are interested in using the polynomial (77 K - 273.15 K). We can use the second derivative test to see if this will be important for our case.", "x = sp.symbols('x')\npolynom = sp.Poly(fit_coeffs[0],x)\n# print(fit_coeffs[0])\n# find the second derivative of the polynomial\nsecond_derivative = polynom.diff(x,x)\nprint(second_derivative)\nsp.solve(second_derivative,x, domain= sp.S.Reals)\n\nprint(second_derivative.evalf(subs={x:77}))\nprint(second_derivative.evalf(subs={x:80}))\nprint('\\n')\nprint(second_derivative.evalf(subs={x:120}))\nprint(second_derivative.evalf(subs={x:125}))\nprint('\\n')\nprint(second_derivative.evalf(subs={x:135}))\nprint(second_derivative.evalf(subs={x:145}))\nprint('\\n')\nprint(second_derivative.evalf(subs={x:283}))\nprint(second_derivative.evalf(subs={x:291}))\n\nfirst_deriv = polynom.diff(x)\nprint(first_deriv)\nsp.solve(first_deriv,x, domain= sp.S.Reals)\n\nprint(first_deriv.evalf(subs={x:80}))\nprint(first_deriv.evalf(subs={x:84}))", "Well, this is not optimal: there exists a local minimum at 83.86 K in our polynomial fit. We can attempt to fit an exponential curve to this very low temperature data and append this to the polynomial function.", "lowT_df = df.query('T<103')\n# Now let's fit the data to an exponential\n# print(np.min(lowT_df['TypeC_calib_mV']))\ndef func(x, a, b, c, d):\n    return a * np.exp(b * x - c) + d\n\nfit_coeffs = optimize.curve_fit(func, lowT_df['T'],lowT_df['TypeC_calib_mV'], p0=(1, 1, 90, -3))\nprint(fit_coeffs)\na = fit_coeffs[0][0]\nb = fit_coeffs[0][1]\nc = fit_coeffs[0][2]\nd = fit_coeffs[0][3]\nexpfunc = func(lowT_df['T'],a,b,c,d)\n\nfig3, ax3 = plt.subplots()\n# ax3.plot(lowT_df['T'], a*np.exp(b*lowT_df['TypeC_calib_mV']), 'o',ms='0.5')\nax3.plot(lowT_df['T'], lowT_df['TypeC_calib_mV'], 'o',ms='0.5')\nax3.plot(lowT_df['T'], expfunc, 'o',ms='0.5',color='r')", "This appears to be a better fit than the polynomial in this regime. Now let's concatenate these two functions and interpolate near the points around 100 K to smooth things out if necessary.
Recall that the two functions are fit_poly and expfunc", "# select data from 103 to 120 K just so we can see the point of intersection a little better\ncheckT_df = df.query('77<=T<=120') \nfig4, ax4 = plt.subplots()\nax4.plot(checkT_df['T'], fit_poly(checkT_df['T']), 'o', ms=0.5, label='polyfit', color='g')\nax4.plot(lowT_df['T'], expfunc, 'o', ms=0.5, label='expfunc', color='r')\nax4.plot(df['T'], df['TypeC_calib_mV'],'o',ms='0.5', label='Data', color='b')\nax4.set_xlim([80,110])\nax4.set_ylim([-1.88,-1.75])\nax4.legend()", "The two fitted plots almost match near 103 K, but there is a little 'cusp'-like shape near the point of intersection. Let's smooth it out with a tanh switch (a minimal standalone sketch of this blending appears after this notebook's listing). Also, notice that the expfunc fit is a little better than the polyfit.", "def switch_fcn(x, switchpoint, smooth):\n    s = 0.5 + 0.5*np.tanh((x - switchpoint)/smooth)\n    return s\nsw = switch_fcn(df['T'], 103, 0.2)\nexpfunc2 = func(df['T'],a,b,c,d)\nlen(expfunc2)\n\nfig, ax = plt.subplots()\nax.plot(df['T'], sw,'o', ms=0.5)\n\n\ndef combined(switch, low_f1, high_f2):\n    comb = (1-switch)*low_f1 + switch*high_f2\n    return comb\ncomb_fcn = combined(sw, expfunc2,fit_poly(df['T']))\nfig, ax = plt.subplots()\nax.plot(df['T'], comb_fcn, 'o', ms=0.5)\n\nfig5, ax5 = plt.subplots()\nax5.plot(df['T'],comb_fcn, 'o', ms=2, label='combined')\nax5.plot(checkT_df['T'], fit_poly(checkT_df['T']), 'o', ms=0.5, label='polyfit', color='g')\nax5.plot(lowT_df['T'], expfunc, 'o', ms=0.5, label='expfunc2', color='r')\nax5.set_xlim([80,110])\nax5.set_ylim([-1.88,-1.75])\nax5.legend()", "Now I will take the blended fit, evaluate it from 77 K to 273 K for calibration, and append those values to the NIST values", "# low temperature array\nlow_temp = np.arange(77.15,273.15, 0.1)\n# low_temp_calib = fit_poly(low_temp)\nlow_temp_calib = combined(switch_fcn(low_temp, 103, 3), func(low_temp,a,b,c,d), fit_poly(low_temp))\n\n# high temperature array\nhigh_temp = np.arange(273.15,2588.15, 0.1)\nhigh_temp_nist = typeC.emf_mVK(high_temp)\n\n# concatenate and put into a dataframe and output to excel\nTemperature = np.concatenate([low_temp, high_temp])\nTypeC_mV = np.concatenate([low_temp_calib, high_temp_nist])\n\ntypeC_calibration = pd.DataFrame(data=TypeC_mV, index=Temperature, dtype='float32', columns = ['Type C (mV)'])\ntypeC_calibration.index.name = 'Temperature (Kelvin)'\n\nprint(typeC_calibration.head())\nprint(typeC_calibration.tail())\n\n# Uncomment these lines and run the cell to output a calibration table\n# write to excel\n\n# xlwrite = pd.ExcelWriter('Type C calibration_low_res.xlsx')\n# typeC_calibration.to_excel(xlwrite)\n# xlwrite.save()", "But wait! Suppose we also want to fix that discontinuity at 273.15 K? We can apply the same procedure as before.\n1. Apply a tanh(x) switch function: $switch = 0.5 + 0.5 * np.tanh((x - switchpoint)/smooth)$\n2. Combine both functions: $comb = (1 - switch) * f1 + switch * f2$", "\nlow_calib = combined(switch_fcn(Temperature, 103, 3), func(Temperature,a,b,c,d), fit_poly(Temperature))\n\nhigh_calib = pd.DataFrame(index=high_temp, data=high_temp_nist,columns=['mV'])\ndummy_df = pd.DataFrame(index=low_temp, data=np.zeros(len(low_temp)),columns=['mV'])\nconcat_high_calib = dummy_df.append(high_calib)\nprint(concat_high_calib.loc[272.9:273.5])\n\nfreezept_calib = combined(switch_fcn(Temperature, 273.15, 0.45), low_calib, concat_high_calib['mV'] )\nfreezept_calib.index.name = 'T'\n\nfreezept_calib.loc[272.9:273.5]", "The prior value at 273.15 K was -0.00867, whereas the actual value is 0.
After the smoothing, the new value is -0.004336, about half of the prior value. Some of the values a little after 273.15 do not match exactly with the NIST table, but it is much better than the jump that we had before.", "fig, ax = plt.subplots()\nfreezept_calib.plot(ax=ax, label ='combined')\nax.plot(Temperature,low_calib, label = 'low calib')\nax.plot(Temperature,concat_high_calib, label= 'high_calib')\nax.set_ylim([-.04,0.04])\nax.set_xlim([268,277])\nax.legend()\n\nprint(signal.argrelmin(freezept_calib.values))\n# print(signal.argrelextrema(freezept_calib.values,np.less))\n# print(signal.argrelextrema(freezept_calib.values,np.greater))\n# No local maxima or minima!\n\n# How about candidates for inflection points?\n\ndf = np.gradient(freezept_calib,0.1,)\nfig, ax = plt.subplots()\nax.plot(Temperature, df)\n\nd2f = np.gradient(df, 0.1)\nax.plot(Temperature, d2f)\n\nd2f[:5]\n\n# Uncomment these lines and run the cell to output a calibration table\n# write to excel\n\nxlwrite = pd.ExcelWriter('Type C calibration_corrected_temp.xlsx')\n# freezept_calib is a Series, not a Dataframe, so use the line below\nfreezept_calib.to_frame().to_excel(xlwrite)\nxlwrite.save()\n\n# 4212018\nfig, ax = plt.subplots()\nax.plot(Temperature,TypeC_mV)\n\n# np.fft.fft(TypeC_mV)\n\ndef scaled_data(data, low_range=0, high_range=1, standardize=False, print_=False):\n \"\"\"\n scale data from input range to (low_range,high_range)\n assumes data is a np 1d array\n also allows capability for standardization (mean=0 and variance = 1)\n \"\"\"\n _min_ = np.min(data)\n _max_ = np.max(data)\n \n if standardize is True:\n data = data - np.mean(data) #remove mean\n scaled_data = data/np.std(data) #unit std\n if print_ is True:\n print('mean: '+str(np.mean(scaled_data)))\n print('std: '+str(np.std(scaled_data)))\n else:\n scaled_data = (high_range - low_range)*(data - _min_)/(_max_ - _min_) + low_range\n if print_ is True:\n print('max: = '+str(np.max(scaled_data)))\n print('min: = '+str(np.min(scaled_data)))\n \n return scaled_data\n\ndef revert_to_unscaled(scaled, original):\n \"\"\"\n reverts normzlied data back to original scaling\n \"\"\"\n scaled_min = np.min(scaled)\n scaled_max = np.max(scaled)\n orig_min = np.min(original)\n orig_max = np.max(original)\n \n data = (scaled - scaled_min)*(orig_max - orig_min)/(scaled_max - scaled_min) + orig_min\n \n return data\n\nnormmV = scaled_data(TypeC_mV, standardize=False, print_=True)\nnormT = scaled_data(Temperature, standardize=False, print_=True)\nstdmV = scaled_data(TypeC_mV, standardize=True, print_=True)\nstdT = scaled_data(Temperature, standardize=True, print_=True)\n\nfig,ax = plt.subplots()\nax.plot(stdmV, stdT, label ='std')\nplt.xlabel('stdmV')\nplt.ylabel('stdT')\n\nfig,ax = plt.subplots()\nax.plot(normmV, normT, label ='norm')\nplt.xlabel('normmV')\nplt.ylabel('normT')\n\ndef fit_fcn(mV, param):\n return param[0]*np.tanh(param[1]*mV+param[2]) +param[3]\nfig, ax = plt.subplots()\n\nax.plot(normmV,normT, label='norm')\nax.plot(normmV,fit_fcn(normmV, param=[1,0.1,-1, 1]), label='start')\ntry:\n# popt, pcov = optimize.differential_evolution(fit_fcn, normmV, normT, p0=(2,1,-1,2), method='lm')\n bounds = [slice(0,100.,1), slice(-10.,100.,1), slice(-10.,100.,1),slice(-10.,100.,1)]\n# result = optimize.differential_evolution(lambda param:np.sum(fit_fcn(normmV, param) - normT)**2,bounds)\n# result = optimize.basinhopping(lambda param:np.sum(fit_fcn(normmV, param) - normT)**2,bounds)\n result = optimize.brute(lambda param:np.sum(fit_fcn(normmV, param) - 
normT)**2,bounds)\n# result = optimize.least_squares(lambda param:np.sum(fit_fcn(normmV, param) - normT)**2,[1,1,-1, 1])\n\n print(result)\n ax.plot(normmV,fit_fcn(normmV, result), label='fitted')\nexcept RuntimeError:\n print(\"curve fit failed\")\n\nplt.legend()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
pxcandeias/py-notebooks
FRF_plots.ipynb
mit
[ "<a id='top'></a>\nFrequency Response Functions (FRFs) plots\nThis notebook is about frequency response functions (FRFs) and the various ways they can be plotted.\nTable of contents\nPreamble\nDynamic system setup\nFrequency response function\nNyquist plot\nBode plot\nNichols plot\nOdds and ends\nPreamble\nWe will start by setting up the computational environment for this notebook. Since it was created with Python 2.7, we will import a few things from the \"future\". Furthermore, we will need numpy and scipy for the numerical simulations and matplotlib for the plots:", "from __future__ import division, print_function\n\nimport sys\nimport numpy as np\nimport scipy as sp\nimport matplotlib as mpl\n\nprint('System: {}'.format(sys.version))\nprint('numpy version: {}'.format(np.__version__))\nprint('scipy version: {}'.format(sp.__version__))\nprint('matplotlib version: {}'.format(mpl.__version__))", "We will also need some specific modules and a litle \"IPython magic\" to show the plots:", "from numpy import linalg as LA\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\n%matplotlib inline", "Back to top\nDynamic system setup\nIn this example we will simulate a two degree of freedom system (2DOF) as a LTI system. For that purpose, we will define a mass and a stiffness matrix and use proportional damping:", "MM = np.asmatrix(np.diag([1., 2.]))\nprint(MM)\n\nKK = np.asmatrix([[20., -10.],[-10., 10.]])\nprint(KK)\n\nC1 = 0.1*MM+0.02*KK\nprint(C1)", "For the LTI system we will use a state space formulation. For that we will need the four matrices describing the system (A), the input (B), the output (C) and the feedthrough (D):", "A = np.bmat([[np.zeros_like(MM), np.identity(MM.shape[0])], [LA.solve(-MM,KK), LA.solve(-MM,C1)]])\nprint(A)\n\nBf = KK*np.asmatrix(np.ones((2, 1)))\nB = np.bmat([[np.zeros_like(Bf)],[LA.solve(MM,Bf)]])\nprint(B)\n\nCd = np.matrix((1,0))\nCv = np.asmatrix(np.zeros((1,MM.shape[1])))\nCa = np.asmatrix(np.zeros((1,MM.shape[1])))\nC = np.bmat([Cd-Ca*LA.solve(MM,KK),Cv-Ca*LA.solve(MM,C1)])\nprint(C)\n\nD = Ca*LA.solve(MM,Bf)\nprint(D)", "The LTI system is simply defined as:", "system = signal.lti(A, B, C, D)", "To check the results presented ahead we will need the angular frequencies and damping coefficients of this system. The eigenanalysis of the system matrix yields them after some computations:", "w1, v1 = LA.eig(A)\nix = np.argsort(np.absolute(w1)) # order of ascending eigenvalues\nw1 = w1[ix] # sorted eigenvalues\nv1 = v1[:,ix] # sorted eigenvectors\nzw = -w1.real # damping coefficient time angular frequency\nwD = w1.imag # damped angular frequency\nzn = 1./np.sqrt(1.+(wD/-zw)**2) # the minus sign is formally correct!\nwn = zw/zn # undamped angular frequency\nprint('Angular frequency: {}'.format(wn[[0,2]]))\nprint('Damping coefficient: {}'.format(zn[[0,2]]))", "Back to top\nFrequency response function\nA frequency response function is a complex valued function of frequency. 
Let us see how it looks when we plot the real and imaginary parts in separate:", "w, H = system.freqresp()\nfig, ax = plt.subplots(2, 1)\nfig.suptitle('Real and imaginary plots')\n# Real part plot\nax[0].plot(w, H.real, label='FRF')\nax[0].axvline(wn[0], color='k', label='First mode', linestyle='--')\nax[0].axvline(wn[2], color='k', label='Second mode', linestyle='--')\nax[0].set_ylabel('Real [-]')\nax[0].grid(True)\nax[0].legend()\n# Imaginary part plot\nax[1].plot(w, H.imag, label='FRF')\nax[1].axvline(wn[0], color='k', label='First mode', linestyle='--')\nax[1].axvline(wn[2], color='k', label='Second mode', linestyle='--')\nax[1].set_ylabel('Imaginary [-]')\nax[1].set_xlabel('Frequency [rad/s]')\nax[1].grid(True)\nax[1].legend()\nplt.show()", "Back to top\nNyquist plot\nA Nyquist plot represents the real and imaginary parts of the complex FRF in a single plot:", "plt.figure()\nplt.title('Nyquist plot')\nplt.plot(H.real, H.imag, 'b')\nplt.plot(H.real, -H.imag, 'r')\nplt.xlabel('Real [-]')\nplt.ylabel('Imaginary[-]')\nplt.grid(True)\nplt.axis('equal')\nplt.show()", "Back to top\nBode plot\nA Bode plot represents the complex FRF in magnitude-phase versus frequency:", "w, mag, phase = system.bode()\nfig, ax = plt.subplots(2, 1)\nfig.suptitle('Bode plot')\n# Magnitude plot\nax[0].plot(w, mag, label='FRF')\nax[0].axvline(wn[0], color='k', label='First mode', linestyle='--')\nax[0].axvline(wn[2], color='k', label='Second mode', linestyle='--')\nax[0].set_ylabel('Magnitude [dB]')\nax[0].grid(True)\nax[0].legend()\n# Phase plot\nax[1].plot(w, phase*np.pi/180., label='FRF')\nax[1].axvline(wn[0], color='k', label='First mode', linestyle='--')\nax[1].axvline(wn[2], color='k', label='Second mode', linestyle='--')\nax[1].set_ylabel('Phase [rad]')\nax[1].set_xlabel('Frequency [rad/s]')\nax[1].grid(True)\nax[1].legend()\nplt.show()", "Back to top\nNichols plot\nA Nichols plot combines the Bode plot in a single plot of magnitude versus phase:", "plt.figure()\nplt.title('Nichols plot')\nplt.plot(phase*np.pi/180., mag)\nplt.xlabel('Phase [rad/s]')\nplt.ylabel('Magnitude [dB]')\nplt.grid(True)\nplt.show()", "Back to top\nOdds and ends\nThis notebook was created by Paulo Xavier Candeias.\nBack to top" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
lithiumdenis/MLSchool
2. Бостон.ipynb
mit
[ "import pandas as pd\nimport numpy as np", "Загрузим данные", "from sklearn.datasets import load_boston\n\nbunch = load_boston()\n\nprint(bunch.DESCR)\n\nX, y = pd.DataFrame(data=bunch.data, columns=bunch.feature_names.astype(str)), bunch.target\n\nX.head()", "Зафиксируем генератор случайных чисел для воспроизводимости:", "SEED = 22\nnp.random.seed = SEED", "Домашка!\nРазделим данные на условно обучающую и отложенную выборки:", "from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)\n\nX_train.shape, y_train.shape, X_test.shape, y_test.shape", "Измерять качество будем с помощью метрики среднеквадратичной ошибки:", "from sklearn.metrics import mean_squared_error", "<div class=\"panel panel-info\" style=\"margin: 50px 0 0 0\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Задача 1.</h3> \n </div>\n <div class=\"panel\">\n Обучите <b>LinearRegression</b> из пакета <b>sklearn.linear_model</b> на обучающей выборке (<i>X_train, y_train</i>) и измерьте качество на <i>X_test</i>.\n <br>\n <br>\n <i>P.s. Ошибка должна быть в районе 20. </i>\n </div>\n</div>", "from sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import cross_val_score\n\nclf = LinearRegression()\nclf.fit(X_train, y_train);\n\nprint('Вышла средняя ошибка, равная %5.4f' % \\\n (-np.mean(cross_val_score(clf, X_test, y_test, cv=5, scoring='neg_mean_squared_error'))))", "<div class=\"panel panel-info\" style=\"margin: 50px 0 0 0\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Задача 2. (с подвохом)</h3> \n </div>\n <div class=\"panel\">\n Обучите <b>SGDRegressor</b> из пакета <b>sklearn.linear_model</b> на обучающей выборке (<i>X_train, y_train</i>) и измерьте качество на <i>X_test</i>.\n </div>\n</div>", "from sklearn.linear_model import SGDRegressor\nfrom sklearn.preprocessing import StandardScaler\n\nss = StandardScaler()\nX_scaled = ss.fit_transform(X_train)\ny_scaled = ss.fit_transform(y_train)\n\nsgd = SGDRegressor()\nsgd.fit(X_scaled, y_scaled);\n\nprint('Вышла средняя ошибка, равная %5.4f' % \\\n (-np.mean(cross_val_score(sgd, X_scaled, y_scaled, cv=5, scoring='neg_mean_squared_error'))))", "<div class=\"panel panel-info\" style=\"margin: 50px 0 0 0\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Задача 3.</h3>\n </div>\n <div class=\"panel\">\n Попробуйте все остальные классы:\n <ul>\n <li>Ridge\n <li>Lasso\n <li>ElasticNet\n </ul>\n\n <br>\n\n В них, как вам уже известно, используются параметры регуляризации <b>alpha</b>. 
Tune it both with <b>GridSearchCV</b> and with the ready-made <b>-CV</b> classes (<b>RidgeCV</b>, <b>LassoCV</b>, etc.).\n\n <br><br>\n\n Finally, find the most accurate linear model!\n\n </div>\n</div>", "from sklearn.preprocessing import StandardScaler, PolynomialFeatures\nfrom sklearn.pipeline import Pipeline, make_pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import RidgeCV\n\n############Ridge\nparams = { \n 'alpha': [10**x for x in range(-2,3)]\n}\n\nfrom sklearn.linear_model import Ridge\n\ngsR = RidgeCV() #GridSearchCV(Ridge(), param_grid=params)\ngsR.fit(X_train, y_train);\n\nprint('The mean error came out to %5.4f' % \\\n (-np.mean(cross_val_score(gsR, X_test, y_test, cv=5, scoring='neg_mean_squared_error'))))\n\n\n\n############Lasso\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import LassoCV\n\ngsL = GridSearchCV(Lasso(), param_grid=params) #LassoCV() - slower\ngsL.fit(X_train, y_train);\n\nprint('The mean error came out to %5.4f' % \\\n (-np.mean(cross_val_score(gsL, X_test, y_test, cv=5, scoring='neg_mean_squared_error'))))\n\n\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.linear_model import ElasticNetCV\n\ngsE = GridSearchCV(ElasticNet(), param_grid=params) #ElasticNetCV() - a drop-in replacement, but not very accurate here\ngsE.fit(X_train, y_train);\n\nprint('The mean error came out to %5.4f' % \\\n (-np.mean(cross_val_score(gsE, X_test, y_test, cv=5, scoring='neg_mean_squared_error'))))", "Overall, the most accurate of these three is GridSearchCV + Lasso\n<div class=\"panel panel-info\" style=\"margin: 50px 0 0 0\">\n <div class=\"panel-heading\">\n <h3 class=\"panel-title\">Task 4.</h3>\n </div>\n <div class=\"panel\">\n As you know, the proper way to check quality is with cross-validation. You know what to do: use <b>cross_val_score</b> from <b>sklearn.model_selection</b>. Set the <b>cv</b> parameter to 5.\n <br><br>\n Remember all the tricks we have learned.\n <br><br>\n Achieve <b>MSE < 27</b>.\n </div>\n</div>\n\nOops! All the cases above were already evaluated with cross_val_score" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/bnu/cmip6/models/sandbox-3/land.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Land\nMIP Era: CMIP6\nInstitute: BNU\nSource ID: SANDBOX-3\nTopic: Land\nSub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes. \nProperties: 154 (96 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:41\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'bnu', 'sandbox-3', 'land')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Conservation Properties\n3. Key Properties --&gt; Timestepping Framework\n4. Key Properties --&gt; Software Properties\n5. Grid\n6. Grid --&gt; Horizontal\n7. Grid --&gt; Vertical\n8. Soil\n9. Soil --&gt; Soil Map\n10. Soil --&gt; Snow Free Albedo\n11. Soil --&gt; Hydrology\n12. Soil --&gt; Hydrology --&gt; Freezing\n13. Soil --&gt; Hydrology --&gt; Drainage\n14. Soil --&gt; Heat Treatment\n15. Snow\n16. Snow --&gt; Snow Albedo\n17. Vegetation\n18. Energy Balance\n19. Carbon Cycle\n20. Carbon Cycle --&gt; Vegetation\n21. Carbon Cycle --&gt; Vegetation --&gt; Photosynthesis\n22. Carbon Cycle --&gt; Vegetation --&gt; Autotrophic Respiration\n23. Carbon Cycle --&gt; Vegetation --&gt; Allocation\n24. Carbon Cycle --&gt; Vegetation --&gt; Phenology\n25. Carbon Cycle --&gt; Vegetation --&gt; Mortality\n26. Carbon Cycle --&gt; Litter\n27. Carbon Cycle --&gt; Soil\n28. Carbon Cycle --&gt; Permafrost Carbon\n29. Nitrogen Cycle\n30. River Routing\n31. River Routing --&gt; Oceanic Discharge\n32. Lakes\n33. Lakes --&gt; Method\n34. Lakes --&gt; Wetlands \n1. Key Properties\nLand surface key properties\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of land surface model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of land surface model code (e.g. MOSES2.2)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of the processes modelled (e.g. dymanic vegation, prognostic albedo, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.4. Land Atmosphere Flux Exchanges\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nFluxes exchanged with the atmopshere.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"water\" \n# \"energy\" \n# \"carbon\" \n# \"nitrogen\" \n# \"phospherous\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.5. Atmospheric Coupling Treatment\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.6. Land Cover\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTypes of land cover defined in the land surface model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.land_cover') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bare soil\" \n# \"urban\" \n# \"lake\" \n# \"land ice\" \n# \"lake ice\" \n# \"vegetated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.7. Land Cover Change\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe how land cover change is managed (e.g. the use of net or gross transitions)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.land_cover_change') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.8. Tiling\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Conservation Properties\nTODO\n2.1. Energy\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how energy is conserved globally and to what level (e.g. within X [units]/year)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.conservation_properties.energy') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Water\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how water is conserved globally and to what level (e.g. within X [units]/year)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.conservation_properties.water') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.conservation_properties.carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. 
Key Properties --&gt; Timestepping Framework\nTODO\n3.1. Timestep Dependent On Atmosphere\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs a time step dependent on the frequency of atmosphere coupling?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "3.2. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverall timestep of land surface model (i.e. time between calls)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.3. Timestepping Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of time stepping method and associated time step(s)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Software Properties\nSoftware properties of land surface code\n4.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5. Grid\nLand surface grid\n5.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of the grid in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Grid --&gt; Horizontal\nThe horizontal grid in the land surface\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general structure of the horizontal grid (not including any tiling)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.horizontal.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Matches Atmosphere Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the horizontal grid match the atmosphere?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "7. Grid --&gt; Vertical\nThe vertical grid in the soil\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general structure of the vertical grid in the soil (not including any tiling)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.vertical.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Total Depth\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe total depth of the soil (in metres)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.grid.vertical.total_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "8. Soil\nLand surface soil\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of soil in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Heat Water Coupling\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the coupling between heat and water in the soil", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_water_coupling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.3. Number Of Soil layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe number of soil layers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.number_of_soil layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "8.4. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the soil scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Soil --&gt; Soil Map\nKey properties of the land surface soil map\n9.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of soil map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Structure\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil structure map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.structure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.3. Texture\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil texture map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.texture') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.4. 
Organic Matter\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil organic matter map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.organic_matter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.5. Albedo\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil albedo map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.6. Water Table\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil water table map, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.water_table') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.7. Continuously Varying Soil Depth\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the soil properties vary continuously with depth?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "9.8. Soil Depth\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil depth map", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.soil_map.soil_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Soil --&gt; Snow Free Albedo\nTODO\n10.1. Prognostic\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs snow free albedo prognostic?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "10.2. Functions\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf prognostic, describe the dependancies on snow free albedo calculations", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.functions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vegetation type\" \n# \"soil humidity\" \n# \"vegetation state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.3. Direct Diffuse\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf prognostic, describe the distinction between direct and diffuse albedo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"distinction between direct and diffuse albedo\" \n# \"no distinction between direct and diffuse albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.4. Number Of Wavelength Bands\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf prognostic, enter the number of wavelength bands used", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11. Soil --&gt; Hydrology\nKey properties of the land surface soil hydrology\n11.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of the soil hydrological model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.2. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of river soil hydrology in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil hydrology tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.4. Vertical Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the typical vertical discretisation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.5. Number Of Ground Water Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe number of soil layers that may contain water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.6. Lateral Connectivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDescribe the lateral connectivity between tiles", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"perfect connectivity\" \n# \"Darcian flow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "11.7. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe hydrological dynamics scheme in the land surface model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Bucket\" \n# \"Force-restore\" \n# \"Choisnel\" \n# \"Explicit diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "12. Soil --&gt; Hydrology --&gt; Freezing\nTODO\n12.1. Number Of Ground Ice Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow many soil layers may contain ground ice", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12.2. 
Ice Storage Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method of ice storage", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.3. Permafrost\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of permafrost, if any, within the land surface scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Soil --&gt; Hydrology --&gt; Drainage\nTODO\n13.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral describe how drainage is included in the land surface scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.drainage.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13.2. Types\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nDifferent types of runoff represented by the land surface model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.hydrology.drainage.types') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Gravity drainage\" \n# \"Horton mechanism\" \n# \"topmodel-based\" \n# \"Dunne mechanism\" \n# \"Lateral subsurface flow\" \n# \"Baseflow from groundwater\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Soil --&gt; Heat Treatment\nTODO\n14.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral description of how heat treatment properties are defined", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.2. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of soil heat scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.3. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the soil heat treatment tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.4. Vertical Discretisation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the typical vertical discretisation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.5. Heat Storage\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify the method of heat storage", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.soil.heat_treatment.heat_storage') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Force-restore\" \n# \"Explicit diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.6. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDescribe processes included in the treatment of soil heat", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.soil.heat_treatment.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"soil moisture freeze-thaw\" \n# \"coupling with snow temperature\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15. Snow\nLand surface snow\n15.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of snow in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Number Of Snow Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe number of snow levels used in the land surface scheme/model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.number_of_snow_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15.4. Density\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of snow density", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.density') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.5. Water Equivalent\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of the snow water equivalent", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.water_equivalent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.6. Heat Content\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of the heat content of snow", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.heat_content') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.7. Temperature\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of snow temperature", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.snow.temperature') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.8. Liquid Water Content\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescription of the treatment of snow liquid water", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.liquid_water_content') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.9. Snow Cover Fractions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify cover fractions used in the surface snow scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.snow_cover_fractions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"ground snow fraction\" \n# \"vegetation snow fraction\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.10. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSnow related processes in the land surface scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"snow interception\" \n# \"snow melting\" \n# \"snow freezing\" \n# \"blowing snow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.11. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the snow scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Snow --&gt; Snow Albedo\nTODO\n16.1. Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of snow-covered land albedo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.snow_albedo.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"prescribed\" \n# \"constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.2. Functions\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\n*If prognostic, *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.snow.snow_albedo.functions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vegetation type\" \n# \"snow age\" \n# \"snow density\" \n# \"snow grain type\" \n# \"aerosol deposition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17. Vegetation\nLand surface vegetation\n17.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of vegetation in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.2. 
Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of vegetation scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "17.3. Dynamic Vegetation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there dynamic evolution of vegetation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.dynamic_vegetation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17.4. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the vegetation tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.5. Vegetation Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nVegetation classification used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vegetation types\" \n# \"biome types\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.6. Vegetation Types\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of vegetation types in the classification, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_types') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"broadleaf tree\" \n# \"needleleaf tree\" \n# \"C3 grass\" \n# \"C4 grass\" \n# \"vegetated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.7. Biome Types\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of biome types in the classification, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biome_types') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"evergreen needleleaf forest\" \n# \"evergreen broadleaf forest\" \n# \"deciduous needleleaf forest\" \n# \"deciduous broadleaf forest\" \n# \"mixed forest\" \n# \"woodland\" \n# \"wooded grassland\" \n# \"closed shrubland\" \n# \"opne shrubland\" \n# \"grassland\" \n# \"cropland\" \n# \"wetlands\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.8. Vegetation Time Variation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow the vegetation fractions in each tile are varying with time", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_time_variation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed (not varying)\" \n# \"prescribed (varying from files)\" \n# \"dynamical (varying from simulation)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.9. 
Vegetation Map\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf vegetation fractions are not dynamically updated , describe the vegetation map used (common name and reference, if possible)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.vegetation_map') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.10. Interception\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs vegetation interception of rainwater represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.interception') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17.11. Phenology\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTreatment of vegetation phenology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.phenology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic (vegetation map)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.12. Phenology Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation phenology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.phenology_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.13. Leaf Area Index\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTreatment of vegetation leaf area index", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.leaf_area_index') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prescribed\" \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.14. Leaf Area Index Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of leaf area index", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.leaf_area_index_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.15. Biomass\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\n*Treatment of vegetation biomass *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biomass') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.16. Biomass Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation biomass", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biomass_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.17. Biogeography\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTreatment of vegetation biogeography", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.vegetation.biogeography') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.18. Biogeography Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation biogeography", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.biogeography_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.19. Stomatal Resistance\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify what the vegetation stomatal resistance depends on", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.stomatal_resistance') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"light\" \n# \"temperature\" \n# \"water availability\" \n# \"CO2\" \n# \"O3\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.20. Stomatal Resistance Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of the treatment of vegetation stomatal resistance", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.stomatal_resistance_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "17.21. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the vegetation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.vegetation.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Energy Balance\nLand surface energy balance\n18.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of energy balance in land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the energy balance tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18.3. Number Of Surface Temperatures\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThe maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "18.4. Evaporation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSpecify the formulation method for land surface evaporation, from soil and vegetation", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.energy_balance.evaporation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"alpha\" \n# \"beta\" \n# \"combined\" \n# \"Monteith potential evaporation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.5. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nDescribe which processes are included in the energy balance scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.energy_balance.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"transpiration\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19. Carbon Cycle\nLand surface carbon cycle\n19.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of carbon cycle in land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the carbon cycle tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of carbon cycle in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "19.4. Anthropogenic Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nDescribe the treament of the anthropogenic carbon pool", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"grand slam protocol\" \n# \"residence time\" \n# \"decay time\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "19.5. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the carbon scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20. Carbon Cycle --&gt; Vegetation\nTODO\n20.1. Number Of Carbon Pools\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "20.2. Carbon Pools\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "20.3. 
Forest Stand Dynamics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the treatment of forest stand dyanmics", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Carbon Cycle --&gt; Vegetation --&gt; Photosynthesis\nTODO\n21.1. Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen depencence, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22. Carbon Cycle --&gt; Vegetation --&gt; Autotrophic Respiration\nTODO\n22.1. Maintainance Respiration\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the general method used for maintainence respiration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.2. Growth Respiration\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the general method used for growth respiration", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23. Carbon Cycle --&gt; Vegetation --&gt; Allocation\nTODO\n23.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general principle behind the allocation scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "23.2. Allocation Bins\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify distinct carbon bins used in allocation", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"leaves + stems + roots\" \n# \"leaves + stems + roots (leafy + woody)\" \n# \"leaves + fine roots + coarse roots + stems\" \n# \"whole plant (no distinction)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.3. Allocation Fractions\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how the fractions of allocation are calculated", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"function of vegetation type\" \n# \"function of plant allometry\" \n# \"explicitly calculated\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "24. Carbon Cycle --&gt; Vegetation --&gt; Phenology\nTODO\n24.1. 
Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general principle behind the phenology scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "25. Carbon Cycle --&gt; Vegetation --&gt; Mortality\nTODO\n25.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the general principle behind the mortality scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26. Carbon Cycle --&gt; Litter\nTODO\n26.1. Number Of Carbon Pools\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "26.2. Carbon Pools\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26.3. Decomposition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the decomposition methods used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.decomposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "26.4. Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the general method used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.litter.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27. Carbon Cycle --&gt; Soil\nTODO\n27.1. Number Of Carbon Pools\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "27.2. Carbon Pools\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the carbon pools used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27.3. Decomposition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the decomposition methods used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.soil.decomposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "27.4. Method\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the general method used", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.carbon_cycle.soil.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28. Carbon Cycle --&gt; Permafrost Carbon\nTODO\n28.1. Is Permafrost Included\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs permafrost included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "28.2. Emitted Greenhouse Gases\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the GHGs emitted", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28.3. Decomposition\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList the decomposition methods used", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "28.4. Impact On Soil Properties\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the impact of permafrost on soil properties", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29. Nitrogen Cycle\nLand surface nitrogen cycle\n29.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of the nitrogen cycle in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the notrogen cycle tiling, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "29.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of nitrogen cycle in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "29.4. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the nitrogen scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30. River Routing\nLand surface river routing\n30.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of river routing in the land surface", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.river_routing.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.2. Tiling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the river routing, if any.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.tiling') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of river routing scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.4. Grid Inherited From Land Surface\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the grid inherited from land surface?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "30.5. Grid Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nGeneral description of grid, if not inherited from land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.grid_description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.6. Number Of Reservoirs\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEnter the number of reservoirs", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.number_of_reservoirs') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "30.7. Water Re Evaporation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTODO", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.water_re_evaporation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"flood plains\" \n# \"irrigation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.8. Coupled To Atmosphere\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIs river routing coupled to the atmosphere model component?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "30.9. Coupled To Land\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the coupling between land and rivers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.coupled_to_land') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.10. Quantities Exchanged With Atmosphere\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf couple to atmosphere, which quantities are exchanged between river routing and the atmosphere model components?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"heat\" \n# \"water\" \n# \"tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.11. Basin Flow Direction Map\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat type of basin flow direction map is being used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.basin_flow_direction_map') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"present day\" \n# \"adapted for other periods\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "30.12. Flooding\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the representation of flooding, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.flooding') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "30.13. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the river routing", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "31. River Routing --&gt; Oceanic Discharge\nTODO\n31.1. Discharge Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify how rivers are discharged to the ocean", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"direct (large rivers)\" \n# \"diffuse\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "31.2. Quantities Transported\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nQuantities that are exchanged from river-routing to the ocean model component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"heat\" \n# \"water\" \n# \"tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32. Lakes\nLand surface lakes\n32.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of lakes in the land surface", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.2. Coupling With Rivers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre lakes coupled to the river routing model component?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.coupling_with_rivers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "32.3. Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTime step of lake scheme in seconds", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.land.lakes.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "32.4. Quantities Exchanged With Rivers\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf coupling with rivers, which quantities are exchanged between the lakes and rivers", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"heat\" \n# \"water\" \n# \"tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "32.5. Vertical Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the vertical grid of lakes", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.vertical_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "32.6. Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList the prognostic variables of the lake scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "33. Lakes --&gt; Method\nTODO\n33.1. Ice Treatment\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs lake ice included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.ice_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "33.2. Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the treatment of lake albedo", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33.3. Dynamics\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich dynamics of lakes are treated? horizontal, vertical, etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.dynamics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"No lake dynamics\" \n# \"vertical\" \n# \"horizontal\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "33.4. Dynamic Lake Extent\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs a dynamic lake extent scheme included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "33.5. Endorheic Basins\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBasins not flowing to ocean included?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.method.endorheic_basins') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "34. Lakes --&gt; Wetlands\nTODO\n34.1. 
Description\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the treatment of wetlands, if any", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.land.lakes.wetlands.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
DS-100/sp17-materials
sp17/disc/disc11/disc11_solution.ipynb
gpl-3.0
[ "Discussion 11: Logistic Regression and Gradient Descent", "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches, cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom mpl_toolkits.mplot3d import Axes3D\n%matplotlib inline\n\nfrom IPython.display import display, Latex, Markdown\nfrom ipywidgets import interact, interactive, fixed\nimport ipywidgets as widgets", "Understanding Gradient Descent\nIn order to better understand gradient descent, let's implement it to solve a familiar problem - least-squares linear regression. While we are able to find the solution to ordinary least-squares linear regression analytically (recall its value as $\\theta = (X^TX)^{−1}X^TY$), we can also find it using gradient descent.\nQuestion 1:\nFirst, let's consider the gradient function for ordinary least squares regression. Recall the OLS loss function as\n$$Loss(\\theta) = \\frac{1}{n} \\sum_{i=1}^n \\left(y_i - f_\\theta(x_i)\\right)^2$$\nAnd the function $f_\\theta(x_i)$, for input data with $p$ dimensions, as\n$$f_\\theta(x_i) = \\sum_{j=1}^p \\theta_j x_{i,j} $$\nGiven these functions, what is the gradient function for OLS regression? First, state it in terms of a single component of $\\theta$, $\\theta_j$, using a sum over each data point $i$ in $X$.", "q1_answer = r\"\"\"\n\nPut your answer here, replacing this text.\n\n$$\\frac{\\partial}{\\partial \\theta_j} Loss(\\theta) = \\frac{1}{n} \\sum_{i=1}^n \\dots$$\n\n\"\"\"\n\ndisplay(Markdown(q1_answer))\n\nq1_answer = r\"\"\"\n\n**SOLUTION:** \n\n$$\\frac{\\partial}{\\partial \\theta_j} Loss(\\theta) = \\frac{2}{n} \\sum_{i=1}^n -x_{i,j} \\left(y_i - f_\\theta(x_i)\\right)$$\n\n\"\"\"\n\ndisplay(Markdown(q1_answer))", "Question 2:\nNow, try to write that formula in terms of the matricies $X$, $y$, and $\\theta$.", "q2_answer = r\"\"\"\n\nPut your answer here, replacing this text.\n\n$$\\frac{\\partial}{\\partial \\theta} Loss(X) = \\dots$$\n\n\"\"\"\n\ndisplay(Markdown(q2_answer))\n\nq2_answer = r\"\"\"\n\n**SOLUTION:** \n\n$$\\frac{\\partial}{\\partial \\theta} Loss(X) = -\\frac{2}{n} X^T (y - X^T \\theta)$$\n\n\"\"\"\n\ndisplay(Markdown(q2_answer))", "Question 3:\nUsing this gradient function, complete the python function below which calculates the gradient for inputs $X$, $y$, and $\\theta$. You should get a gradient of $[7, 48]$ on the simple data below.", "def linear_regression_grad(X, y, theta):\n grad = -2/X.shape[0] * X.T @ (y - X @ theta) #SOLUTION\n return grad\n\ntheta = [1, 4]\nsimple_X = np.vstack([np.ones(10), np.arange(10)]).T \nsimple_y = np.arange(10) * 3 + 2\nlinear_regression_grad(simple_X, simple_y, theta)", "Question 4:\nBefore we perform gradient descent, let's visualize the surface we're attempting to descend over. 
Run the next few cells to plot the loss surface as a function of $\\theta_0$ and $\\theta_1$, for some toy data.", "def plot_surface_3d(X, Y, Z, angle):\n highest_Z = max(Z.reshape(-1,1))\n lowest_Z = min(Z.reshape(-1,1))\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n surf = ax.plot_surface(X, Y, Z, \n cmap=cm.coolwarm, \n linewidth=0, \n antialiased=False, \n rstride=5, cstride=5)\n ax.zaxis.set_major_locator(LinearLocator(5))\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.view_init(45, angle)\n fig.colorbar(surf, shrink=0.5, aspect=5)\n plt.title(\"Regression Loss Function\")\n plt.xlabel(\"Theta_0\")\n plt.ylabel(\"Theta_1\")\n plt.show()", "We create some toy data in two dimensions to perform our regressions on:", "np.random.seed(100)\nX_1 = np.arange(50)/5 + 5\nX = np.vstack([np.ones(50), X_1]).T \ny = (X_1 * 2 + 3) + np.random.normal(0, 2.5, size=50)\nplt.plot(X_1, y, \".\")", "And plot our loss:", "angle_slider = widgets.FloatSlider(min=0, max=360, step=15, value=45)\n\ndef plot_regression_loss(angle):\n\n t0_vals = np.linspace(-10,10,100)\n t1_vals = np.linspace(-2,5,100)\n theta_0,theta_1 = np.meshgrid(t0_vals, t1_vals)\n thetas = np.vstack((theta_0.flatten(), theta_1.flatten()))\n loss_vals = 2/X.shape[0] * sum(((y - (X @ thetas).T)**2).T)\n loss_vals = loss_vals.reshape(100, -100)\n plot_surface_3d(theta_0, theta_1, loss_vals, angle)\n \ninteract(plot_regression_loss, angle=angle_slider);", "Consider: \n- What do you notice about the loss surface for this simple regression example? \n- Where are the optimal values $(\\theta_0, \\theta_1)$? \n- Do you think that the shape of this surface will make gradient descent a viable solution to find these optimal values? \n- What other loss surface shapes could you imagine?\nQuestion 5:\nNow, let's implement a general function to perform batch gradient descent. Given data X and y, initial weights $\\theta_0$, a learning rate $\\rho$, and a function gradient_function that has the same function signature as linear_regression_grad, implement a general gradient descent algorithm for finding optimal $\\theta$.", "def gradient_descent(X, y, theta0, gradient_function, learning_rate = 0.001, max_iter=1000000, epsilon=0.001):\n \n theta_hat = theta0 # Initial guess\n for t in range(1, max_iter):\n \n grad = gradient_function(X, y, theta_hat)\n \n # Now for the update step\n theta_hat = theta_hat - learning_rate * grad #SOLUTION\n \n # When our gradient is small enough, we have converged\n if np.linalg.norm(grad) < epsilon:\n print(\"converged after {} steps\".format(t))\n return theta_hat\n \n # If we hit max_iter iterations\n print(\"Warning - Failed to converge\")\n return theta_hat\n\ntheta_0 = [10, -1]\ngradient_descent(X, y, theta_0, linear_regression_grad)", "Now let's visualize how our regression estimates change as we perform gradient descent:", "theta_0s = []\ntheta_1s = []\nplot_idx = [1, 5, 20, 100, 500, 2000, 10000]\n\ndef plot_gradient_wrapper(X, y, theta):\n grad = linear_regression_grad(X, y, theta)\n theta_0s.append(theta[0])\n theta_1s.append(theta[1])\n t = len(theta_0s)\n if t in plot_idx:\n plt.subplot(121)\n plt.xlim([4, 12])\n plt.ylim([-2, 3])\n plt.plot(theta_0s, theta_1s)\n plt.plot(theta[0], theta[1], \".\", color=\"b\")\n plt.title('theta(s) over time, t={}'.format(t))\n plt.subplot(122)\n plt.xlim([0, 20])\n plt.ylim([-10, 40])\n plt.plot(np.arange(50)/2.5, y, \".\")\n plt.plot(np.arange(50)/2.5, X @ theta)\n plt.title('Regression line vs. 
data, t={}'.format(t))\n plt.show()\n return grad\n\ngradient_descent(X, y, theta_0, plot_gradient_wrapper)", "Question 6:\nIn Prof. Gonzalez's lecture, instead of using a constant learning rate, he used a learning rate that decreased over time, according to a function:\n$$\\rho(t) = \\frac{r}{t}$$\nWhere $r$ represents some initial learning rate. This has the feature of decreasing the learning rate as we get closer to the optimal solution.\n- Why might this be useful, compared to a constant learning rate? \n- What problems might be caused by using too high of a learning rate? \n- What about too low?\nExtending to Logistic Regression\nQuestion 7\nAs discussed in lecture, while ordinary least squares has a simple analytical solution, logistic regression must be fitted using gradient descent. Using the tools we've constructed, we can do just that. First, create a new function, logistic_regression_grad, which functions similarly to its counterpart linear_regression_grad. In the case of logistic regression, this should be the gradient of the logistic regression log-likelihood function - you may wish to refer to the lecture slides to find this gradient equation.\nFirst, we define the sigmoid function:", "def sigmoid(t):\n return 1/(1 + np.e**-t)", "And then complete the gradient function. You should get a gradient of about $[0.65, 0.61]$ for the given values $\\theta$ on this example dataset.", "def logistic_regression_grad(X, y, theta):\n grad = (sigmoid(X @ theta) - y) @ X #SOLUTION\n return grad\n\ntheta = [0, 1]\nsimple_X_1 = np.hstack([np.arange(10)/10, np.arange(10)/10 + 0.75])\nsimple_X = np.vstack([np.ones(20), simple_X_1]).T\nsimple_y = np.hstack([np.zeros(10), np.ones(10)])\nlinear_regression_grad(simple_X, simple_y, theta)", "Now let's see how we can use our gradient descent tools to fit a regression on some real data! First, let's load the breast cancer dataset from lecture, and plot breast mass radius versus category - malignant or benign. As in lecture, we jitter the response variable to avoid overplotting.", "import sklearn.datasets\ndata_dict = sklearn.datasets.load_breast_cancer()\ndata = pd.DataFrame(data_dict['data'], columns=data_dict['feature_names'])\ndata['malignant'] = (data_dict['target'] == 0)\ndata['malignant'] = data['malignant'] + 0.1*np.random.rand(len(data['malignant'])) - 0.05\n\nX_log_1 = data['mean radius']\nX_log = np.vstack([np.ones(len(X_log_1)), X_log_1.values]).T\ny_log = data['malignant'].values\nplt.plot(X_log_1, y_log, \".\")", "Question 8:\nNow, using our earlier defined gradient_descent function, find optimal parameters $(\\theta_0, \\theta_1)$ to fit the breast cancer data. 
You will have to tune the learning rate beyond the default of the function, and think of what a good initial guess for $\\theta$ would be, in both dimensions.", "theta_log = gradient_descent(X_log, y_log, [0, 1], logistic_regression_grad, learning_rate=0.0001) #SOLUTION\ntheta_log", "With optimal $\\theta$ chosen, we can now plot our logistic curve and our decision boundary, and look at how our model categorizes our data:", "y_lowX = X_log_1[sigmoid(X_log @ theta_log) < 0.5]\ny_lowy = y_log[sigmoid(X_log @ theta_log) < 0.5]\ny_highX = X_log_1[sigmoid(X_log @ theta_log) > 0.5]\ny_highy = y_log[sigmoid(X_log @ theta_log) > 0.5]\n\nsigrange = np.arange(5, 30, 0.05)\nsigrange_X = np.vstack([np.ones(500), sigrange]).T\nd_boundary = -theta_log[0]/theta_log[1]\n\nplt.plot(sigrange, sigmoid(sigrange_X @ theta_log), \".\", color=\"g\")\nplt.hlines(0.5, 5, 30, \"g\")\nplt.vlines(d_boundary, -0.2, 1.2, \"g\")\nplt.plot(y_lowX, y_lowy, \".\", color=\"b\")\nplt.plot(y_highX, y_highy, \".\", color=\"r\")\nplt.title(\"Classification (blue=benign, red=malignant), assuming a P=0.5 decision boundary\")", "And, we can calculate our classification accuracy.", "n_errors = sum(y_lowy > 0.5) + sum(y_highy < 0.5)\naccuracy = round((len(y_log)-n_errors)/len(y_log) * 1000)/10\nprint(\"Classification Accuracy - {}%\".format(accuracy))" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
probml/pyprobml
deprecated/simulated_annealing_2d_demo.ipynb
mit
[ "<a href=\"https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/simulated_annealing_2d_demo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\nSimulated annealing on a 2d surface\nCode is based on\nhttps://krischer.github.io/seismo_live_build/html/Seismic%20Inverse%20Problems/Probabilistic%20Inversion/pi_simann_wrapper.html\nand modified by murphyk@ and Neoanarika@", "import numpy as np\nimport matplotlib\n\nmatplotlib.use(\"nbagg\")\nimport matplotlib.pyplot as plt\nfrom IPython import display\n\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n!mkdir figures\n!mkdir scripts\n%cd /content/scripts\n!wget -q https://raw.githubusercontent.com/probml/pyprobml/master/scripts/pyprobml_utils.py\nimport pyprobml_utils as pml\n", "Target distribution\nWe use the peaks function from matlab, modified so it is positive:\n$$\np(x,y) \\propto |3 (1-x)^2 e^{-x^2 - (y+1)^2}\n - 10 (\\frac{x}{5} - x^3 - y^5) e^{-x^2 -y^2} \n - \\frac{1}{3} e^{-(x+1)^2 - y^2} |\n$$", "# Generate a pdf\n\n# the following steps generate a pdf; this is equivalent to the function \"peaks(n)\" in matlab\nn = 100 # number of dimension\npdf = np.zeros([n, n])\nsigma = np.zeros([n, n])\ns = np.zeros([n, n])\nx = -3.0\nfor i in range(0, n):\n y = -3.0\n for j in range(0, n):\n pdf[j, i] = (\n 3.0 * (1 - x) ** 2 * np.exp(-(x**2) - (y + 1) ** 2)\n - 10.0 * (x / 5 - x**3 - y**5) * np.exp(-(x**2) - y**2)\n - 1.0 / 3 * np.exp(-((x + 1) ** 2) - y**2)\n )\n if pdf[j, i] < 0:\n pdf[j, i] = pdf[j, i] * (\n -1\n ) # in contrast to the peaks function: all negative values are multiplied by (-1)\n y = y + 6.0 / (n - 1)\n x = x + 6.0 / (n - 1)\n\npdf = pdf / pdf.max()\nenergy = -np.log(pdf)\n\n# Plot the 3D plot of pdf\n# --------------------------\n\nX = np.arange(0, 100 + 100.0 / (n - 1), 100.0 / (n - 1))\nY = np.arange(0, 100 + 100.0 / (n - 1), 100.0 / (n - 1))\nfig0 = plt.figure()\nax = fig0.gca(projection=\"3d\")\nX, Y = np.meshgrid(X, Y)\nsurf = ax.plot_surface(Y, X, pdf, rstride=2, cstride=2, cmap=plt.cm.coolwarm, linewidth=0.1)\n# plt.gca().invert_xaxis()\nplt.tight_layout()\npml.savefig(\"sim_anneal_2d_peaks.pdf\")\nplt.show()\n\n# Plot the 3D plot of Energy function\n# --------------------------\n\nX = np.arange(0, 100 + 100.0 / (n - 1), 100.0 / (n - 1))\nY = np.arange(0, 100 + 100.0 / (n - 1), 100.0 / (n - 1))\nfig0 = plt.figure()\nax = fig0.gca(projection=\"3d\")\nX, Y = np.meshgrid(X, Y)\nsurf = ax.plot_surface(Y, X, energy / energy.max(), rstride=2, cstride=2, cmap=plt.cm.coolwarm, linewidth=0.1)\n# plt.gca().invert_xaxis()\nplt.tight_layout()\npml.savefig(\"sim_anneal_2d_energy.pdf\")\nplt.show()", "Heat bath\nThe \"heat bath\" refers to a modified version of the distribution in which we vary the temperature.", "Tplots = 10 # initial temperature for the plots\nstepT = 4 # how many steps should the Temperature be *0.2 for\n\nfor i in range(0, stepT):\n sigma = np.exp(-(energy) / Tplots)\n sigma = sigma / sigma.max()\n ttl = \"T={:0.2f}\".format(Tplots)\n Tplots = Tplots * 0.2\n X = np.arange(0, 100 + 100.0 / (n - 1), 100.0 / (n - 1))\n Y = np.arange(0, 100 + 100.0 / (n - 1), 100.0 / (n - 1))\n fig = plt.figure()\n ax = fig.gca(projection=\"3d\")\n X, Y = np.meshgrid(X, Y)\n ax.set_title(ttl)\n ax.plot_surface(Y, X, sigma, rstride=2, cstride=2, cmap=plt.cm.coolwarm, linewidth=0, antialiased=False)\n # plt.gca().invert_xaxis()\n plt.tight_layout()\n pml.savefig(f\"sim_anneal_2d_cooled{i}.pdf\")\n\nplt.show()", "SA algorithm", "def 
sim_anneal(proposal=\"gaussian\", sigma=10):\n np.random.seed(42)\n xcur = np.array([np.floor(np.random.uniform(0, 100)), np.floor(np.random.uniform(0, 100))])\n xcur = xcur.astype(int)\n ns = 300 # number of samples to keep\n T = 1 # start temperature\n alpha = 0.99999 # cooling schedule\n alpha = 0.99 # cooling schedule\n\n # list of visited points, temperatures, probabilities\n x_hist = xcur # will be (N,2) array\n prob_hist = []\n temp_hist = []\n\n nreject = 0\n iis = 0 # number of accepted points\n npp = 0 # num proposed points\n while npp < ns:\n npp = npp + 1\n if proposal == \"uniform\":\n xnew = np.array([np.floor(np.random.uniform(0, 100)), np.floor(np.random.uniform(0, 100))])\n elif proposal == \"gaussian\":\n xnew = xcur + np.random.normal(size=2) * sigma\n xnew = np.maximum(xnew, 0)\n xnew = np.minimum(xnew, 99)\n else:\n raise ValueError(\"Unknown proposal\")\n xnew = xnew.astype(int)\n\n # compare energies\n Ecur = energy[xcur[0], xcur[1]]\n Enew = energy[xnew[0], xnew[1]]\n deltaE = Enew - Ecur\n # print([npp, xcur, xnew, Ecur, Enew, deltaE])\n\n temp_hist.append(T)\n T = alpha * T\n P = np.exp(-1.0 * deltaE / T)\n P = min(1, P)\n test = np.random.uniform(0, 1)\n if test <= P:\n xcur = xnew\n iis = iis + 1\n else:\n nreject += 1\n\n x_hist = np.vstack((x_hist, xcur))\n prob_hist.append(pdf[xcur[0], xcur[1]])\n\n npp = npp + 1\n print(f\"nproposed {npp}, naccepted {iis}, nreject {nreject}\")\n return x_hist, prob_hist, temp_hist", "Run experiments", "proposals = {\"gaussian\", \"uniform\"}\nx_hist = {}\nprob_hist = {}\ntemp_hist = {}\nfor proposal in proposals:\n print(proposal)\n x_hist[proposal], prob_hist[proposal], temp_hist[proposal] = sim_anneal(proposal=proposal)\n\nfor proposal in proposals:\n plt.figure()\n plt.plot(temp_hist[proposal])\n plt.title(\"temperature vs time\")\n plt.tight_layout()\n pml.savefig(f\"sim_anneal_2d_temp_vs_time_{proposal}.pdf\")\n plt.show()\n\nfor proposal in proposals:\n plt.figure()\n plt.plot(prob_hist[proposal])\n plt.xlabel(\"iteration\")\n plt.ylabel(\"probability\")\n plt.tight_layout()\n pml.savefig(f\"sim_anneal_2d_prob_vs_time_{proposal}.pdf\")\n plt.show()\n\n# Plot points visited\nfor proposal in proposals:\n probs = prob_hist[proposal]\n xa = x_hist[proposal]\n\n f1, ax = plt.subplots()\n ax.imshow(pdf.transpose(), aspect=\"auto\", extent=[0, 100, 100, 0], interpolation=\"none\")\n\n # Maximum value achieved ploted with white cirlce\n # maxi = np.argmax(probs) # index of best model\n # ax.plot(xa[maxi,0],xa[maxi,1],'wo', markersize=10)\n\n # Starting point with white cirlce\n ax.plot(xa[0, 0], xa[0, 1], \"wo\", markersize=10)\n\n # Global maximm with red cirlce\n ind = np.unravel_index(np.argmax(pdf, axis=None), pdf.shape)\n ax.plot(ind[0], ind[1], \"ro\", markersize=10)\n\n ax.plot(xa[:, 0], xa[:, 1], \"w+\") # Plot the steps with white +\n\n ax.set_ylabel(\"y\")\n ax.set_xlabel(\"x\")\n plt.tight_layout()\n pml.savefig(f\"sim_anneal_2d_samples_{proposal}.pdf\")\n plt.show()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mne-tools/mne-tools.github.io
0.20/_downloads/bf3ad991f7c7776e245520709f49cb04/plot_cwt_sensor_connectivity.ipynb
bsd-3-clause
[ "%matplotlib inline", "Compute seed-based time-frequency connectivity in sensor space\nComputes the connectivity between a seed-gradiometer close to the visual cortex\nand all other gradiometers. The connectivity is computed in the time-frequency\ndomain using Morlet wavelets and the debiased squared weighted phase lag index\n[1]_ is used as connectivity metric.\n.. [1] Vinck et al. \"An improved index of phase-synchronization for electro-\n physiological data in the presence of volume-conduction, noise and\n sample-size bias\" NeuroImage, vol. 55, no. 4, pp. 1548-1565, Apr. 2011.", "# Author: Martin Luessi <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport numpy as np\n\nimport mne\nfrom mne import io\nfrom mne.connectivity import spectral_connectivity, seed_target_indices\nfrom mne.datasets import sample\nfrom mne.time_frequency import AverageTFR\n\nprint(__doc__)", "Set parameters", "data_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\n\n# Setup for reading the raw data\nraw = io.read_raw_fif(raw_fname)\nevents = mne.read_events(event_fname)\n\n# Add a bad channel\nraw.info['bads'] += ['MEG 2443']\n\n# Pick MEG gradiometers\npicks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,\n exclude='bads')\n\n# Create epochs for left-visual condition\nevent_id, tmin, tmax = 3, -0.2, 0.5\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),\n preload=True)\n\n# Use 'MEG 2343' as seed\nseed_ch = 'MEG 2343'\npicks_ch_names = [raw.ch_names[i] for i in picks]\n\n# Create seed-target indices for connectivity computation\nseed = picks_ch_names.index(seed_ch)\ntargets = np.arange(len(picks))\nindices = seed_target_indices(seed, targets)\n\n# Define wavelet frequencies and number of cycles\ncwt_freqs = np.arange(7, 30, 2)\ncwt_n_cycles = cwt_freqs / 7.\n\n# Run the connectivity analysis using 2 parallel jobs\nsfreq = raw.info['sfreq'] # the sampling frequency\ncon, freqs, times, _, _ = spectral_connectivity(\n epochs, indices=indices,\n method='wpli2_debiased', mode='cwt_morlet', sfreq=sfreq,\n cwt_freqs=cwt_freqs, cwt_n_cycles=cwt_n_cycles, n_jobs=1)\n\n# Mark the seed channel with a value of 1.0, so we can see it in the plot\ncon[np.where(indices[1] == seed)] = 1.0\n\n# Show topography of connectivity from seed\ntitle = 'WPLI2 - Visual - Seed %s' % seed_ch\n\nlayout = mne.find_layout(epochs.info, 'meg') # use full layout\n\ntfr = AverageTFR(epochs.info, con, times, freqs, len(epochs))\ntfr.plot_topo(fig_facecolor='w', font_color='k', border='k')" ]
[ "code", "markdown", "code", "markdown", "code" ]
jorgedominguezchavez/dlnd_first_neural_network
Your_first_neural_network.ipynb
mit
[ "Your first neural network\nIn this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.", "%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n", "Load and prepare the data\nA critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!", "\ndata_path = 'Bike-Sharing-Dataset/hour.csv'\n\nrides = pd.read_csv(data_path)\n\nrides.head()", "Checking out the data\nThis dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.\nBelow is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.", "rides[:24*10].plot(x='dteday', y='cnt')", "Dummy variables\nHere we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies().", "dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']\nfor each in dummy_fields:\n dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)\n rides = pd.concat([rides, dummies], axis=1)\n\nfields_to_drop = ['instant', 'dteday', 'season', 'weathersit', \n 'weekday', 'atemp', 'mnth', 'workingday', 'hr']\ndata = rides.drop(fields_to_drop, axis=1)\ndata.head()", "Scaling target variables\nTo make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.\nThe scaling factors are saved so we can go backwards when we use the network for predictions.", "quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']\n# Store scalings in a dictionary so we can convert back later\nscaled_features = {}\nfor each in quant_features:\n mean, std = data[each].mean(), data[each].std()\n scaled_features[each] = [mean, std]\n data.loc[:, each] = (data[each] - mean)/std", "Splitting the data into training, testing, and validation sets\nWe'll save the data for the last approximately 21 days to use as a test set after we've trained the network. 
We'll use this set to make predictions and compare them with the actual number of riders.", "# Save data for approximately the last 21 days \ntest_data = data[-21*24:]\n\n# Now remove the test data from the data set \ndata = data[:-21*24]\n\n# Separate the data into features and targets\ntarget_fields = ['cnt', 'casual', 'registered']\nfeatures, targets = data.drop(target_fields, axis=1), data[target_fields]\ntest_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]", "We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).", "# Hold out the last 60 days or so of the remaining data as a validation set\ntrain_features, train_targets = features[:-60*24], targets[:-60*24]\nval_features, val_targets = features[-60*24:], targets[-60*24:]", "Time to build the network\nBelow you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.\n<img src=\"assets/neural_network.png\" width=300px>\nThe network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.\nWe use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.\n\nHint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.\n\nBelow, you have these tasks:\n1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function.\n2. Implement the forward pass in the train method.\n3. Implement the backpropagation algorithm in the train method, including calculating the output error.\n4. 
Implement the forward pass in the run method.", "class NeuralNetwork(object):\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, \n (self.input_nodes, self.hidden_nodes))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n self.lr = learning_rate\n \n #### TODO: Set self.activation_function to your implemented sigmoid function ####\n #\n # Note: in Python, you can define a function with a lambda expression,\n # as shown below.\n self.activation_function = lambda x : 1/(1+np.exp(-x)) # Replace 0 with sigmoid calculation. DONE \n ### If the lambda code above is not something you're familiar with,\n # You can uncomment out the following three lines and put your \n # implementation there instead.\n #\n #def sigmoid(x):\n # return 0 # Replace 0 with your sigmoid calculation here\n #self.activation_function = sigmoid\n \n \n def train(self, features, targets):\n ''' Train the network on batch of features and targets. \n \n Arguments\n ---------\n \n features: 2D array, each row is one data record, each column is a feature\n targets: 1D array of target values\n \n '''\n n_records = features.shape[0]\n delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)\n delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)\n for X, y in zip(features, targets):\n #### Implement the forward pass here ####\n ### Forward pass ###\n # TODO: Hidden layer - Replace these values with your calculations.\n hidden_inputs = np.dot(X, self.weights_input_to_hidden ) # signals into hidden layer DONE\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer DONE\n\n # TODO: Output layer - Replace these values with your calculations.\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer\n \n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # TODO: Output error - Replace this value with your calculations.\n error = y - final_outputs # Output layer error is the difference between desired target and actual output.\n \n \n # TODO: Backpropagated error terms - Replace these values with your calculations.\n output_error_term = error \n \n # TODO: Calculate the hidden layer's contribution to the error\n hidden_error = np.dot(self.weights_hidden_to_output, output_error_term)\n \n # TODO: Backpropagated error terms - Replace these values with your calculations.\n hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)\n\n # Weight step (input to hidden)\n delta_weights_i_h += hidden_error_term * X[:, None]\n # Weight step (hidden to output)\n delta_weights_h_o += output_error_term * hidden_outputs[:, None] \n\n # TODO: Update the weights - Replace these values with your calculations.\n self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step\n self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step\n \n def run(self, features):\n ''' Run a forward pass through the network with input features \n \n Arguments\n 
---------\n features: 1D array of feature values\n '''\n \n #### Implement the forward pass here ####\n # TODO: Hidden layer - replace these values with the appropriate calculations.\n hidden_inputs = np.dot(features, self.weights_input_to_hidden ) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n \n # TODO: Output layer - Replace these values with the appropriate calculations.\n final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer \n \n return final_outputs\n\ndef MSE(y, Y):\n return np.mean((y-Y)**2)", "Unit tests\nRun these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly befor you starting trying to train it. These tests must all be successful to pass the project.", "import unittest\n\ninputs = np.array([[0.5, -0.2, 0.1]])\ntargets = np.array([[0.4]])\ntest_w_i_h = np.array([[0.1, -0.2],\n [0.4, 0.5],\n [-0.3, 0.2]])\ntest_w_h_o = np.array([[0.3],\n [-0.1]])\n\nclass TestMethods(unittest.TestCase):\n \n ##########\n # Unit tests for data loading\n ##########\n \n def test_data_path(self):\n # Test that file path to dataset has been unaltered\n self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')\n \n def test_data_loaded(self):\n # Test that data frame loaded\n self.assertTrue(isinstance(rides, pd.DataFrame))\n \n ##########\n # Unit tests for network functionality\n ##########\n\n def test_activation(self):\n network = NeuralNetwork(3, 2, 1, 0.5)\n # Test that the activation function is a sigmoid\n self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))\n\n def test_train(self):\n # Test that weights are updated correctly on training\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n \n network.train(inputs, targets)\n self.assertTrue(np.allclose(network.weights_hidden_to_output, \n np.array([[ 0.37275328], \n [-0.03172939]])))\n self.assertTrue(np.allclose(network.weights_input_to_hidden,\n np.array([[ 0.10562014, -0.20185996], \n [0.39775194, 0.50074398], \n [-0.29887597, 0.19962801]])))\n\n def test_run(self):\n # Test correctness of run method\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy() \n self.assertTrue(np.allclose(network.run(inputs), 0.09998924))\n\nsuite = unittest.TestLoader().loadTestsFromModule(TestMethods())\nunittest.TextTestRunner().run(suite)", "Training the network\nHere you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.\nYou'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. 
You'll learn more about SGD later.\nChoose the number of iterations\nThis is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model with not generalize well to other data, this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase.\nChoose the learning rate\nThis scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.\nChoose the number of hidden nodes\nThe more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.", "import sys\n\n### Set the hyperparameters here ###\niterations = 40000\nlearning_rate = 0.5\nhidden_nodes = 35\noutput_nodes = 1\n\nN_i = train_features.shape[1]\nnetwork = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)\n\nlosses = {'train':[], 'validation':[]}\nfor ii in range(iterations):\n # Go through a random batch of 128 records from the training data set\n batch = np.random.choice(train_features.index, size=128)\n X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']\n \n network.train(X, y)\n \n # Printing out the training progress\n train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)\n val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)\n sys.stdout.write(\"\\rProgress: {:2.1f}\".format(100 * ii/float(iterations)) \\\n + \"% ... Training loss: \" + str(train_loss)[:5] \\\n + \" ... Validation loss: \" + str(val_loss)[:5])\n sys.stdout.flush()\n \n losses['train'].append(train_loss)\n losses['validation'].append(val_loss)\n\nplt.plot(losses['train'], label='Training loss')\nplt.plot(losses['validation'], label='Validation loss')\nplt.legend()\n_ = plt.ylim()", "Check out your predictions\nHere, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.", "fig, ax = plt.subplots(figsize=(8,4))\n\nmean, std = scaled_features['cnt']\npredictions = network.run(test_features).T*std + mean\nax.plot(predictions[0], label='Prediction')\nax.plot((test_targets['cnt']*std + mean).values, label='Data')\nax.set_xlim(right=len(predictions))\nax.legend()\n\ndates = pd.to_datetime(rides.ix[test_data.index]['dteday'])\ndates = dates.apply(lambda d: d.strftime('%b %d'))\nax.set_xticks(np.arange(len(dates))[12::24])\n_ = ax.set_xticklabels(dates[12::24], rotation=45)", "OPTIONAL: Thinking about your results(this question will not be evaluated in the rubric).\nAnswer these questions about your results. 
How well does the model predict the data? Where does it fail? Why does it fail where it does?\n\nNote: You can edit the text in this cell by double-clicking on it. When you want to render the text, press Control + Enter\n\nYour answer below" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
WillenZh/deep-learning-project
tutorials/autoencoder/Convolutional_Autoencoder.ipynb
mit
[ "Convolutional Autoencoder\nSticking with the MNIST dataset, let's improve our autoencoder's performance using convolutional layers. Again, loading modules and the data.", "%matplotlib inline\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', validation_size=0)\n\nimg = mnist.train.images[2]\nplt.imshow(img.reshape((28, 28)), cmap='Greys_r')", "Network Architecture\nThe encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. The decoder though might be something new to you. The decoder needs to convert from a narrow representation to a wide reconstructed image. For example, the representation could be a 4x4x8 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the narrow decoder input layer. A schematic of the network is shown below.\n\nHere our final encoder layer has size 4x4x8 = 128. The original images have size 28x28 = 784, so the encoded vector is roughly 16% the size of the original image. These are just suggested sizes for each of the layers. Feel free to change the depths and sizes, but remember our goal here is to find a small representation of the input data.\nWhat's going on with the decoder\nOkay, so the decoder has these \"Upsample\" layers that you might not have seen before. First off, I'll discuss a bit what these layers aren't. Usually, you'll see deconvolutional layers used to increase the width and height of the layers. They work almost exactly the same as convolutional layers, but it reverse. A stride in the input layer results in a larger stride in the deconvolutional layer. For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. Comparatively, one unit in the input layer will be expanded to a 3x3 path in a deconvolutional layer. Deconvolution is often called \"transpose convolution\" which is what you'll find with the TensorFlow API, with tf.nn.conv2d_transpose. \nHowever, deconvolutional layers can lead to artifacts in the final images, such as checkerboard patterns. This is due to overlap in the kernels which can be avoided by setting the stride and kernel size equal. In this Distill article from Augustus Odena, et al, the authors show that these checkerboard artifacts can be avoided by resizing the layers using nearest neighbor or bilinear interpolation (upsampling) followed by a convolutional layer. In TensorFlow, this is easily done with tf.image.resize_images, followed by a convolution. Be sure to read the Distill article to get a better understanding of deconvolutional layers and why we're using upsampling.\n\nExercise: Build the network shown above. Remember that a convolutional layer with strides of 1 and 'same' padding won't reduce the height and width. That is, if the input is 28x28 and the convolution layer has stride = 1 and 'same' padding, the convolutional layer will also be 28x28. The max-pool layers are used the reduce the width and height. A stride of 2 will reduce the size by 2. 
Odena et al claim that nearest neighbor interpolation works best for the upsampling, so make sure to include that as a parameter in tf.image.resize_images or use tf.image.resize_nearest_neighbor.", "learning_rate = 0.001\ninputs_ = \ntargets_ = \n\n### Encoder\nconv1 = \n# Now 28x28x16\nmaxpool1 = \n# Now 14x14x16\nconv2 = \n# Now 14x14x8\nmaxpool2 = \n# Now 7x7x8\nconv3 = \n# Now 7x7x8\nencoded = \n# Now 4x4x8\n\n### Decoder\nupsample1 = \n# Now 7x7x8\nconv4 = \n# Now 7x7x8\nupsample2 = \n# Now 14x14x8\nconv5 = \n# Now 14x14x8\nupsample3 = \n# Now 28x28x8\nconv6 = \n# Now 28x28x16\n\nlogits = \n#Now 28x28x1\n\n# Pass logits through sigmoid to get reconstructed image\ndecoded =\n\n# Pass logits through sigmoid and calculate the cross-entropy loss\nloss = \n\n# Get cost and define the optimizer\ncost = tf.reduce_mean(loss)\nopt = tf.train.AdamOptimizer(learning_rate).minimize(cost)", "Training\nAs before, here wi'll train the network. Instead of flattening the images though, we can pass them in as 28x28x1 arrays.", "sess = tf.Session()\n\nepochs = 20\nbatch_size = 200\nsess.run(tf.global_variables_initializer())\nfor e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n imgs = batch[0].reshape((-1, 28, 28, 1))\n batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,\n targets_: imgs})\n\n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Training loss: {:.4f}\".format(batch_cost))\n\nfig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))\nin_imgs = mnist.test.images[:10]\nreconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})\n\nfor images, row in zip([in_imgs, reconstructed], axes):\n for img, ax in zip(images, row):\n ax.imshow(img.reshape((28, 28)), cmap='Greys_r')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n\nfig.tight_layout(pad=0.1)\n\nsess.close()", "Denoising\nAs I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practive. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. We'll use noisy images as input and the original, clean images as targets. Here's an example of the noisy images I generated and the denoised images.\n\nSince this is a harder problem for the network, we'll want to use deeper convolutional layers here, more feature maps. I suggest something like 32-32-16 for the depths of the convolutional layers in the encoder, and the same depths going backward through the decoder. Otherwise the architecture is the same as before.\n\nExercise: Build the network for the denoising autoencoder. It's the same as before, but with deeper layers. 
I suggest 32-32-16 for the depths, but you can play with these numbers, or add more layers.", "learning_rate = 0.001\ninputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')\ntargets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')\n\n### Encoder\nconv1 = \n# Now 28x28x32\nmaxpool1 = \n# Now 14x14x32\nconv2 = \n# Now 14x14x32\nmaxpool2 = \n# Now 7x7x32\nconv3 = \n# Now 7x7x16\nencoded = \n# Now 4x4x16\n\n### Decoder\nupsample1 = \n# Now 7x7x16\nconv4 = \n# Now 7x7x16\nupsample2 = \n# Now 14x14x16\nconv5 = \n# Now 14x14x32\nupsample3 = \n# Now 28x28x32\nconv6 = \n# Now 28x28x32\n\nlogits = \n#Now 28x28x1\n\n# Pass logits through sigmoid to get reconstructed image\ndecoded =\n\n# Pass logits through sigmoid and calculate the cross-entropy loss\nloss = \n\n# Get cost and define the optimizer\ncost = tf.reduce_mean(loss)\nopt = tf.train.AdamOptimizer(learning_rate).minimize(cost)\n\nsess = tf.Session()\n\nepochs = 100\nbatch_size = 200\n# Set's how much noise we're adding to the MNIST images\nnoise_factor = 0.5\nsess.run(tf.global_variables_initializer())\nfor e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n # Get images from the batch\n imgs = batch[0].reshape((-1, 28, 28, 1))\n \n # Add random noise to the input images\n noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)\n # Clip the images to be between 0 and 1\n noisy_imgs = np.clip(noisy_imgs, 0., 1.)\n \n # Noisy images as inputs, original images as targets\n batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,\n targets_: imgs})\n\n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Training loss: {:.4f}\".format(batch_cost))", "Checking out the performance\nHere I'm adding noise to the test images and passing them through the autoencoder. It does a suprisingly great job of removing the noise, even though it's sometimes difficult to tell what the original number is.", "fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))\nin_imgs = mnist.test.images[:10]\nnoisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)\nnoisy_imgs = np.clip(noisy_imgs, 0., 1.)\n\nreconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})\n\nfor images, row in zip([noisy_imgs, reconstructed], axes):\n for img, ax in zip(images, row):\n ax.imshow(img.reshape((28, 28)), cmap='Greys_r')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\nfig.tight_layout(pad=0.1)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
google-research/language
language/multiberts/coref.ipynb
apache-2.0
[ "Application: Gender Bias in Coreference Systems\nThis notebook walks through the analysis in Section 4 of the paper. We'll look at accuracy and bias correlation metrics on the Winogender dataset of Rudinger et al. 2018, and show how the multibootstrap can be used in two different ways:\n\nA paired analysis of an intervention (incremental CDA) applied to pretrained checkpoints.\nAn unpaired analysis comparing to a new set of checkpoints trained with a different procedure (CDA full).\n\nThis notebook will download pre-computed predictions, which are exactly the predictions used in the paper; the cells below should allow you to directly reproduce Figure 3, Table 1, and Table 2 from Section 4, as well as Figure 5, Figure 6, and Table 4 from Appendix D.\nImport packages", "#@title Import libraries and multibootstrap code\nimport re\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport sklearn.metrics\nimport scipy.stats\n\nfrom tqdm.notebook import tqdm # for progress indicator\n\nimport multibootstrap\n\n#@title Import and configure plotting libraries\nimport matplotlib\nfrom matplotlib import pyplot\nimport seaborn as sns\nsns.set_style('white')\n%config InlineBackend.figure_format = 'retina' # make matplotlib plots look better\n\nfrom IPython.display import display", "Download prediction files\nWe release four groups of predictions:\n\nbase: the base MultiBERTs models (bert-base-uncased), with 5 coreference runs for each of 25 pretraining checkpoints.\ncda_intervention-50k: as above, but with 50k steps of CDA applied to each checkpoint. 5 coreference runs for each of 25 pretraining checkpoints, paired with base.\nfrom_scratch: trained from-scratch using CDA data. 5 coreference runs for each of 25 pretraining checkpoints, which are not paired with the above.\nbase_extra_seeds: 25 coreference runs for each of the first five pretraining seeds from base; used in Figure 6.\n\nFor each group, there are three files:\n* run_info.tsv: run information, with columns pretrain_seed and finetune_seed\n* label_info.tsv : labels and other metadata for each instance. 
720 rows, \n one for each Winogender example.\n* preds.tsv: predictions on each instance, with rows aligned to those of\n run_info.tsv and 720 columns which align to the rows of label_info.tsv.\nThe values in preds.tsv represent the index of the predicted referent, so for\nWinogender this means:\n- 0 is the occupation term\n- 1 is the other_participant\nYou can also browse these files manually here: https://console.cloud.google.com/storage/browser/multiberts/public/example-predictions/coref", "#@title Download predictions and metadata\nscratch_dir = \"/tmp/multiberts_coref\"\nif not os.path.isdir(scratch_dir): \n os.mkdir(scratch_dir)\n \npreds_root = \"https://storage.googleapis.com/multiberts/public/example-predictions/coref\"\nGROUP_NAMES = [\n 'base',\n 'base_extra_seeds',\n 'cda_intervention-50k',\n 'from_scratch'\n]\nfor name in GROUP_NAMES:\n !mkdir -p $scratch_dir/$name\n for fname in ['label_info.tsv', 'preds.tsv', 'run_info.tsv']:\n !curl -s -O $preds_root/$name/$fname --output-dir $scratch_dir/$name\n\n# Fetch Winogender occupations data from official repo https://github.com/rudinger/winogender-schemas\n!curl -s -O https://raw.githubusercontent.com/rudinger/winogender-schemas/master/data/occupations-stats.tsv \\\n --output-dir $scratch_dir\n \n!ls $scratch_dir/**\n\n#@title Load run information\ndata_root = scratch_dir\n\nall_run_info = []\nfor group_name in GROUP_NAMES:\n run_info_path = os.path.join(data_root, group_name, \"run_info.tsv\")\n run_info = pd.read_csv(run_info_path, sep='\\t', index_col=0)\n run_info['group_name'] = group_name\n all_run_info.append(run_info)\n\nrun_info = pd.concat(all_run_info, axis=0, ignore_index=True)\nrun_info\n\n# Count the number of runs in each group\nrun_info.groupby(by='group_name').apply(len)\n\n#@title Load predictions\nall_preds = []\nfor group_name in GROUP_NAMES:\n preds_path = os.path.join(data_root, group_name, \"preds.tsv\")\n all_preds.append(np.loadtxt(preds_path))\n\npreds = np.concatenate(all_preds, axis=0)\npreds.shape\n\n#@title Load label info\nlabel_info_path = os.path.join(data_root, GROUP_NAMES[0], \"label_info.tsv\")\nlabel_info = pd.read_csv(label_info_path, sep='\\t', index_col=0)\n\nlabel_info", "Finally, load the occupations data from the U.S. Bureau of Labor Statistics, which we'll use to compute the bias correlation.", "#@title Load occupations data\noccupation_tsv_path = os.path.join(data_root, \"occupations-stats.tsv\")\n\n# Link to BLS data\noccupation_data = pd.read_csv(occupation_tsv_path, sep=\"\\t\").set_index(\"occupation\")\noccupation_pf = (occupation_data['bls_pct_female'] / 100.0).sort_index()\noccupation_pf", "Define metrics\nThe values in preds.tsv represent binary predictions about whether each of our models predicts that the pronoun corresponds to the occupation term (0) or the other participant (1) in each Winogender example.\nWith this, we can compute two metrics:\n- Accuracy against binary labels (whether the pronoun should refer to the occupation term, the answer column in label_info). For this, we'll run bootstrap over all 720 examples.\n- Correlation of bias score against each occupation's P(female), according to the U.S. Bureau of Labor Statistics. This is done as in Webster et al. 2020 and Rudinger et al. 2018: for each profession, we compute the fraction of time when female pronouns resolve to it, the fraction of time that male pronouns resolve to it, and take the bias score to be the difference of these two quantities. 
For this, we'll aggregate to the 60 occupations, then run bootstrap over the set of occupations.\nThese will be used inside the bootstrap, so get_accuracy(), get_bias_corr(), and get_bias_slope() should all take two arguments, aligned lists of labels and predictions.", "#@title Define metrics, test on one run\ndef get_accuracy(answers, binary_preds):\n return np.mean(answers == binary_preds)\n\ndef get_bias_score(preds_row):\n df = label_info.copy()\n df['pred_occupation'] = (preds_row == 0)\n m_pct = df[df[\"gender\"] == \"MASCULINE\"].groupby(by=\"occupation\")['pred_occupation'].agg('mean')\n f_pct = df[df[\"gender\"] == \"FEMININE\"].groupby(by=\"occupation\")['pred_occupation'].agg('mean')\n return (f_pct - m_pct).sort_index()\n\n# Ensure this aligns with result of get_bias_score\nsorted_occupations = sorted(list(label_info.occupation.unique()))\npf_bls = np.array([occupation_pf[occ] for occ in sorted_occupations])\n\ndef get_bias_corr_and_slope(pf_bls, bias_scores):\n lr = scipy.stats.linregress(pf_bls, bias_scores)\n return (lr.rvalue, lr.slope)\n\ndef get_bias_corr(pf_bls, bias_scores):\n return get_bias_corr_and_slope(pf_bls, bias_scores)[0]\n\ndef get_bias_slope(pf_bls, bias_scores):\n return get_bias_corr_and_slope(pf_bls, bias_scores)[1]\n\nprint(\"Accuracy:\" , get_accuracy(label_info['answer'], preds[0]))\nprint(\"Bias r, slope:\", get_bias_corr_and_slope(pf_bls, get_bias_score(preds[0])))", "Computing the bias scores can be slow because of the grouping operations, so we preprocess all runs before running the bootstrap. This gives us a [num_runs, 60] matrix, and we can compute the final bias correlation inside the multibootstrap routine.", "bias_scores = np.stack([get_bias_score(p) for p in preds], axis=0)\nbias_scores.shape", "Finally, attach these to the run info dataframe - this will make it easier to filter by row later.", "run_info['coref_preds'] = list(preds)\nrun_info['bias_scores'] = list(bias_scores)", "Plot overall scores for each group\nBefore we introduce the multibootstrap, let's get a high-level idea of what our metrics look like by just computing the mean scores for each group:", "run_info['accuracy'] = [get_accuracy(label_info['answer'], p) for p in preds]\nrs, slopes = zip(*[get_bias_corr_and_slope(pf_bls, bs) for bs in bias_scores])\nrun_info['bias_r'] = rs\nrun_info['bias_slope'] = slopes\n\nrun_info.groupby(by='group_name')[['accuracy', 'bias_r']].agg('mean')", "Note that accuracy is very similar across all groups, while - as we might expect - the bias correlation (bias_r) decreases significantly for the CDA runs.", "# Accuracy across runs\ndata = run_info[run_info.group_name == 'base']\ndesc = data.groupby(by='pretrain_seed').agg(dict(accuracy='mean')).describe()\nprint(f\"{desc.accuracy['mean']:.1%} +/- {desc.accuracy['std']:.1%}\")", "You can also check how much this varies by pretraining seed. As it turns out, not a lot. 
Here's a plot showing this for the base runs:", "#@title Accuracy variation by pretrain run\nfig = pyplot.figure(figsize=(15, 5))\nax = fig.gca()\nsns.boxplot(ax=ax, x='pretrain_seed', y='accuracy', data=run_info[run_info.group_name == 'base'])\nax.set_title(\"Accuracy variation by pretrain seed, base\")\nax.set_ylim(0, 1.0)\nax.axhline(0)", "As a quick check, we can permute the seeds and see if much changes about our estimate:", "# Accuracy across runs - randomized seed baseline\nrng = np.random.RandomState(42)\ndata = run_info[run_info.group_name == 'base'].copy()\nbs = data.accuracy.to_numpy()\ndata['accuracy_bs'] = rng.choice(bs, size=len(bs))\ndesc = data.groupby(by='pretrain_seed').agg(dict(accuracy_bs='mean')).describe()\nprint(f\"With replacement: {desc.accuracy_bs['mean']:.1%} +/- {desc.accuracy_bs['std']:.1%}\")\n\nrng = np.random.RandomState(42)\ndata = run_info[run_info.group_name == 'base'].copy()\nbs = data.accuracy.to_numpy()\nrng.shuffle(bs)\ndata['accuracy_bs'] = bs\ndesc = data.groupby(by='pretrain_seed').agg(dict(accuracy_bs='mean')).describe()\nprint(f\"Without replacement: {desc.accuracy_bs['mean']:.1%} +/- {desc.accuracy_bs['std']:.1%}\")", "Figure 5 (Appendix): Bias correlation for each pre-training seed\nLet's do the same as above, but for bias correlation. Again, this is on the whole run - no bootstrap yet - but should give us a sense of the variation you'd expect if you were to run this experiment ad-hoc on different pretraining seeds. As above, we'll just show the base runs:", "fig = pyplot.figure(figsize=(15, 7))\nax = fig.gca()\nbase = sns.boxplot(ax=ax, x='pretrain_seed', y='bias_r', data=run_info[run_info.group_name == 'base'], palette=['darkslategray'])\nax.set_title(\"Winogender bias correlation (r) by pretrain seed\")\nax.set_ylim(-0.2, 1.0)\nax.axhline(0)\n\nlegend_elements = [matplotlib.patches.Patch(facecolor='darkslategray', label='Base')]\nax.legend(handles=legend_elements, loc='upper right', fontsize=14)\n\nax.title.set_fontsize(16)\nax.set_xlabel(\"Pretraining Seed\", fontsize=14)\nax.tick_params(axis='x', labelsize=14)\nax.set_ylabel(\"Bias correlation (r)\", fontsize=14)\nax.tick_params(axis='y', labelsize=14)\n\n# Expected range of accuracy if we randomly sampled data\nimport scipy.stats\n[n/720.0 - 0.624 for n in scipy.stats.binom.interval(0.682, 720, 0.624)]", "Figure 3: Bias correlation by pretrain seed, base and CDA intervention\nNow let's compare the base runs to running CDA for 50k steps. 
Again, no bootstrap yet - just plotting scores on full runs, to get a sense of how much difference we might expect to see if we did this ad-hoc and measured the effect size of CDA using just a single pretraining run.", "expt_group = \"cda_intervention-50k\"\n\nfig = pyplot.figure(figsize=(15, 7))\nax = fig.gca()\nbase = sns.boxplot(ax=ax, x='pretrain_seed', y='bias_r', data=run_info[run_info.group_name == 'base'], palette=['darkslategray'])\nexpt = sns.boxplot(ax=ax, x='pretrain_seed', y='bias_r', data=run_info[run_info.group_name == expt_group], palette=['lightgray'])\nax.set_title(\"Winogender bias correlation (r) by pretrain seed\")\nax.set_ylim(-0.2, 1.0)\nax.axhline(0)\n\nlegend_elements = [matplotlib.patches.Patch(facecolor='darkslategray', label='Base'),\n matplotlib.patches.Patch(facecolor='lightgray', label='CDA-incr')]\nax.legend(handles=legend_elements, loc='upper right', fontsize=14)\n\nax.title.set_fontsize(16)\nax.set_xlabel(\"Pretraining Seed\", fontsize=14)\nax.tick_params(axis='x', labelsize=14)\nax.set_ylabel(\"Bias correlation (r)\", fontsize=14)\nax.tick_params(axis='y', labelsize=14)", "Appendix D: Cross-Seed Variation\nYou might ask: how much of this variation is actually due to the coreference task training? We can see decently large error bars for each pretraining seed above, and we only had five coreference runs each.\nOne simple test is to ignore the pretraining seed. We'll create groups by randomly sampling (with replacement) five runs from the set of runs we have, then looking at the variance in the metrics. We can see that for bias_r, the variance is about 4x as high when using the real seeds (stdev = 0.097 vs 0.049), suggesting that most of the variation does in fact come from pretraining variation.", "data = run_info[run_info.group_name == 'base'].copy()\nbs = data.bias_r.to_numpy()\nfor i in range (5):\n rng = np.random.RandomState(i)\n data[f'bias_r_bs_{i}'] = rng.choice(bs, size=len(bs))\n \ndata.groupby(by='pretrain_seed').agg('mean').describe().loc[['mean', 'std']]", "Figure 6: Extra task runs\nAnother way to test this is to look at the base_extra_seeds runs, where we ran 5 different pretraining seeds with 25 task runs. This gives us a better estimate of the mean for each pretraining seed.", "fig = pyplot.figure(figsize=(8, 7))\nax = fig.gca()\nsns.boxplot(ax=ax, x='pretrain_seed', y='bias_r', data=run_info[run_info.group_name == 'base_extra_seeds'])\nax.set_title(\"Bias variation by pretrain seed, base w/extra seeds\")\nax.set_ylim(-0.2, 1.0)\nax.axhline(0)\n\nax.title.set_fontsize(16)\nax.set_xlabel(\"Pretraining Seed\", fontsize=14)\nax.tick_params(axis='x', labelsize=14)\nax.set_ylabel(\"Bias correlation (r)\", fontsize=14)\n#ax.tick_params(axis='y', labelsize=14)", "Now we can also use the multibootstrap as a statistical test to check for differences between these seeds. 
We'll compare seed 0 to seed 1, and do an unpaired analysis:", "#@title Bootstrap to test if seed 1 is different from seed 0\nnum_bootstrap_samples = 1000 #@param {type: \"integer\"}\nrseed=42\n\nmask = (run_info.group_name == 'base_extra_seeds')\nmask &= (run_info.pretrain_seed == 0) | (run_info.pretrain_seed == 1)\nselected_runs = run_info[mask].copy()\n\n# Set intervention and seed columns\nselected_runs['intervention'] = (selected_runs.pretrain_seed == 1)\nselected_runs['seed'] = selected_runs.pretrain_seed\nprint(\"Available runs:\", len(selected_runs))\n\n##\n# Compute bias r\nprint(\"Computing bias r\")\nlabels = pf_bls.copy()\nprint(\"Labels:\", labels.dtype, labels.shape)\npreds = np.stack(selected_runs.bias_scores)\nprint(\"Preds:\", preds.dtype, preds.shape)\n\nmetric = get_bias_corr\nsamples = multibootstrap.multibootstrap(selected_runs, preds, labels,\n metric, nboot=num_bootstrap_samples,\n paired_seeds=False,\n rng=rseed,\n progress_indicator=tqdm)\n\nmultibootstrap.report_ci(samples, c=0.95, expect_negative_effect=False);", "Section 4.1 / Table 1: Paired analysis: base vs. CDA intervention\nWe've seen how much variation there can be across pretraining checkpoints, so let's use the multibootstrap to help us get a better estimate of the effectiveness of CDA. Here, we'll look at CDA for 50k steps as an intervention on the base checkpoints, and so we'll perform a paired analysis where we sample the same pretraining seeds from both sides.\nbase (L) is MultiBERTs following the original BERT recipe, and expt (L') has additional steps with counterfactual data applied to these same checkpoints. We have 25 pretraining seeds on base and the same 25 pretraining seeds on expt.", "num_bootstrap_samples = 1000 #@param {type: \"integer\"}\nrseed=42\n\nexpt_group = \"cda_intervention-50k\"\n\nmask = (run_info.group_name == 'base')\nmask |= (run_info.group_name == expt_group)\nselected_runs = run_info[mask].copy()\n\n# Set intervention and seed columns\nselected_runs['intervention'] = selected_runs.group_name == expt_group\nselected_runs['seed'] = selected_runs.pretrain_seed\nprint(\"Available runs:\", len(selected_runs))\n\nall_samples = {}\n\n##\n# Compute accuracy\nprint(\"Computing accuracy\")\nlabels = np.array(label_info['answer'])\nprint(\"Labels:\", labels.dtype, labels.shape)\npreds = np.stack(selected_runs.coref_preds)\nprint(\"Preds:\", preds.dtype, preds.shape)\n\nmetric = get_accuracy\nsamples = multibootstrap.multibootstrap(selected_runs, preds, labels,\n metric, nboot=num_bootstrap_samples,\n paired_seeds=True,\n rng=rseed,\n progress_indicator=tqdm)\nall_samples['accuracy'] = samples\nmultibootstrap.report_ci(all_samples['accuracy'], c=0.95, expect_negative_effect=True);\n\nprint()\n\n##\n# Compute bias r\nprint(\"Computing bias r\")\nlabels = pf_bls.copy()\nprint(\"Labels:\", labels.dtype, labels.shape)\npreds = np.stack(selected_runs.bias_scores)\nprint(\"Preds:\", preds.dtype, preds.shape)\n\nmetric = get_bias_corr\nsamples = multibootstrap.multibootstrap(selected_runs, preds, labels,\n metric, nboot=num_bootstrap_samples,\n paired_seeds=True,\n rng=rseed,\n progress_indicator=tqdm)\nall_samples['bias_r'] = samples\n\nmultibootstrap.report_ci(all_samples['bias_r'], c=0.95, expect_negative_effect=True);", "Plot result distribution\nIt can also be illustrative to look directly at the distribution of samples:", "#@title Bias r\ncolumns = ['Base', 'CDA intervention']\nvar_name = 'Group Name'\nval_name = \"Bias Correlation\"\nsamples = all_samples['bias_r']\n\nfig, axs = 
pyplot.subplots(1, 2, gridspec_kw=dict(width_ratios=[2, 1]), figsize=(15, 7))\n\nbdf = pd.DataFrame(samples, columns=columns).melt(var_name=var_name, value_name=val_name)\nbdf['x'] = 0\nfig = pyplot.figure(figsize=(10, 7))\nax = axs[0]\nsns.violinplot(ax=ax, x=var_name, y=val_name, data=bdf, inner='quartile')\nax.set_title(\"MultiBERTs CDA intervention - bias r\")\nax.axhline(0)\n\nvar_name = 'Pretraining Steps'\nval_name = \"Accuracy delta\"\nbdf = pd.DataFrame(samples, columns=columns)\nbdf['deltas'] = bdf['CDA intervention'] - bdf['Base']\nbdf = bdf.drop(axis=1, labels=columns).melt(var_name=var_name, value_name=val_name)\nbdf['x'] = 0\nax = axs[1]\nsns.violinplot(ax=ax, x=var_name, y=val_name, data=bdf, inner='quartile',\n palette='gray')\nax.set_title(\"MultiBERTs CDA intervention - bias r deltas\")\nax.axhline(0)\n\nmultibootstrap.report_ci(samples, c=0.95, expect_negative_effect=True);", "Section 4.2 / Table 2: Unpaired analysis: CDA intervention vs. CDA from-scratch\nHere, we'll compare our CDA 50k intervention to a set of models trained from-scratch with CDA data.\nbase (L) is the intevention CDA above, and expt (L') is a similar setup but pretraining from scratch with the counterfactually-augmented data. We have 25 pretraining seeds on base and 25 pretraining seeds on expt, but these are independent runs so we'll do an unpaired analysis.", "num_bootstrap_samples = 1000 #@param {type: \"integer\"}\nrseed=42\n\nbase_group = \"cda_intervention-50k\"\nexpt_group = \"from_scratch\"\n\nmask = (run_info.group_name == base_group)\nmask |= (run_info.group_name == expt_group)\nselected_runs = run_info[mask].copy()\n\n# Set intervention and seed columns\nselected_runs['intervention'] = selected_runs.group_name == expt_group\nselected_runs['seed'] = selected_runs.pretrain_seed\nprint(\"Available runs:\", len(selected_runs))\n\nall_samples = {}\n\n##\n# Compute accuracy\nprint(\"Computing accuracy\")\nlabels = np.array(label_info['answer'])\nprint(\"Labels:\", labels.dtype, labels.shape)\npreds = np.stack(selected_runs.coref_preds)\nprint(\"Preds:\", preds.dtype, preds.shape)\n\nmetric = get_accuracy\nsamples = multibootstrap.multibootstrap(selected_runs, preds, labels,\n metric, nboot=num_bootstrap_samples,\n paired_seeds=False,\n rng=rseed,\n progress_indicator=tqdm)\nall_samples['accuracy'] = samples\nmultibootstrap.report_ci(all_samples['accuracy'], c=0.95, expect_negative_effect=True);\n\nprint()\n\n##\n# Compute bias r\nprint(\"Computing bias r\")\nlabels = pf_bls.copy()\nprint(\"Labels:\", labels.dtype, labels.shape)\npreds = np.stack(selected_runs.bias_scores)\nprint(\"Preds:\", preds.dtype, preds.shape)\n\nmetric = get_bias_corr\nsamples = multibootstrap.multibootstrap(selected_runs, preds, labels,\n metric, nboot=num_bootstrap_samples,\n paired_seeds=False,\n rng=rseed,\n progress_indicator=tqdm)\nall_samples['bias_r'] = samples\n\nmultibootstrap.report_ci(all_samples['bias_r'], c=0.95, expect_negative_effect=True);\n\n#@title Bias r\ncolumns = ['CDA intervention', 'CDA from-scratch']\nvar_name = 'Group Name'\nval_name = \"Bias Correlation\"\nsamples = all_samples['bias_r']\n\nfig, axs = pyplot.subplots(1, 2, gridspec_kw=dict(width_ratios=[2, 1]), figsize=(15, 7))\n\nbdf = pd.DataFrame(samples, columns=columns).melt(var_name=var_name, value_name=val_name)\nbdf['x'] = 0\nax = axs[0]\nsns.violinplot(ax=ax, x=var_name, y=val_name, data=bdf, inner='quartile')\nax.set_title(\"MultiBERTs CDA intervention vs. 
from-scratch - bias r\")\nax.axhline(0)\n\nvar_name = 'Pretraining Steps'\nval_name = \"Accuracy delta\"\nbdf = pd.DataFrame(samples, columns=columns)\nbdf['deltas'] = bdf['CDA from-scratch'] - bdf['CDA intervention']\nbdf = bdf.drop(axis=1, labels=columns).melt(var_name=var_name, value_name=val_name)\nbdf['x'] = 0\nax = axs[1]\nsns.violinplot(ax=ax, x=var_name, y=val_name, data=bdf, inner='quartile',\n palette='gray')\nax.set_title(\"MultiBERTs CDA intervention vs. from-scratch - bias r deltas\")\nax.axhline(0)\n\nmultibootstrap.report_ci(samples, c=0.95, expect_negative_effect=True);", "Do we actually need to do the full multiboostrap, where we sample over both seeds and examples simultaneously? We can check this with ablations where we sample over one axis only:\n\nSeeds only (sample_examples=False)\nExamples only (sample_seeds=False)", "#@title As above, but sample seeds only\nrseed=42\n\nmetric = get_bias_corr\nsamples = multibootstrap.multibootstrap(selected_runs, preds, labels,\n metric, nboot=num_bootstrap_samples,\n rng=rseed,\n paired_seeds=False,\n sample_examples=False,\n progress_indicator=tqdm)\n\ncolumns = ['CDA intervention', 'CDA from-scratch']\nvar_name = 'Group Name'\nval_name = \"Bias Correlation\"\n\nfig, axs = pyplot.subplots(1, 2, gridspec_kw=dict(width_ratios=[2, 1]), figsize=(15, 7))\n\nbdf = pd.DataFrame(samples, columns=columns).melt(var_name=var_name, value_name=val_name)\nbdf['x'] = 0\nax = axs[0]\nsns.violinplot(ax=ax, x=var_name, y=val_name, data=bdf, inner='quartile')\nax.set_title(\"MultiBERTs CDA intervention vs. from-scratch - bias r\")\nax.axhline(0)\n\nvar_name = 'Pretraining Steps'\nval_name = \"Accuracy delta\"\nbdf = pd.DataFrame(samples, columns=columns)\nbdf['deltas'] = bdf['CDA from-scratch'] - bdf['CDA intervention']\nbdf = bdf.drop(axis=1, labels=columns).melt(var_name=var_name, value_name=val_name)\nbdf['x'] = 0\nax = axs[1]\nsns.violinplot(ax=ax, x=var_name, y=val_name, data=bdf, inner='quartile',\n palette='gray')\nax.set_title(\"MultiBERTs CDA intervention vs. from-scratch - bias r deltas\")\nax.axhline(0)\n\nmultibootstrap.report_ci(samples, c=0.95, expect_negative_effect=True);\n\n#@title As above, but sample examples only\nrseed=42\n\nmetric = get_bias_corr\nsamples = multibootstrap.multibootstrap(selected_runs, preds, labels,\n metric, nboot=num_bootstrap_samples,\n rng=rseed,\n paired_seeds=False,\n sample_seeds=False,\n progress_indicator=tqdm)\n\ncolumns = ['CDA intervention', 'CDA from-scratch']\nvar_name = 'Group Name'\nval_name = \"Bias Correlation\"\n\nfig, axs = pyplot.subplots(1, 2, gridspec_kw=dict(width_ratios=[2, 1]), figsize=(15, 7))\n\nbdf = pd.DataFrame(samples, columns=columns).melt(var_name=var_name, value_name=val_name)\nbdf['x'] = 0\nax = axs[0]\nsns.violinplot(ax=ax, x=var_name, y=val_name, data=bdf, inner='quartile')\nax.set_title(\"MultiBERTs CDA intervention vs. from-scratch - bias r\")\nax.axhline(0)\n\nvar_name = 'Pretraining Steps'\nval_name = \"Accuracy delta\"\nbdf = pd.DataFrame(samples, columns=columns)\nbdf['deltas'] = bdf['CDA from-scratch'] - bdf['CDA intervention']\nbdf = bdf.drop(axis=1, labels=columns).melt(var_name=var_name, value_name=val_name)\nbdf['x'] = 0\nax = axs[1]\nsns.violinplot(ax=ax, x=var_name, y=val_name, data=bdf, inner='quartile',\n palette='gray')\nax.set_title(\"MultiBERTs CDA intervention vs. 
from-scratch - bias r deltas\")\nax.axhline(0)\n\nmultibootstrap.report_ci(samples, c=0.95, expect_negative_effect=True);", "In both of the above, we get lower p-values - suggesting that if we don't account jointly for both sources of variation, we could end up drawing overly confident conclusions about the difference between these methods." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
akloster/porekit-python
examples/squiggle_classifier_1/Read_Until_Efficiency.ipynb
isc
[ "Theoretical Efficiency of Read Until Enrichment\nThe \"Read Until\" feature of the Oxford Nanopore sequencing technology means a program can see the data coming in at each pore and, dependend on that data, reject the molecule inside a certain pore.\nThe actual performance of such a method depends on a lot of factors:\n\nratio of desireable over undesireable molecules in the sample\naccuracy of detection\nlength of event data necessary for the decision\nlatency of event data reaching the controlling program\ndelay between decision and ejecting the molecule\ntime until the pore can accept a new molecule\nlength of DNA strands in the sample\n\nIn this notebook I boiled it down to three parameters:\n\nham_frequency is the frequency of desired molecules\nham_duration is the scale by which the desired molecules are read \"longer\"\naccuracy is the accuracy of the classification\n\nThe analogy to spam detection is chosen because \"ham/spam\" makes for catchier variable names. This computation considers time and \"amount of data\" as equivalent. In reality, event speeds vary a lot, but in the long run, duration of reads and length of the strands correlate very strongly.\nThe result of the computation is the ratio of desired time/data over undesired time/data, which is hopefully higher than the original ham_frequency.", "import matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\n\ndef sim_ru(ham_frequency, ham_duration, accuracy):\n # Monte-Carlo Style\n n = 1000000\n ham = np.random.random(size=n)<ham_frequency\n durations = np.ones(n)\n accurate = np.random.random(size=n)<accuracy\n durations[ham & accurate] = ham_duration\n durations[~ham & ~accurate] = ham_duration\n return (np.sum(durations[ham]) / np.sum(durations))\n\n\ndef sim_ru2(ham_frequency, ham_duration, accuracy):\n # exact calculation\n long = ((ham_frequency* accuracy) + (1-ham_frequency)*(1-accuracy)) * ham_duration\n short = ((ham_frequency* (1-accuracy)) + (1-ham_frequency)*(accuracy)) * 1.0\n ham = (ham_frequency* accuracy)*ham_duration + (ham_frequency* (1-accuracy))*1\n return ham / (long+short)\n\ndef make_plot(ham_frequency):\n f, ax = plt.subplots()\n f.set_figwidth(14)\n f.set_figheight(6)\n ax.set_ylim(0,1)\n x = np.arange(0.5, 1.0,0.001)\n y = np.zeros(len(x))\n handles = []\n for j in reversed([2.5,5,10,20,40]):\n for i in range(len(x)):\n y[i] = sim_ru2(ham_frequency, j, x[i])\n handles.append(ax.plot(x,y, label = \"%.1f\" % j))\n ax.grid()\n f.suptitle(\"Ratio of desired data over total data for different values of \\\"desired length\\\"/\\\"rejected length\\\" \")\n ax.legend(loc=0);\n ax.xaxis.set_label_text(\"Detection Accuracy\");\n ax.yaxis.set_label_text(\"Desired Output / Total Output\");", "50% ham in sample", "make_plot(0.5)", "10% ham in sample", "make_plot(0.1)", "1% ham in sample", "make_plot(0.01)", "Conclusions\nI hope this illustrates how to think about and design Read Until workflows.\nIn practical applications there will be tradeoffs between accuracy and the ham_duration: The more time the molecule has to spend inside the pore before ejection the higher the accuracy of the decision and the lower the ratio of the ham/spam duration.\nIt's also obvious that Read Until strongly favors long reads." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
david4096/bioapi-examples
python_notebooks/1kg_sequence_annotation_service.ipynb
apache-2.0
[ "GA4GH 1000 Genomes Sequence Annotations Example\nThis example illustrates how to access the sequence annotations for a given set of ....\nInitialize Client\nIn this step we create a client object which will be used to communicate with the server. It is initialized using the URL.", "from ga4gh.client import client\nc = client.HttpClient(\"http://1kgenomes.ga4gh.org\")\n\n#Obtain dataSet id REF: -> `1kg_metadata_service`\ndataset = c.search_datasets().next() ", "Search Feature Sets\nFeature sets are the logical containers for genomic features that might be defined in a GFF3, or other file that describes features in genomic coordinates. They are mapped to a single reference set, and belong to specific datasets.", "for feature_set in c.search_feature_sets(dataset_id=dataset.id):\n print feature_set\n if feature_set.name == \"gencode_v24lift37\":\n gencode = feature_set", "Get Feature Set by ID\nWith the identifier to a specific Feature Set, one can retrieve that feature set by ID.", "feature_set = c.get_feature_set(feature_set_id=gencode.id)\nprint feature_set", "Search Features\nWith a Feature Set ID, it becomes possible to construct a Search Features Request. In this request, we can find genomic features by position, type, or name. In this request we simply return all features in the Feature Set.", "counter = 0\nfor features in c.search_features(feature_set_id=feature_set.id):\n if counter > 3:\n break\n counter += 1\n print\"Id: {},\".format(features.id)\n print\" Name: {},\".format(features.name)\n print\" Gene Symbol: {},\".format(features.gene_symbol)\n print\" Parent Id: {},\".format(features.parent_id)\n if features.child_ids:\n for i in features.child_ids:\n print\" Child Ids: {}\".format(i)\n print\" Feature Set Id: {},\".format(features.feature_set_id)\n print\" Reference Name: {},\".format(features.reference_name)\n print\" Start: {},\\tEnd: {},\".format(features.start, features.end)\n print\" Strand: {},\".format(features.strand)\n print\" Feature Type Id: {},\".format(features.feature_type.id)\n print\" Feature Type Term: {},\".format(features.feature_type.term)\n print\" Feature Type Sorce Name: {},\".format(features.feature_type.source_name)\n print\" Feature Type Source Version: {}\\n\".format(features.feature_type.source_version)", "Note: Not all of the elements returned in the response are present in the example. 
All of the parameters will be shown in the get by id method.\nWe can perform a similar search, this time restricting to a specific genomic region.", "for feature in c.search_features(feature_set_id=feature_set.id, reference_name=\"chr17\", start=42000000, end=42001000):\n print feature.name, feature.start, feature.end\n\nfeature = c.get_feature(feature_id=features.id)\nprint\"Id: {},\".format(feature.id)\nprint\" Name: {},\".format(feature.name)\nprint\" Gene Symbol: {},\".format(feature.gene_symbol)\nprint\" Parent Id: {},\".format(feature.parent_id)\nif feature.child_ids:\n for i in feature.child_ids:\n print\" Child Ids: {}\".format(i)\nprint\" Feature Set Id: {},\".format(feature.feature_set_id)\nprint\" Reference Name: {},\".format(feature.reference_name)\nprint\" Start: {},\\tEnd: {},\".format(feature.start, feature.end)\nprint\" Strand: {},\".format(feature.strand)\nprint\" Feature Type Id: {},\".format(feature.feature_type.id)\nprint\" Feature Type Term: {},\".format(feature.feature_type.term)\nprint\" Feature Type Sorce Name: {},\".format(feature.feature_type.source_name)\nprint\" Feature Type Source Version: {}\\n\".format(feature.feature_type.source_version)\nfor vals in feature.attributes.vals:\n print\"{}: {}\".format(vals, feature.attributes.vals[vals].values[0].string_value)", "In this last call we represent all of the elements returned in the message.\nFor documentation in the service, and more information go to:\nhttps://ga4gh-schemas.readthedocs.io/en/latest/schemas/allele_annotation_service.proto.html" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
psychemedia/ou-robotics-vrep
robotVM/notebooks/Demo - Square 2 - Variables.ipynb
apache-2.0
[ "Traverse a Square - Part 2 - Variables\nIn this notebook, we will introduce one of the most powerful ideas in programming: the variable.\nA variable is a container that we can reference by name that is associated with a particular value. The value is assigned to the variable using the the = operator, which we might read as is set to the value of.\nFor example, consider the following assignment statement:\npython\nmessage=\"Hello World\"\nHere, we create a named container message and put the value Hello World into it.\nWhen we refer to the variable as part of another expression, we can then access the value it contains and use that in our expression, as the following example demonstrates:", "# Create the message variable and assign the value \"Hello World\" to it\nmessage=\"Hello World\"\n\n# Use the variable in a print statement\n# The print statement retrieves the value assigned to the variable and displays the value\nprint(message)", "Try changing the message in the previous code cell and re-running it. Does it behave as you expect?\nYou may remember from the Getting Started WIth Notebooks.ipynb notebook that if the last statement in a code cell returns a value, the value will be displayed as the output of the code cell when the cell contents have been executed.\nIf you place the name of a variable, or one or more comma separated variables, on the last line of a code cell, the value will be displayed.\nWhat do you think the output of the following cell will be? Run the cell to find out.", "message", "You can assign whatever object you like to a variable.\nFor example, we can assign numbers to them and do sums with them:", "#Assign raw numbers to variables\napples=5\noranges=10\n\n#Do a sum with the values represented by the variables and assign the result to a new variable\nitems_in_basket = apples + oranges\n\n#Display the resulting value as the cell output\nitems_in_basket", "See if you can add the count of a new set of purchases to the number of items in your basket in the cell above. For example, what if you also bought 3 pears. And a bunch of bananas.\nMaking Use of Variables\nLet's look back at our simple attempt at the square drawing program, in which we repeated blocks of instructions and set the numberical parameter values separately in each case.\nBefore we run the program, we need to load in the bits we need...", "%run 'Set-up.ipynb'\n%run 'Loading scenes.ipynb'\n%run 'vrep_models/PioneerP3DX.ipynb'", "The original programme appears in the code cell below. \n\nhow many changes would you have to make to it in order to change the side length?\ncan you see how you might be able to simplify the act of changing the side length?\nwhat would you need to change if you wanted to make the turns faster? Or slower?\n\nHINT: think variables...", "%%vrepsim '../scenes/OU_Pioneer.ttt' PioneerP3DX\nimport time\n\n#side 1\nrobot.move_forward()\ntime.sleep(1)\n#turn 1\nrobot.rotate_left(1.8)\ntime.sleep(0.45)\n#side 2\nrobot.move_forward()\ntime.sleep(1)\n#turn 2\nrobot.rotate_left(1.8)\ntime.sleep(0.45)\n#side 3\nrobot.move_forward()\ntime.sleep(1)\n#turn 3\nrobot.rotate_left(1.8)\ntime.sleep(0.45)\n#side 4\nrobot.move_forward()\ntime.sleep(1)", "Using the above programme as a guide, see if you can write a programme in the code cell below that makes it easier to maintin and simplifies the act of changing the numerical parameter values.", "%%vrepsim '../scenes/OU_Pioneer.ttt' PioneerP3DX\nimport time\n\n#YOUR CODE HERE", "How did you get on?\nHow easy is is to change the side length now? 
Or find a new combination of the turn speed and turn angle to turn through ninety degrees (or thereabouts?). Try it and see...\nHere's the programme I came up with: I used three variables, one for side length, one for turn time, and one for turn speed. Feel free to try running and modifying this programme too...", "%%vrepsim '../scenes/OU_Pioneer.ttt' PioneerP3DX\nimport time\n\nside_length_time=1\nturn_speed=1.8\nturn_time=0.45\n\n#side 1\nrobot.move_forward()\ntime.sleep(side_length_time)\n#turn 1\nrobot.rotate_left(turn_speed)\ntime.sleep(turn_time)\n#side 2\nrobot.move_forward()\ntime.sleep(side_length_time)\n#turn 2\nrobot.rotate_left(turn_speed)\ntime.sleep(turn_time)\n#side 3\nrobot.move_forward()\ntime.sleep(side_length_time)\n#turn 3\nrobot.rotate_left(turn_speed)\ntime.sleep(turn_time)\n#side 4\nrobot.move_forward()\ntime.sleep(side_length_time)", "In the next two notebooks on this theme, we'll see how to cut out some of the repetition." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
ES-DOC/esdoc-jupyterhub
notebooks/messy-consortium/cmip6/models/emac-2-53-aerchem/aerosol.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Aerosol\nMIP Era: CMIP6\nInstitute: MESSY-CONSORTIUM\nSource ID: EMAC-2-53-AERCHEM\nTopic: Aerosol\nSub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. \nProperties: 69 (37 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:10\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'messy-consortium', 'emac-2-53-aerchem', 'aerosol')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties\n2. Key Properties --&gt; Software Properties\n3. Key Properties --&gt; Timestep Framework\n4. Key Properties --&gt; Meteorological Forcings\n5. Key Properties --&gt; Resolution\n6. Key Properties --&gt; Tuning Applied\n7. Transport\n8. Emissions\n9. Concentrations\n10. Optical Radiative Properties\n11. Optical Radiative Properties --&gt; Absorption\n12. Optical Radiative Properties --&gt; Mixtures\n13. Optical Radiative Properties --&gt; Impact Of H2o\n14. Optical Radiative Properties --&gt; Radiative Scheme\n15. Optical Radiative Properties --&gt; Cloud Interactions\n16. Model \n1. Key Properties\nKey properties of the aerosol model\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of aerosol model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of aerosol model code", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.3. Scheme Scope\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nAtmospheric domains covered by the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.scheme_scope') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"troposhere\" \n# \"stratosphere\" \n# \"mesosphere\" \n# \"mesosphere\" \n# \"whole atmosphere\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.4. Basic Approximations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nBasic approximations made in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.basic_approximations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.5. Prognostic Variables Form\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nPrognostic variables in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"3D mass/volume ratio for aerosols\" \n# \"3D number concenttration for aerosols\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "1.6. Number Of Tracers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of tracers in the aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "1.7. Family Approach\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre aerosol calculations generalized into families of species?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.family_approach') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Software Properties\nSoftware properties of aerosol code\n2.1. Repository\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nLocation of code for this component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.2. Code Version\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCode version identifier.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2.3. Code Languages\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nCode language(s).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "3. Key Properties --&gt; Timestep Framework\nPhysical properties of seawater in ocean\n3.1. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMathematical method deployed to solve the time evolution of the prognostic variables", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses atmospheric chemistry time stepping\" \n# \"Specific timestepping (operator splitting)\" \n# \"Specific timestepping (integrated)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Split Operator Advection Timestep\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for aerosol advection (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.3. Split Operator Physical Timestep\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTimestep for aerosol physics (in seconds).", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.4. Integrated Timestep\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTimestep for the aerosol model (in seconds)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "3.5. Integrated Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSpecify the type of timestep scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Implicit\" \n# \"Semi-implicit\" \n# \"Semi-analytic\" \n# \"Impact solver\" \n# \"Back Euler\" \n# \"Newton Raphson\" \n# \"Rosenbrock\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Meteorological Forcings\n**\n4.1. Variables 3D\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nThree dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Variables 2D\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTwo dimensionsal forcing variables, e.g. land-sea mask definition", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Frequency\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nFrequency with which meteological forcings are applied (in seconds).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Resolution\nResolution in the aersosol model grid\n5.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Canonical Horizontal Resolution\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. 
Number Of Horizontal Gridpoints\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5.4. Number Of Vertical Levels\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nNumber of vertical levels resolved on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5.5. Is Adaptive Grid\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDefault is False. Set true if grid resolution changes during execution.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Tuning Applied\nTuning methodology for aerosol model\n6.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.2. Global Mean Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.3. Regional Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList of regional metrics of mean state used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6.4. Trend Metrics Used\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nList observed trend metrics used in tuning model/component", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Transport\nAerosol transport\n7.1. 
Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of transport in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod for aerosol transport modeling", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Specific transport scheme (eulerian)\" \n# \"Specific transport scheme (semi-lagrangian)\" \n# \"Specific transport scheme (eulerian and semi-lagrangian)\" \n# \"Specific transport scheme (lagrangian)\" \n# TODO - please enter value(s)\n", "7.3. Mass Conservation Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod used to ensure mass conservation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Mass adjustment\" \n# \"Concentrations positivity\" \n# \"Gradients monotonicity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "7.4. Convention\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nTransport by convention", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.transport.convention') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Uses Atmospheric chemistry transport scheme\" \n# \"Convective fluxes connected to tracers\" \n# \"Vertical velocities connected to tracers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8. Emissions\nAtmospheric aerosol emissions\n8.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of emissions in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Method\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod used to define aerosol species (several methods allowed because the different species may not use the same method).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Prescribed (climatology)\" \n# \"Prescribed CMIP6\" \n# \"Prescribed above surface\" \n# \"Interactive\" \n# \"Interactive above surface\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Sources\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nSources of the aerosol species are taken into account in the emissions scheme", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.emissions.sources') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Vegetation\" \n# \"Volcanos\" \n# \"Bare ground\" \n# \"Sea surface\" \n# \"Lightning\" \n# \"Fires\" \n# \"Aircraft\" \n# \"Anthropogenic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.4. Prescribed Climatology\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify the climatology type for aerosol emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Interannual\" \n# \"Annual\" \n# \"Monthly\" \n# \"Daily\" \n# TODO - please enter value(s)\n", "8.5. Prescribed Climatology Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and prescribed via a climatology", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.6. Prescribed Spatially Uniform Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and prescribed as spatially uniform", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.7. Interactive Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and specified via an interactive method", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.8. Other Emitted Species\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of aerosol species emitted and specified via an &quot;other method&quot;", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_emitted_species') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.9. Other Method Characteristics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nCharacteristics of the &quot;other method&quot; used for aerosol emissions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Concentrations\nAtmospheric aerosol concentrations\n9.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of concentrations in atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.2. Prescribed Lower Boundary\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed at the lower boundary.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.3. Prescribed Upper Boundary\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed at the upper boundary.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.4. Prescribed Fields Mmr\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed as mass mixing ratios.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9.5. Prescribed Fields Mmr\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nList of species prescribed as AOD plus CCNs.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Optical Radiative Properties\nAerosol optical and radiative properties\n10.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of optical and radiative properties", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Optical Radiative Properties --&gt; Absorption\nAbsortion properties in aerosol scheme\n11.1. Black Carbon\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.2. Dust\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of dust at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Organics\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nAbsorption mass coefficient of organics at 550nm (if non-absorbing enter 0)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12. Optical Radiative Properties --&gt; Mixtures\n**\n12.1. External\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there external mixing with respect to chemical composition?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. 
Internal\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs there internal mixing with respect to chemical composition?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.3. Mixing Rule\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf there is internal mixing with respect to chemical composition then indicate the mixinrg rule", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Optical Radiative Properties --&gt; Impact Of H2o\n**\n13.1. Size\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes H2O impact size?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "13.2. Internal Mixture\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes H2O impact internal mixture?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "14. Optical Radiative Properties --&gt; Radiative Scheme\nRadiative scheme for aerosol\n14.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of radiative scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.2. Shortwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of shortwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.3. Longwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of longwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15. Optical Radiative Properties --&gt; Cloud Interactions\nAerosol-cloud interactions\n15.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of aerosol-cloud interactions", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Twomey\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs the Twomey effect included?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.3. Twomey Minimum Ccn\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the Twomey effect is included, then what is the minimum CCN number?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "15.4. Drizzle\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the scheme affect drizzle?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.5. Cloud Lifetime\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the scheme affect cloud lifetime?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "15.6. Longwave Bands\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of longwave bands", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "16. Model\nAerosol model\n16.1. Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of atmosperic aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16.2. Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProcesses included in the Aerosol model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Dry deposition\" \n# \"Sedimentation\" \n# \"Wet deposition (impaction scavenging)\" \n# \"Wet deposition (nucleation scavenging)\" \n# \"Coagulation\" \n# \"Oxidation (gas phase)\" \n# \"Oxidation (in cloud)\" \n# \"Condensation\" \n# \"Ageing\" \n# \"Advection (horizontal)\" \n# \"Advection (vertical)\" \n# \"Heterogeneous chemistry\" \n# \"Nucleation\" \n# TODO - please enter value(s)\n", "16.3. Coupling\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nOther model components coupled to the Aerosol model", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Radiation\" \n# \"Land surface\" \n# \"Heterogeneous chemistry\" \n# \"Clouds\" \n# \"Ocean\" \n# \"Cryosphere\" \n# \"Gas phase chemistry\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.4. 
Gas Phase Precursors\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of gas phase aerosol precursors.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.gas_phase_precursors') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"DMS\" \n# \"SO2\" \n# \"Ammonia\" \n# \"Iodine\" \n# \"Terpene\" \n# \"Isoprene\" \n# \"VOC\" \n# \"NOx\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.5. Scheme Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nType(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type).", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Bulk\" \n# \"Modal\" \n# \"Bin\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "16.6. Bulk Scheme Species\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of species covered by the bulk scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.aerosol.model.bulk_scheme_species') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sulphate\" \n# \"Nitrate\" \n# \"Sea salt\" \n# \"Dust\" \n# \"Ice\" \n# \"Organic\" \n# \"Black carbon / soot\" \n# \"SOA (secondary organic aerosols)\" \n# \"POM (particulate organic matter)\" \n# \"Polar stratospheric ice\" \n# \"NAT (Nitric acid trihydrate)\" \n# \"NAD (Nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particule)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
Kulbear/deep-learning-nano-foundation
DLND-tv-script-generation/dlnd_tv_script_generation.ipynb
mit
[ "TV Script Generation\nIn this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.\nGet the Data\nThe data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like \"Moe's Cavern\", \"Flaming Moe's\", \"Uncle Moe's Family Feed-Bag\", etc..", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\n\ndata_dir = 'data/simpsons/moes_tavern_lines.txt'\ntext = helper.load_data(data_dir)\n# Ignore notice, since we don't use it for analysing the data\ntext = text[81:]\n\ntext[0:500]", "Explore the Data\nPlay around with view_sentence_range to view different parts of the data.", "view_sentence_range = (0, 10)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport numpy as np\n\nprint('Dataset Stats')\nprint('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))\nscenes = text.split('\\n\\n')\nprint('Number of scenes: {}'.format(len(scenes)))\nsentence_count_scene = [scene.count('\\n') for scene in scenes]\nprint('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))\n\nsentences = [sentence for scene in scenes for sentence in scene.split('\\n')]\nprint('Number of lines: {}'.format(len(sentences)))\nword_count_sentence = [len(sentence.split()) for sentence in sentences]\nprint('Average number of words in each line: {}'.format(np.average(word_count_sentence)))\n\nprint()\nprint('The sentences {} to {}:'.format(*view_sentence_range))\nprint('\\n'.join(text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))", "Implement Preprocessing Functions\nThe first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:\n- Lookup Table\n- Tokenize Punctuation\nLookup Table\nTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:\n- Dictionary to go from the words to an id, we'll call vocab_to_int\n- Dictionary to go from the id to word, we'll call int_to_vocab\nReturn these dictionaries in the following tuple (vocab_to_int, int_to_vocab)", "import numpy as np\nimport problem_unittests as tests\n\nfrom collections import Counter\n\ndef create_lookup_tables(text):\n \"\"\"\n Create lookup tables for vocabulary\n :param text: The text of tv scripts split into words\n :return: A tuple of dicts (vocab_to_int, int_to_vocab)\n \"\"\"\n counts = Counter(text)\n vocab = sorted(counts, key=counts.get, reverse=True)\n vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}\n int_to_vocab = {ii: word for ii, word in enumerate(vocab, 1)}\n \n return (vocab_to_int, int_to_vocab)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_create_lookup_tables(create_lookup_tables)", "Tokenize Punctuation\nWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word \"bye\" and \"bye!\".\nImplement the function token_lookup to return a dict that will be used to tokenize symbols like \"!\" into \"||Exclamation_Mark||\". Create a dictionary for the following symbols where the symbol is the key and value is the token:\n- Period ( . 
)\n- Comma ( , )\n- Quotation Mark ( \" )\n- Semicolon ( ; )\n- Exclamation mark ( ! )\n- Question mark ( ? )\n- Left Parentheses ( ( )\n- Right Parentheses ( ) )\n- Dash ( -- )\n- Return ( \\n )\nThis dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token \"dash\", try using something like \"||dash||\".", "def token_lookup():\n \"\"\"\n Generate a dict to turn punctuation into a token.\n :return: Tokenize dictionary where the key is the punctuation and the value is the token\n \"\"\"\n # TODO: Implement Function\n punct_list = {'.': '||period||', \n ',': '||comma||',\n '\"': '||quotation_mark||',\n ';': '||semicolon||',\n '!': '||exclamation_mark||',\n '?': '||question_mark||',\n '(': '||left_parentheses||',\n ')': '||right_parentheses||',\n '--': '||dash||',\n '\\n': '||return||'}\n return punct_list\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_tokenize(token_lookup)", "Preprocess all the data and save it\nRunning the code cell below will preprocess all the data and save it to file.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Preprocess Training, Validation, and Testing Data\nhelper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)", "Check Point\nThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\nimport numpy as np\nimport problem_unittests as tests\n\nint_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()\n\nlen(int_text)", "Build the Neural Network\nYou'll build the components necessary to build a RNN by implementing the following functions below:\n- get_inputs\n- get_init_cell\n- get_embed\n- build_rnn\n- build_nn\n- get_batches\nCheck the Version of TensorFlow and Access to GPU", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom distutils.version import LooseVersion\nimport warnings\nimport tensorflow as tf\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))", "Input\nImplement the get_inputs() function to create TF Placeholders for the Neural Network. 
It should create the following placeholders:\n- Input text placeholder named \"input\" using the TF Placeholder name parameter.\n- Targets placeholder\n- Learning Rate placeholder\nReturn the placeholders in the following the tuple (Input, Targets, LearingRate)", "def get_inputs():\n \"\"\"\n Create TF Placeholders for input, targets, and learning rate.\n :return: Tuple (input, targets, learning rate)\n \"\"\"\n inputs = tf.placeholder(tf.int32, [None, None], name='input')\n targets = tf.placeholder(tf.int32, [None, None], name='targets')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n \n return (inputs, targets, learning_rate)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_inputs(get_inputs)", "Build RNN Cell and Initialize\nStack one or more BasicLSTMCells in a MultiRNNCell.\n- The Rnn size should be set using rnn_size\n- Initalize Cell State using the MultiRNNCell's zero_state() function\n - Apply the name \"initial_state\" to the initial state using tf.identity()\nReturn the cell and initial state in the following tuple (Cell, InitialState)", "def get_init_cell(batch_size, rnn_size):\n \"\"\"\n Create an RNN Cell and initialize it.\n :param batch_size: Size of batches\n :param rnn_size: Size of RNNs\n :return: Tuple (cell, initialize state)\n \"\"\"\n lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)\n cell = tf.contrib.rnn.MultiRNNCell([lstm] * 2)\n \n initial_state = cell.zero_state(batch_size, tf.float32)\n initial_state = tf.identity(initial_state, name= \"initial_state\")\n \n return (cell, initial_state)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_init_cell(get_init_cell)", "Word Embedding\nApply embedding to input_data using TensorFlow. Return the embedded sequence.", "def get_embed(input_data, vocab_size, embed_dim):\n \"\"\"\n Create embedding for <input_data>.\n :param input_data: TF placeholder for text input.\n :param vocab_size: Number of words in vocabulary.\n :param embed_dim: Number of embedding dimensions\n :return: Embedded input.\n \"\"\"\n embedding = tf.Variable(tf.truncated_normal((vocab_size, embed_dim), stddev=0.25))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n \n return embed\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_embed(get_embed)", "Build RNN\nYou created a RNN Cell in the get_init_cell() function. 
Time to use the cell to create a RNN.\n- Build the RNN using the tf.nn.dynamic_rnn()\n - Apply the name \"final_state\" to the final state using tf.identity()\nReturn the outputs and final_state state in the following tuple (Outputs, FinalState)", "def build_rnn(cell, inputs):\n \"\"\"\n Create a RNN using a RNN Cell\n :param cell: RNN Cell\n :param inputs: Input text data\n :return: Tuple (Outputs, Final State)\n \"\"\"\n outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)\n final_state = tf.identity(final_state, name=\"final_state\")\n \n return outputs, final_state\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_build_rnn(build_rnn)", "Build the Neural Network\nApply the functions you implemented above to:\n- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.\n- Build RNN using cell and your build_rnn(cell, inputs) function.\n- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.\nReturn the logits and final state in the following tuple (Logits, FinalState)", "def build_nn(cell, rnn_size, input_data, vocab_size):\n \"\"\"\n Build part of the neural network\n :param cell: RNN cell\n :param rnn_size: Size of rnns\n :param input_data: Input data\n :param vocab_size: Vocabulary size\n :return: Tuple (Logits, FinalState)\n \"\"\"\n inputs = get_embed(input_data, vocab_size, rnn_size)\n outputs, final_state = build_rnn(cell, inputs)\n logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)\n return logits, final_state\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_build_nn(build_nn)", "Batches\nImplement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). 
Each batch contains two elements:\n- The first element is a single batch of input with the shape [batch size, sequence length]\n- The second element is a single batch of targets with the shape [batch size, sequence length]\nIf you can't fill the last batch with enough data, drop the last batch.\nFor exmple, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3) would return a Numpy array of the following:\n```\n[\n # First Batch\n [\n # Batch of Input\n [[ 1 2 3], [ 7 8 9]],\n # Batch of targets\n [[ 2 3 4], [ 8 9 10]]\n ],\n# Second Batch\n [\n # Batch of Input\n [[ 4 5 6], [10 11 12]],\n # Batch of targets\n [[ 5 6 7], [11 12 13]]\n ]\n]\n```", "def get_batches(int_text, batch_size, seq_length):\n \"\"\"\n Return batches of input and target\n :param int_text: Text with the words replaced by their ids\n :param batch_size: The size of batch\n :param seq_length: The length of sequence\n :return: Batches as a Numpy array\n \"\"\"\n slice_size = batch_size * seq_length\n n_batches = len(int_text) // slice_size\n \n # We will drop the last few words to keep the batches in equal size\n used_data = int_text[0:n_batches * slice_size + 1]\n batches = []\n\n for i in range(n_batches):\n input_batch = []\n target_batch = []\n \n for j in range(batch_size):\n start_idx = i * batch_size + j * seq_length\n end_idx = i * batch_size + (j + 1) * seq_length\n \n input_batch.append(used_data[start_idx: end_idx])\n target_batch.append(used_data[start_idx + 1: end_idx + 1])\n \n batches.append([input_batch, target_batch])\n\n return np.array(batches)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_batches(get_batches)", "Neural Network Training\nHyperparameters\nTune the following parameters:\n\nSet num_epochs to the number of epochs.\nSet batch_size to the batch size.\nSet rnn_size to the size of the RNNs.\nSet seq_length to the length of sequence.\nSet learning_rate to the learning rate.\nSet show_every_n_batches to the number of batches the neural network should print progress.", "# Number of Epochs\nnum_epochs = 50\n# Batch Size\nbatch_size = 128\n# RNN Size\nrnn_size = 1024\n# Sequence Length\nseq_length = 16\n# Learning Rate\nlearning_rate = 0.001\n# Show stats for every n number of batches\nshow_every_n_batches = 11\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nsave_dir = './save'", "Build the Graph\nBuild the graph using the neural network you implemented.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom tensorflow.contrib import seq2seq\n\ntrain_graph = tf.Graph()\nwith train_graph.as_default():\n vocab_size = len(int_to_vocab)\n input_text, targets, lr = get_inputs()\n input_data_shape = tf.shape(input_text)\n cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)\n logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)\n\n # Probabilities for generating words\n probs = tf.nn.softmax(logits, name='probs')\n\n # Loss function\n cost = seq2seq.sequence_loss(\n logits,\n targets,\n tf.ones([input_data_shape[0], input_data_shape[1]])\n )\n\n # Optimizer\n optimizer = tf.train.AdamOptimizer(lr)\n\n # Gradient Clipping\n gradients = optimizer.compute_gradients(cost)\n capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]\n train_op = optimizer.apply_gradients(capped_gradients)", "Train\nTrain the neural network on the preprocessed data. 
If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nbatches = get_batches(int_text, batch_size, seq_length)\n\nwith tf.Session(graph=train_graph) as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch_i in range(num_epochs):\n state = sess.run(initial_state, {input_text: batches[0][0]})\n\n for batch_i, (x, y) in enumerate(batches):\n feed = {\n input_text: x,\n targets: y,\n initial_state: state,\n lr: learning_rate}\n train_loss, state, _ = sess.run([cost, final_state, train_op], feed)\n\n # Show every <show_every_n_batches> batches\n if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:\n print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(\n epoch_i,\n batch_i,\n len(batches),\n train_loss))\n\n # Save Model\n saver = tf.train.Saver()\n saver.save(sess, save_dir)\n print('Model Trained and Saved')", "Save Parameters\nSave seq_length and save_dir for generating a new TV script.", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Save parameters for checkpoint\nhelper.save_params((seq_length, save_dir))", "Checkpoint", "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport helper\nimport problem_unittests as tests\n\n_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()\nseq_length, load_dir = helper.load_params()", "Implement Generate Functions\nGet Tensors\nGet tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:\n- \"input:0\"\n- \"initial_state:0\"\n- \"final_state:0\"\n- \"probs:0\"\nReturn the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)", "def get_tensors(loaded_graph):\n \"\"\"\n Get input, initial state, final state, and probabilities tensor from <loaded_graph>\n :param loaded_graph: TensorFlow graph loaded from file\n :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)\n \"\"\"\n inputs = loaded_graph.get_tensor_by_name(\"input:0\")\n initial_state = loaded_graph.get_tensor_by_name(\"initial_state:0\")\n final_state = loaded_graph.get_tensor_by_name(\"final_state:0\")\n probs = loaded_graph.get_tensor_by_name(\"probs:0\")\n \n return (inputs, initial_state, final_state, probs)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_get_tensors(get_tensors)", "Choose Word\nImplement the pick_word() function to select the next word using probabilities.", "from random import randint\n\ndef pick_word(probabilities, int_to_vocab):\n \"\"\"\n Pick the next word in the generated text\n :param probabilities: Probabilites of the next word\n :param int_to_vocab: Dictionary of word ids as the keys and words as the values\n :return: String of the predicted word\n \"\"\"\n return int_to_vocab[np.argmax(probabilities)]\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_pick_word(pick_word)", "Generate TV Script\nThis will generate the TV script for you. 
Set gen_length to the length of TV script you want to generate.", "gen_length = 300\n# homer_simpson, moe_szyslak, or Barney_Gumble\nprime_word = 'moe_szyslak'\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nloaded_graph = tf.Graph()\nwith tf.Session(graph=loaded_graph) as sess:\n # Load saved model\n loader = tf.train.import_meta_graph(load_dir + '.meta')\n loader.restore(sess, load_dir)\n\n # Get Tensors from loaded model\n input_text, initial_state, final_state, probs = get_tensors(loaded_graph)\n\n # Sentences generation setup\n gen_sentences = [prime_word + ':']\n prev_state = sess.run(initial_state, {input_text: np.array([[1]])})\n\n # Generate sentences\n for n in range(gen_length):\n # Dynamic Input\n dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]\n dyn_seq_length = len(dyn_input[0])\n\n # Get Prediction\n probabilities, prev_state = sess.run(\n [probs, final_state],\n {input_text: dyn_input, initial_state: prev_state})\n \n pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)\n\n gen_sentences.append(pred_word)\n \n # Remove tokens\n tv_script = ' '.join(gen_sentences)\n for key, token in token_dict.items():\n ending = ' ' if key in ['\\n', '(', '\"'] else ''\n tv_script = tv_script.replace(' ' + token.lower(), key)\n tv_script = tv_script.replace('\\n ', '\\n')\n tv_script = tv_script.replace('( ', '(')\n \n print(tv_script)", "The TV Script is Nonsensical\nIt's ok if the TV script doesn't make any sense. We trained on less than a megabyte of text. In order to get good results, you'll have to use a smaller vocabulary or get more data. Luckly there's more data! As we mentioned in the begging of this project, this is a subset of another dataset. We didn't have you train on all the data, because that would take too long. However, you are free to train your neural network on all the data. After you complete the project, of course.\nSubmitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_tv_script_generation.ipynb\" and save it as a HTML file under \"File\" -> \"Download as\". Include the \"helper.py\" and \"problem_unittests.py\" files in your submission." ]
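One detail worth flagging in the TV-script row above: `get_batches` indexes with `i * batch_size + j * seq_length`, which does not reproduce the worked example in that cell's own markdown — consecutive batches overlap rather than stepping through the text row by row (whether the bundled unit test notices depends on what it checks). A minimal NumPy sketch that does match the documented `(number of batches, 2, batch size, sequence length)` layout, assuming nothing beyond that spec:

```python
import numpy as np

def get_batches_sketch(int_text, batch_size, seq_length):
    # Number of full (input, target) batches we can cut from the text.
    n_batches = len(int_text) // (batch_size * seq_length)
    # Keep one extra word so every input position has a target.
    words = np.array(int_text[:n_batches * batch_size * seq_length + 1])
    inputs = words[:-1].reshape(batch_size, -1)   # rows are contiguous chunks
    targets = words[1:].reshape(batch_size, -1)   # same layout, shifted by one
    batches = []
    for i in range(n_batches):
        sl = slice(i * seq_length, (i + 1) * seq_length)
        batches.append([inputs[:, sl], targets[:, sl]])
    return np.array(batches)

# Reproduces the worked example from the markdown above.
example = get_batches_sketch(list(range(1, 16)), 2, 3)
print(example[0][0])  # [[1 2 3] [7 8 9]]
print(example[0][1])  # [[2 3 4] [8 9 10]]
```

The text is first split into `batch_size` contiguous rows; batch `i` is then just columns `i*seq_length:(i+1)*seq_length` of that 2-D view, with targets shifted one word ahead.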
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
JorisBolsens/PYNQ
Pynq-Z1/notebooks/examples/video_filters.ipynb
bsd-3-clause
[ "Software Grayscale and Sobel filters on HDMI input\nThis example notebook will demonstrate two image filters using a snapshot from the HDMI input: <br>\n1. First, a frame is read from HDMI input\n2. That image is saved and displayed in the notebook\n3. Some simple Python pixel-level image processing is done (Gray Scale conversion, and Sobel filter)\n1. Start the HDMI input\nAn HDMI input source is required for this example. This should be on, and connected to the board before running the code below.", "from pynq import Overlay\nfrom pynq.drivers import Frame, HDMI\nfrom IPython.display import Image\n\nOverlay('base.bit').download()\nhdmi=HDMI('in')\nhdmi.start()", "2. Save frame and display JPG", "frame = hdmi.frame()\norig_img_path = '/home/xilinx/jupyter_notebooks/examples/data/orig.jpg'\nframe.save_as_jpeg(orig_img_path)\nImage(filename=orig_img_path)", "3. Gray Scale filter\nThis cell should take ~50s to complete. Note that there are better ways (e.g., openCV, etc.) to do grayscale conversion, but this is just an example of doing that without using any additional library.", "from pynq.drivers.video import MAX_FRAME_WIDTH\n\nframe_i = frame.frame\n\nheight = hdmi.frame_height()\nwidth = hdmi.frame_width()\n\nfor y in range(0, height):\n for x in range(0, width):\n offset = 3 * (y * MAX_FRAME_WIDTH + x)\n gray = round((0.299*frame_i[offset+2]) + \n (0.587*frame_i[offset+0]) +\n (0.114*frame_i[offset+1]))\n frame_i[offset:offset+3] = gray,gray,gray\n\ngray_img_path = '/home/xilinx/jupyter_notebooks/examples/data/gray.jpg'\nframe.save_as_jpeg(gray_img_path)\nImage(filename=gray_img_path)", "4. Sobel filter\nThis cell should take ~80s to complete. Note that there are better ways (e.g., openCV, etc.) to do sobel filter, but this is just an example of doing that without using any additional library.\nCompute the Sobel Filter output with sobel operator:\n$G_x=\n\\begin{bmatrix}\n-1 & 0 & +1 \\\n-2 & 0 & +2 \\\n-1 & 0 & +1\n\\end{bmatrix}\n$\n$G_y=\n\\begin{bmatrix}\n+1 & +2 & +1 \\\n0 & 0 & 0 \\\n-1 & -2 & -1\n\\end{bmatrix}\n$", "height = 1080\nwidth = 1920\nsobel = Frame(1920, 1080)\nframe_i = frame.frame\n\nfor y in range(1,height-1):\n for x in range(1,width-1):\n offset = 3 * (y * MAX_FRAME_WIDTH + x)\n upper_row_offset = offset - MAX_FRAME_WIDTH*3\n lower_row_offset = offset + MAX_FRAME_WIDTH*3 \n gx = abs(-frame_i[lower_row_offset-3] + frame_i[lower_row_offset+3] -\n 2*frame_i[offset-3] + 2*frame_i[offset+3] -\n frame_i[upper_row_offset-3] + frame_i[upper_row_offset+3])\n gy = abs(frame_i[lower_row_offset-3] + 2*frame_i[lower_row_offset] + \n frame_i[lower_row_offset+3] - frame_i[upper_row_offset-3] -\n 2*frame_i[upper_row_offset] - frame_i[upper_row_offset+3]) \n grad = min(gx + gy,255) \n sobel.frame[offset:offset+3] = grad,grad,grad\n \nsobel_img_path = '/home/xilinx/jupyter_notebooks/examples/data/sobel.jpg'\nsobel.save_as_jpeg(sobel_img_path)\nImage(filename=sobel_img_path)", "5: Free up space", "hdmi.stop()\n\ndel sobel\ndel hdmi" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ES-DOC/esdoc-jupyterhub
notebooks/inm/cmip6/models/inm-cm4-8/seaice.ipynb
gpl-3.0
[ "ES-DOC CMIP6 Model Properties - Seaice\nMIP Era: CMIP6\nInstitute: INM\nSource ID: INM-CM4-8\nTopic: Seaice\nSub-Topics: Dynamics, Thermodynamics, Radiative Processes. \nProperties: 80 (63 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:04\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook", "# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'inm', 'inm-cm4-8', 'seaice')", "Document Authors\nSet document authors", "# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Contributors\nSpecify document contributors", "# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)", "Document Publication\nSpecify document publication status", "# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)", "Document Table of Contents\n1. Key Properties --&gt; Model\n2. Key Properties --&gt; Variables\n3. Key Properties --&gt; Seawater Properties\n4. Key Properties --&gt; Resolution\n5. Key Properties --&gt; Tuning Applied\n6. Key Properties --&gt; Key Parameter Values\n7. Key Properties --&gt; Assumptions\n8. Key Properties --&gt; Conservation\n9. Grid --&gt; Discretisation --&gt; Horizontal\n10. Grid --&gt; Discretisation --&gt; Vertical\n11. Grid --&gt; Seaice Categories\n12. Grid --&gt; Snow On Seaice\n13. Dynamics\n14. Thermodynamics --&gt; Energy\n15. Thermodynamics --&gt; Mass\n16. Thermodynamics --&gt; Salt\n17. Thermodynamics --&gt; Salt --&gt; Mass Transport\n18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\n19. Thermodynamics --&gt; Ice Thickness Distribution\n20. Thermodynamics --&gt; Ice Floe Size Distribution\n21. Thermodynamics --&gt; Melt Ponds\n22. Thermodynamics --&gt; Snow Processes\n23. Radiative Processes \n1. Key Properties --&gt; Model\nName of seaice model used.\n1.1. Model Overview\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nOverview of sea ice model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "1.2. Model Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nName of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.model.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "2. Key Properties --&gt; Variables\nList of prognostic variable in the sea ice model.\n2.1. Prognostic\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList of prognostic variables in the sea ice component.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.variables.prognostic') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Sea ice temperature\" \n# \"Sea ice concentration\" \n# \"Sea ice thickness\" \n# \"Sea ice volume per grid cell area\" \n# \"Sea ice u-velocity\" \n# \"Sea ice v-velocity\" \n# \"Sea ice enthalpy\" \n# \"Internal ice stress\" \n# \"Salinity\" \n# \"Snow temperature\" \n# \"Snow depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3. 
Key Properties --&gt; Seawater Properties\nProperties of seawater relevant to sea ice\n3.1. Ocean Freezing Point\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS-10\" \n# \"Constant\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "3.2. Ocean Freezing Point Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant seawater freezing point, specify this value.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "4. Key Properties --&gt; Resolution\nResolution of the sea ice grid\n4.1. Name\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.2. Canonical Horizontal Resolution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "4.3. Number Of Horizontal Gridpoints\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "5. Key Properties --&gt; Tuning Applied\nTuning applied to sea ice model component\n5.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.2. Target\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.3. Simulations\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\n*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.4. Metrics Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any observed metrics used in tuning model/parameters", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "5.5. Variables\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nWhich variables were changed during the tuning process?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "6. Key Properties --&gt; Key Parameter Values\nValues of key parameters\n6.1. Typical Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nWhat values were specificed for the following parameters if used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ice strength (P*) in units of N m{-2}\" \n# \"Snow conductivity (ks) in units of W m{-1} K{-1} \" \n# \"Minimum thickness of ice created in leads (h0) in units of m\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "6.2. Additional Parameters\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N\nIf you have any additional paramterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7. Key Properties --&gt; Assumptions\nAssumptions made in the sea ice model\n7.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nGeneral overview description of any key assumptions made in this model.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.description') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.2. On Diagnostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nNote any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "7.3. 
Missing Processes\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nList any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8. Key Properties --&gt; Conservation\nConservation in the sea ice component\n8.1. Description\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nProvide a general description of conservation methodology.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.2. Properties\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nProperties conserved in sea ice by the numerical schemes.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.properties') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Mass\" \n# \"Salt\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "8.3. Budget\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nFor each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "8.4. Was Flux Correction Used\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes conservation involved flux correction?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "8.5. Corrected Conserved Prognostic Variables\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nList any variables which are conserved by more than the numerical scheme alone.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "9. Grid --&gt; Discretisation --&gt; Horizontal\nSea ice discretisation in the horizontal\n9.1. Grid\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nGrid on which sea ice is horizontal discretised?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ocean grid\" \n# \"Atmosphere Grid\" \n# \"Own Grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.2. Grid Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the type of sea ice grid?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Structured grid\" \n# \"Unstructured grid\" \n# \"Adaptive grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.3. Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the advection scheme?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite differences\" \n# \"Finite elements\" \n# \"Finite volumes\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "9.4. Thermodynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step in the sea ice model thermodynamic component in seconds.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.5. Dynamics Time Step\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the time step in the sea ice model dynamic component in seconds.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "9.6. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional horizontal discretisation details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "10. Grid --&gt; Discretisation --&gt; Vertical\nSea ice vertical properties\n10.1. Layering\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Zero-layer\" \n# \"Two-layers\" \n# \"Multi-layers\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "10.2. Number Of Layers\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using multi-layers specify how many.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "10.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional vertical grid details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11. Grid --&gt; Seaice Categories\nWhat method is used to represent sea ice categories ?\n11.1. 
Has Mulitple Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nSet to true if the sea ice model has multiple sea ice categories.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "11.2. Number Of Categories\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify how many.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "11.3. Category Limits\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIf using sea ice categories specify each of the category limits.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.4. Ice Thickness Distribution Scheme\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the sea ice thickness distribution scheme", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "11.5. Other\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf the sea ice model does not use sea ice categories specify any additional details. For example models that paramterise the ice thickness distribution ITD (i.e there is no explicit ITD) but there is assumed distribution and fluxes are computed accordingly.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.seaice_categories.other') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12. Grid --&gt; Snow On Seaice\nSnow on sea ice details\n12.1. Has Snow On Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nIs snow on ice represented in this model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "12.2. Number Of Snow Levels\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nNumber of vertical levels of snow on ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "12.3. Snow Fraction\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe how the snow fraction on sea ice is determined", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "12.4. 
Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nSpecify any additional details related to snow on ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "13. Dynamics\nSea Ice Dynamics\n13.1. Horizontal Transport\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of horizontal advection of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.horizontal_transport') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.2. Transport In Thickness Space\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice transport in thickness space (i.e. in thickness categories)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Incremental Re-mapping\" \n# \"Prather\" \n# \"Eulerian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.3. Ice Strength Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhich method of sea ice strength formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Hibler 1979\" \n# \"Rothrock 1975\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.4. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhich processes can redistribute sea ice (including thickness)?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.redistribution') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rafting\" \n# \"Ridging\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "13.5. Rheology\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nRheology, what is the ice deformation formulation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.dynamics.rheology') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Free-drift\" \n# \"Mohr-Coloumb\" \n# \"Visco-plastic\" \n# \"Elastic-visco-plastic\" \n# \"Elastic-anisotropic-plastic\" \n# \"Granular\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14. Thermodynamics --&gt; Energy\nProcesses related to energy in sea ice thermodynamics\n14.1. Enthalpy Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the energy formulation?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice latent heat (Semtner 0-layer)\" \n# \"Pure ice latent and sensible heat\" \n# \"Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)\" \n# \"Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.2. Thermal Conductivity\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat type of thermal conductivity is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pure ice\" \n# \"Saline ice\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.3. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of heat diffusion?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Conduction fluxes\" \n# \"Conduction and radiation heat fluxes\" \n# \"Conduction, radiation and latent heat transport\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.4. Basal Heat Flux\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod by which basal ocean heat flux is handled?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heat Reservoir\" \n# \"Thermal Fixed Salinity\" \n# \"Thermal Varying Salinity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "14.5. Fixed Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "14.6. Heat Content Of Precipitation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which the heat content of precipitation is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "14.7. Precipitation Effects On Salinity\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15. Thermodynamics --&gt; Mass\nProcesses related to mass in sea ice thermodynamics\n15.1. 
New Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method by which new sea ice is formed in open water.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.2. Ice Vertical Growth And Melt\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs the vertical growth and melt of sea ice.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.3. Ice Lateral Melting\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the method of sea ice lateral melting?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Floe-size dependent (Bitz et al 2001)\" \n# \"Virtual thin ice melting (for single-category)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "15.4. Ice Surface Sublimation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method that governs sea ice surface sublimation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "15.5. Frazil Ice\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDescribe the method of frazil ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "16. Thermodynamics --&gt; Salt\nProcesses related to salt in sea ice thermodynamics.\n16.1. Has Multiple Sea Ice Salinities\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "16.2. Sea Ice Salinity Thermal Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nDoes sea ice salinity impact the thermal properties of sea ice?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "17. Thermodynamics --&gt; Salt --&gt; Mass Transport\nMass transport of salt\n17.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the mass transport of salt calculation?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "17.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "17.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "18. Thermodynamics --&gt; Salt --&gt; Thermodynamics\nSalt thermodynamics\n18.1. Salinity Type\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is salinity determined in the thermodynamic calculation?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Prescribed salinity profile\" \n# \"Prognostic salinity profile\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "18.2. Constant Salinity Value\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nIf using a constant salinity value specify this value in PSU?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n", "18.3. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the salinity profile used.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "19. Thermodynamics --&gt; Ice Thickness Distribution\nIce thickness distribution details.\n19.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice thickness distribution represented?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Virtual (enhancement of thermal conductivity, thin ice melting)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20. Thermodynamics --&gt; Ice Floe Size Distribution\nIce floe-size distribution details.\n20.1. Representation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nHow is the sea ice floe-size represented?", "# PROPERTY ID - DO NOT EDIT ! 
\nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Explicit\" \n# \"Parameterised\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "20.2. Additional Details\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nPlease provide further details on any parameterisation of floe-size.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "21. Thermodynamics --&gt; Melt Ponds\nCharacteristics of melt ponds.\n21.1. Are Included\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nAre melt ponds included in the sea ice model?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "21.2. Formulation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat method of melt pond formulation is used?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flocco and Feltham (2010)\" \n# \"Level-ice melt ponds\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "21.3. Impacts\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nWhat do melt ponds have an impact on?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Albedo\" \n# \"Freshwater\" \n# \"Heat\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "22. Thermodynamics --&gt; Snow Processes\nThermodynamic processes in snow on sea ice\n22.1. Has Snow Aging\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has a snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.2. Snow Aging Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow aging scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.3. Has Snow Ice Formation\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nSet to True if the sea ice model has snow ice formation.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n", "22.4. 
Snow Ice Formation Scheme\nIs Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1\nDescribe the snow ice formation scheme.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.5. Redistribution\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the impact of ridging on snow cover?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n", "22.6. Heat Diffusion\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nWhat is the heat diffusion through snow methodology in sea ice thermodynamics?", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Single-layered heat diffusion\" \n# \"Multi-layered heat diffusion\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23. Radiative Processes\nSea Ice Radiative Processes\n23.1. Surface Albedo\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1\nMethod used to handle surface albedo.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Parameterized\" \n# \"Multi-band albedo\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "23.2. Ice Radiation Transmission\nIs Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N\nMethod by which solar radiation through sea ice is handled.", "# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Delta-Eddington\" \n# \"Exponential attenuation\" \n# \"Ice radiation transmission per category\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n", "©2017 ES-DOC" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
sailuh/perceive
Parsers/SecLists/Reply-Parse.ipynb
gpl-2.0
[ "Seclists reply parse\nExample: http://seclists.org/fulldisclosure/2017/Jan/0\nWith each reply, we'll attempt to parse out the following:\n* raw reply text, without html tags\n * the reply text with any signatures stripped out\n* an analysis of what html tags are in the message\n* a listing of which domains are referenced in links in the message", "import re\nimport requests\n\nfrom bs4 import BeautifulSoup", "We'll gather the contents of a single message. 2017_Jan_0 is one that includes a personal signature, as well as the standard Full Disclosure footer.\n2017_Jan_45 is a message that includes a PGP signature.", "year = '2005'\nmonth = 'Jan'\nid = '0'\nurl = 'http://seclists.org/fulldisclosure/' + year + '/' + month + '/' + id\n\nr = requests.get(url)\ncontent = r.text\nfrom IPython.display import Pretty\nPretty(content)", "Each message in the FD list is wrapped in seclists.org code, including navigation, ads, and trackers, all irrelevant to us. The body of the reply is contained between two comments, &lt;!--X-Body-of-Message--&gt; and &lt;!--X-Body-of-Message-End--&gt;.\nBeautifulSoup isn't great at handling comments, so we first use simple indexing to extract the relevant chars. We'll then send it through BeautifulSoup so we can use its .text property to strip out the html tags. BS4 automatically adds tags to create valid html, so remember to parse using the generated &lt;body&gt; tags.\nWhat we end up with is a plaintext version of the message's body.", "start = content.index('<!--X-Body-of-Message-->') + 24\nend = content.index('<!--X-Body-of-Message-End-->')\nbody = content[start:end]\n\nsoup = BeautifulSoup(body, 'html5lib')\nbodyhtml = soup.find('body')\nraw = bodyhtml.text\nPretty(raw)", "Signature extraction\nMessages to the FD list usually end with a common footer:\n2002-2005:\n_______________________________________________\nFull-Disclosure - We believe in it.\nCharter: http://lists.netsys.com/full-disclosure-charter.html\n2005-2014:\n_______________________________________________\nFull-Disclosure - We believe in it.\nCharter: http://lists.grok.org.uk/full-disclosure-charter.html\nHosted and sponsored by Secunia - http://secunia.com/\n2014-onward:\n_______________________________________________\nSent through the Full Disclosure mailing list\nhttp://nmap.org/mailman/listinfo/fulldisclosure\nWeb Archives &amp; RSS: http://seclists.org/fulldisclosure/\nWe'll look for the first line (47 underscores), then test the lines below to make sure it's a match. If so, we'll strip out that footer from our content.", "workcopy = raw\nfooters = [m.start() for m in re.finditer('_{47}', workcopy)]\nfor f in reversed(footers):\n possible = workcopy[f:f+190] \n lines = possible.splitlines()\n if(len(lines) == 4\n and lines[1][0:15] == 'Full-Disclosure'\n and lines[2][0:8] == 'Charter:'\n and lines[3][0:20] == 'Hosted and sponsored'):\n workcopy = workcopy[:f] + workcopy[f+213:]\n continue\n \n if(len(lines) == 4\n and lines[1][0:16] == 'Sent through the'\n and lines[2][0:17] == 'https://nmap.org/'\n and lines[3][0:14] == 'Web Archives &'):\n workcopy = workcopy[:f] + workcopy[f+211:]\n continue\n \n \n possible = workcopy[f:f+146]\n lines = possible.splitlines()\n if(len(lines) == 3\n and lines[1][0:15] == 'Full-Disclosure'\n and lines[2][0:8] == 'Charter:'):\n workcopy = workcopy[:f] + workcopy[f+146:]\n continue\n \nprint(workcopy)", "PGP messages\nAs can be expected, many messages offer a PGP signature validation. This isn't useful to our processing, so we'll take it out. 
First, we define get_raw_message with code we've used previously. We then create strip_pgp, looking for the PGP signature. We can just use simple text searches again, with an exception of using RE for the Hash, which can change.\nhttp://seclists.org/fulldisclosure/2017/Oct/11 is a message that includes a PGP signature, so we'll use that to test.", "def get_raw_message(url):\n r = requests.get(url)\n content = r.text\n start = content.index('<!--X-Body-of-Message-->') + 24\n end = content.index('<!--X-Body-of-Message-End-->')\n body = content[start:end]\n\n soup = BeautifulSoup(body, 'html5lib')\n bodyhtml = soup.find('body')\n return bodyhtml.text\n\n#rawmsg = get_raw_message('http://seclists.org/fulldisclosure/2017/Oct/11')\nrawmsg = get_raw_message('http://seclists.org/fulldisclosure/2005/Jan/719')\n\ndef strip_pgp(raw):\n\n try:\n pgp_sig_start = raw.index('-----BEGIN PGP SIGNATURE-----')\n pgp_sig_end = raw.index('-----END PGP SIGNATURE-----') + 27\n \n cleaned = raw[:pgp_sig_start] + raw[pgp_sig_end:]\n \n # if we find a public key block, then strip that out\n try: \n pgp_pk_start = raw.index('-----BEGIN PGP PUBLIC KEY BLOCK-----')\n pgp_pk_end = raw.index('-----END PGP PUBLIC KEY BLOCK-----') + 35\n cleaned = cleaned[:pgp_pk_start] + cleaned[pgp_pk_end:]\n except ValueError as ve:\n pass\n\n # finally, try to remove the signed message header\n pgp_msg = raw.index('-----BEGIN PGP SIGNED MESSAGE-----')\n pgp_hash = re.search('Hash:(.)+\\n', raw)\n \n if pgp_hash is not None:\n first_hash = pgp_hash.span(0)\n if first_hash[0] == pgp_msg + 35:\n #if we found a hash designation immediately after the header, strip that too\n cleaned = cleaned[:pgp_msg] + cleaned[first_hash[1]:]\n else:\n #just strip the header\n cleaned = cleaned[:pgp_msg] + cleaned[pgp_msg + 34:]\n else:\n cleaned = cleaned[:pgp_msg] + cleaned[pgp_msg + 34:]\n \n \n return cleaned\n except ValueError as ve:\n return raw\n\nunpgp = strip_pgp(rawmsg)\nPretty(unpgp)\n#Pretty(strip_pgp(raw))\n\n", "Talon processing\nNext, we'll attempt to use talon to strip out the signature from the message. Talon provides two different ways to find the signature, \"brute force\" and \"machine learning\". \nWe'll try the brute force method first.", "import talon\nfrom talon.signature.bruteforce import extract_signature\n\nreply, signature = extract_signature(raw)\nif(not signature is None):\n Pretty(signature)\n\nPretty(reply)", "At least for 2017_Jan_0, it is pretty effective. 2017_Jan_45 was not successful at all. Now, we'll try the machine learning style, to compare.", "talon.init()\nfrom talon import signature\nreply_ml, sig_ml = signature.extract(raw, sender=\"[email protected]\")\nprint(sig_ml)\n#reply_ml", "This doesn't seem to output anything. I'm unclear whether or not this library is already trained; documentation states that it was trained on the authors' personal email and an ENRON set. There is an open issue on github https://github.com/mailgun/talon/issues/143 from July asking about the same thing. We will stick with the \"brute force\" method for now, and continue to look for more libraries.\nExtract HTML tags\nWe'll use a fairly simple regex to extract any tags from the reply. 
\n&lt;([^\\s&gt;]+)(\\s|/&gt;)+\n * [^\\s&gt;]+ one or more non-whitespace characters, followed by:\n * \\s|/ either a whitespace character, or a slash (/) for self-closing tags.\nWe then use a dictionary to count the instances of each unique tag.", "rx = re.compile('<([^\\s>]+)(\\s|/>)+')\ntags = {}\nfor tag in rx.findall(str(bodyhtml)):\n tagtype = tag[0]\n if not tagtype.startswith('/'):\n if tagtype in tags:\n tags[tagtype] = tags[tagtype] + 1\n else:\n tags[tagtype] = 1\nprint(tags)", "Extract link domains\nWe'll record what domains are linked to in each message. We use BeautifulSoup to pull out all &lt;a&gt; tags, then urlparse to determine the domain within.", "from urllib.parse import urlparse\n\nsites = {}\n\natags = bodyhtml.find_all('a')\nhrefs = [link.get('href') for link in atags]\n\nfor link in hrefs:\n parsedurl = urlparse(link)\n site = parsedurl.netloc\n if site in sites:\n sites[site] = sites[site] + 1\n else:\n sites[site] = 1\n\nsites" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ocefpaf/secoora
notebooks/timeSeries/ssv/00-velocity_secoora.ipynb
mit
[ "SECOORA sea surface temperature time-series notebook\nProduce weekly maps and tables for the SECOORA. Based on IOOS system-test notebook.", "import time\nstart_time = time.time()\n\nimport os\n\n%load_ext watermark\n%watermark --githash --machine --python --packages iris,pyoos,owslib\n\nstyle = os.path.join(os.pardir, os.pardir, 'style.css')\n\nimport pytz\nfrom datetime import datetime, timedelta\n\nfrom utilities import CF_names\n\n\n# Choose the date range (e.g: stop = datetime(2014, 7, 7, 12)).\nstop = datetime(2015, 2, 6, 12)\n\nstop = stop.replace(tzinfo=pytz.utc)\nstart = stop - timedelta(days=7)\n\n# SECOORA region (NC, SC GA, FL).\nbbox = [-87.40, 24.25, -74.70, 36.70]\n\n# CF-names to look for:\ncurrents = CF_names['currents']\nname_list = currents['u'] + currents['v'] + currents['speed_direction']\n\nrun_name = '{:%Y-%m-%d}'.format(stop)\n\nif not os.path.exists(run_name):\n os.makedirs(run_name)\n\nimport iris\nimport pyoos\nimport owslib\n\nimport logging as log\nreload(log)\n\nfmt = '{:*^64}'.format\nlog.captureWarnings(True)\nLOG_FILENAME = 'log.txt'\nLOG_FILENAME = os.path.join(run_name, LOG_FILENAME)\nlog.basicConfig(filename=LOG_FILENAME,\n filemode='w',\n format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%I:%M:%S',\n level=log.INFO,\n stream=None)\n\nlog.info(fmt(' Run information '))\nlog.info('Run date: {:%Y-%m-%d %H:%M:%S}'.format(datetime.utcnow()))\nlog.info('Download start: {:%Y-%m-%d %H:%M:%S}'.format(start))\nlog.info('Download stop: {:%Y-%m-%d %H:%M:%S}'.format(stop))\nlog.info('Bounding box: {0:3.2f}, {1:3.2f},'\n '{2:3.2f}, {3:3.2f}'.format(*bbox))\nlog.info(fmt(' Software version '))\nlog.info('Iris version: {}'.format(iris.__version__))\nlog.info('owslib version: {}'.format(owslib.__version__))\nlog.info('pyoos version: {}'.format(pyoos.__version__))\n\nfrom owslib import fes\nfrom utilities import fes_date_filter\n\nkw = dict(wildCard='*',\n escapeChar='\\\\',\n singleChar='?',\n propertyname='apiso:AnyText')\n\nor_filt = fes.Or([fes.PropertyIsLike(literal=('*%s*' % val), **kw)\n for val in name_list])\n\n# Exclude ROMS Averages and History files.\nnot_filt = fes.Not([fes.PropertyIsLike(literal='*Averages*', **kw)])\n\nbegin, end = fes_date_filter(start, stop)\nfilter_list = [fes.And([fes.BBox(bbox), begin, end, or_filt, not_filt])]\n\nfrom owslib.csw import CatalogueServiceWeb\n\nendpoint = 'http://www.ngdc.noaa.gov/geoportal/csw'\ncsw = CatalogueServiceWeb(endpoint, timeout=60)\ncsw.getrecords2(constraints=filter_list, maxrecords=1000, esn='full')\n\nlog.info(fmt(' Catalog information '))\nlog.info(\"URL: {}\".format(endpoint))\nlog.info(\"CSW version: {}\".format(csw.version))\nlog.info(\"Number of datasets available: {}\".format(len(csw.records.keys())))\n\nfrom utilities import service_urls\n\ndap_urls = service_urls(csw.records, service='odp:url')\nsos_urls = service_urls(csw.records, service='sos:url')\n\nlog.info(fmt(' CSW '))\nfor rec, item in csw.records.items():\n log.info('{}'.format(item.title))\n\nlog.info(fmt(' DAP '))\nfor url in dap_urls:\n log.info('{}.html'.format(url))\n\nlog.info(fmt(' SOS '))\nfor url in sos_urls:\n log.info('{}'.format(url))", "Add SECOORA models and observations.", "from utilities import titles, fix_url\n\nsecoora_models = ['SABGOM', 'USEAST', 'USF_ROMS',\n 'USF_SWAN', 'USF_FVCOM']\n\nfor secoora_model in secoora_models:\n if titles[secoora_model] not in dap_urls:\n log.warning('{} not in the NGDC csw'.format(secoora_model))\n dap_urls.append(titles[secoora_model])\n\n# NOTE: USEAST is not archived at 
the moment!\ndap_urls = [fix_url(start, url) if 'SABGOM' in url else url for url in dap_urls]", "FIXME: deal with ($u$, $v$) and speed, direction.", "from iris.exceptions import CoordinateNotFoundError, ConstraintMismatchError\n\nfrom utilities import TimeoutException, secoora_buoys, get_cubes\n\nurls = list(secoora_buoys())\n\nbuoys = dict()\nfor url in urls:\n try:\n cubes = get_cubes(url, name_list=name_list,\n bbox=bbox, time=(start, stop))\n buoy = url.split('/')[-1].split('.nc')[0]\n buoys.update({buoy: cubes[0]})\n except (RuntimeError, ValueError, TimeoutException,\n ConstraintMismatchError, CoordinateNotFoundError) as e:\n log.warning('Cannot get cube for: {}\\n{}'.format(url, e))\n\nname_list\n\nbuoys\n\nunits=iris.unit.Unit('m s-1')" ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
dsiufl/2015-Fall-Hadoop
instructor-notes/3-pyspark-wordcount.ipynb
mit
[ "Spark version of wordcount examples\nPrepare the pyspark environment.", "import findspark\nimport os\nfindspark.init('/home/ubuntu/shortcourse/spark-1.5.1-bin-hadoop2.6')\n\nfrom pyspark import SparkContext, SparkConf\nconf = SparkConf().setAppName(\"test\").setMaster(\"local[2]\")\nsc = SparkContext(conf=conf)", "Make sure your HDFS is still on and the input files (the three books) are still in the input folder.\nCreate the input RDD from the files on the HDFS (hdfs://localhost:54310/user/ubuntu/input).", "lines = sc.textFile('hdfs://localhost:54310/user/ubuntu/input')\nlines.count()", "Simple Word Count\nPerform the counting, by flatMap, map, and reduceByKey.", "from operator import add\ncounts = lines.flatMap(lambda x: x.split()).map(lambda x: (x, 1)).reduceByKey(add)", "Take the top 10 frequently used words", "counts.takeOrdered(10, lambda x: -x[1])", "Pattern Matching WordCount\nRead the pattern file into a set. (file: /home/ubuntu/shortcourse/notes/scripts/wordcount2/wc2-pattern.txt)", "pattern = set()\nf = open('/home/ubuntu/shortcourse/notes/scripts/wordcount2/wc2-pattern.txt')\nfor line in f:\n words = line.split()\n for word in words:\n pattern.add(word)", "Perform the counting, by flatMap, filter, map, and reduceByKey.", "result = lines.flatMap(lambda x: x.split()).filter(lambda x: x in pattern).map(lambda x: (x, 1)).reduceByKey(add)", "Collect and show the results.", "result.collect()\n\n# stop the spark context\nsc.stop()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
agile-geoscience/welly
docs/_userguide/Projects.ipynb
apache-2.0
[ "Projects\nWells are one of the fundamental objects in welly.\nWell objects include collections of Curve objects. Multiple Well objects can be stored in a Project.\nOn this page, we take a closer look at the Project class. It lets us handle groups of wells. It is really just a list of Well objects, with a few extra powers.\nFirst, some preliminaries…", "import welly\n\nwelly.__version__", "Make a project\nWe have a few LAS files in a folder; we can load them all at once with standard POSIX file globbing syntax:", "p = welly.read_las(\"../../tests/assets/example_*.las\")", "Now we have a project, containing two files:", "p", "You can pass in a list of files or URLs:", "p = welly.read_las(['../../tests/assets/P-129_out.LAS',\n 'https://geocomp.s3.amazonaws.com/data/P-130.LAS',\n 'https://geocomp.s3.amazonaws.com/data/R-39.las',\n ])", "This project has three wells:", "p", "Typical, the UWIs are a disaster. Let's ignore this for now.\nThe Project is really just a list-like thing, so you can index into it to get at a single well. Each well is represented by a welly.Well object.", "p[0]", "Some of the fields of this LAS file are messed up; see the Well notebook for more on how to fix this. \nPlot curves from several wells\nThe DT log is called DT4P in one of the wells. We can deal with this sort of issue with aliases. Let's set up an alias dictionary, then plot the DT log from each well:", "alias = {'Sonic': ['DT', 'DT4P'],\n 'Caliper': ['HCAL', 'CALI'],\n }\n\nimport matplotlib.pyplot as plt\n\nfig, axs = plt.subplots(figsize=(7, 14),\n ncols=len(p),\n sharey=True,\n )\n\nfor i, (ax, w) in enumerate(zip(axs, p)):\n log = w.get_curve('Sonic', alias=alias)\n if log is not None:\n ax = log.plot(ax=ax)\n ax.set_title(\"Sonic log for\\n{}\".format(w.uwi))\n\nmin_z, max_z = p.basis_range\n \nplt.ylim(max_z, min_z)\nplt.show()", "Get a pandas.DataFrame\nThe df() method makes a DataFrame using a dual index of UWI and Depth.\nBefore we export our wells, let's give Kennetcook #2 a better UWI:", "p[0].uwi = p[0].name\np[0]", "That's better.\nWhen creating the DataFrame, you can pass a list of the keys (mnemonics) you want, and use aliases as usual.", "alias\n\nkeys = ['Caliper', 'GR', 'Sonic']\n\ndf = p.df(keys=keys, alias=alias, rename_aliased=True)\ndf", "Quality\nWelly can run quality tests on the curves in your project. Some of the tests take arguments. You can test for things like this:\n\nall_positive: Passes if all the values are greater than zero.\nall_above(50): Passes if all the values are greater than 50.\nmean_below(100): Passes if the mean of the log is less than 100.\nno_nans: Passes if there are no NaNs in the log.\nno_flat: Passes if there are no sections of well log with the same values (e.g. because a gap was interpolated across with a constant value).\nno_monotonic: Passes if there are no monotonic ramps in the log (e.g. because a gap was linearly interpolated across).\n\nInsert lists of tests into a dictionary with any of the following key examples:\n\n'GR': The test(s) will run against the GR log.\n'Gamma': The test(s) will run against the log matching according to the alias dictionary.\n'Each': The test(s) will run against every log in a well.\n'All': Some tests take multiple logs as input, for example quality.no_similarities. These test(s) will run against all the logs as a group. Could be quite slow, because there may be a lot of pairwise comparisons to do.\n\nThe tests are run against all wells in the project. 
If you only want to run against a subset of the wells, make a new project for them.", "import welly.quality as q\n\ntests = {\n 'All': [q.no_similarities],\n 'Each': [q.no_gaps, q.no_monotonic, q.no_flat],\n 'GR': [q.all_positive],\n 'Sonic': [q.all_positive, q.all_between(50, 200)],\n}", "Let's add our own test for units:", "def has_si_units(curve):\n return curve.units.lower() in ['mm', 'gapi', 'us/m', 'k/m3']\n\ntests['Each'].append(has_si_units)", "We'll use the same alias dictionary as before:", "alias", "Now we can run the tests and look at the results, which are in an HTML table:", "from IPython.display import HTML\n\nHTML(p.curve_table_html(keys=['Caliper', 'GR', 'Sonic', 'SP', 'RHOB'],\n tests=tests, alias=alias)\n )", "Here's how to interpret the result:\n\nGreen background: the log is present. You can see the mean value and the units (check them!!).\nGrey background: the log is not present.\n\nAnd the traffic light dots (hover to see how many tests passed): \n\nGreen dot: all the tests passed.\nOrange dot: some tests failed.\nRed dot: all tests failed.\nGrey dot: no tests ran.\n\nThe Passing percentage shows how many tests passed for that well.\n\n&copy; 2022 Agile Scientific, CC BY" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
mne-tools/mne-tools.github.io
dev/_downloads/00e78bba5d10188fcf003ef05e32a6f7/decoding_time_generalization_conditions.ipynb
bsd-3-clause
[ "%matplotlib inline", "Decoding sensor space data with generalization across time and conditions\nThis example runs the analysis described in :footcite:KingDehaene2014. It\nillustrates how one can\nfit a linear classifier to identify a discriminatory topography at a given time\ninstant and subsequently assess whether this linear model can accurately\npredict all of the time samples of a second set of conditions.", "# Authors: Jean-Remi King <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Denis Engemann <[email protected]>\n#\n# License: BSD-3-Clause\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\n\nimport mne\nfrom mne.datasets import sample\nfrom mne.decoding import GeneralizingEstimator\n\nprint(__doc__)\n\n# Preprocess data\ndata_path = sample.data_path()\n# Load and filter data, set up epochs\nmeg_path = data_path / 'MEG' / 'sample'\nraw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'\nevents_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'\nraw = mne.io.read_raw_fif(raw_fname, preload=True)\npicks = mne.pick_types(raw.info, meg=True, exclude='bads') # Pick MEG channels\nraw.filter(1., 30., fir_design='firwin') # Band pass filtering signals\nevents = mne.read_events(events_fname)\nevent_id = {'Auditory/Left': 1, 'Auditory/Right': 2,\n 'Visual/Left': 3, 'Visual/Right': 4}\ntmin = -0.050\ntmax = 0.400\n# decimate to make the example faster to run, but then use verbose='error' in\n# the Epochs constructor to suppress warning about decimation causing aliasing\ndecim = 2\nepochs = mne.Epochs(raw, events, event_id=event_id, tmin=tmin, tmax=tmax,\n proj=True, picks=picks, baseline=None, preload=True,\n reject=dict(mag=5e-12), decim=decim, verbose='error')", "We will train the classifier on all left visual vs auditory trials\nand test on all right visual vs auditory trials.", "clf = make_pipeline(\n StandardScaler(),\n LogisticRegression(solver='liblinear') # liblinear is faster than lbfgs\n)\ntime_gen = GeneralizingEstimator(clf, scoring='roc_auc', n_jobs=None,\n verbose=True)\n\n# Fit classifiers on the epochs where the stimulus was presented to the left.\n# Note that the experimental condition y indicates auditory or visual\ntime_gen.fit(X=epochs['Left'].get_data(),\n y=epochs['Left'].events[:, 2] > 2)", "Score on the epochs where the stimulus was presented to the right.", "scores = time_gen.score(X=epochs['Right'].get_data(),\n y=epochs['Right'].events[:, 2] > 2)", "Plot", "fig, ax = plt.subplots(1)\nim = ax.matshow(scores, vmin=0, vmax=1., cmap='RdBu_r', origin='lower',\n extent=epochs.times[[0, -1, 0, -1]])\nax.axhline(0., color='k')\nax.axvline(0., color='k')\nax.xaxis.set_ticks_position('bottom')\nax.set_xlabel('Testing Time (s)')\nax.set_ylabel('Training Time (s)')\nax.set_title('Generalization across time and condition')\nplt.colorbar(im, ax=ax)\nplt.show()", "References\n.. footbibliography::" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
h-mayorquin/time_series_basic
presentations/2016-01-21(Wall-Street-Letter-Latency-Prediction).ipynb
bsd-3-clause
[ "Prediction of text with Nexa Letter Latency.\nThis notbook is for seeing how much the delay between the code vector and the code is related to the accuaracy of the prediciton.", "import numpy as np\nimport h5py\nfrom sklearn import svm, cross_validation\nfrom sklearn.naive_bayes import MultinomialNB", "Load the data", "# First we load the file \nfile_location = '../results_database/text_wall_street_big.hdf5'\nf = h5py.File(file_location, 'r')\n\n# Now we need to get the letters and align them\ntext_directory = '../data/wall_street_letters.npy'\nletters_sequence = np.load(text_directory)\nNletters = len(letters_sequence)\nsymbols = set(letters_sequence)\n\n# Load the particular example\nNspatial_clusters = 5\nNtime_clusters = 15\nNembedding = 3\n\nrun_name = '/low-resolution'\nparameters_string = '/' + str(Nspatial_clusters)\nparameters_string += '-' + str(Ntime_clusters)\nparameters_string += '-' + str(Nembedding)\n\nnexa = f[run_name + parameters_string]\n\n# Now we load the time and the code vectors\ntime = nexa['time']\ncode_vectors = nexa['code-vectors']\ncode_vectors_distance = nexa['code-vectors-distance']\ncode_vectors_softmax = nexa['code-vectors-softmax']\ncode_vectors_winner = nexa['code-vectors-winner']", "Study the Latency of the Data by Accuracy\nMake prediction with winner takes all\nMake the prediction for each delay. This takes a bit", "N = 50000 # Amount of data\ndelays = np.arange(0, 10)\naccuracy = []\n\n# Make prediction with scikit-learn\nfor delay in delays:\n X = code_vectors_winner[:(N - delay)]\n y = letters_sequence[delay:N]\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)\n\n clf = svm.SVC(C=1.0, cache_size=200, kernel='linear')\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test) * 100.0\n accuracy.append(score)\n print('delay', delay)\n print('score', score)\n", "Plot it", "import matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\nplt.plot(delays, accuracy, 'o-', lw=2, markersize=10)\nplt.xlabel('Delays')\nplt.ylim([0, 105])\nplt.xlim([-0.5, 10])\nplt.ylabel('Accuracy %')\nplt.title('Delays vs Accuracy')\nfig = plt.gcf()\nfig.set_size_inches((12, 9))", "Make predictions with representation standarization", "from sklearn import preprocessing\n\nN = 50000 # Amount of data\ndelays = np.arange(0, 10)\naccuracy_std = []\n\n# Make prediction with scikit-learn\nfor delay in delays:\n X = code_vectors_winner[:(N - delay)]\n y = letters_sequence[delay:N]\n X = preprocessing.scale(X)\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)\n\n clf = svm.SVC(C=1.0, cache_size=200, kernel='linear')\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test) * 100.0\n accuracy_std.append(score)\n print('delay', delay)\n print('score', score)", "Plot it", "plt.plot(delays, accuracy, 'o-', lw=2, markersize=10., label='Accuracy')\nplt.plot(delays, accuracy_std, 'o-', lw=2, markersize=10, label='Standarized Representations')\nplt.xlabel('Delays')\nplt.ylim([0, 105])\nplt.xlim([-0.5, 10])\nplt.ylabel('Accuracy %')\nplt.title('Delays vs Accuracy')\nfig = plt.gcf()\nfig.set_size_inches((12, 9))\nplt.legend()", "Make prediction with softmax", "accuracy_softmax = []\n\n# Make prediction with scikit-learn\nfor delay in delays:\n X = code_vectors_softmax[:(N - delay)]\n y = letters_sequence[delay:N]\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)\n\n clf = svm.SVC(C=1.0, cache_size=200, kernel='linear')\n clf.fit(X_train, 
y_train)\n score = clf.score(X_test, y_test) * 100.0\n accuracy_softmax.append(score)\n print('delay', delay)\n print('score', score)\n", "Standarized predictions with softmax", "accuracy_softmax_std = []\n\n# Make prediction with scikit-learn\nfor delay in delays:\n X = code_vectors_winner[:(N - delay)]\n y = letters_sequence[delay:N]\n X = preprocessing.scale(X)\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10)\n\n clf = svm.SVC(C=1.0, cache_size=200, kernel='linear')\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test) * 100.0\n accuracy_softmax_std.append(score)\n print('delay', delay)\n print('score', score)\n\nplt.plot(delays, accuracy_softmax, 'o-', lw=2, markersize=10., label='Accuracy')\nplt.plot(delays, accuracy_softmax_std, 'o-', lw=2, markersize=10, label='Standarized Representations')\nplt.xlabel('Delays')\nplt.ylim([0, 105])\nplt.xlim([-0.5, 10])\nplt.ylabel('Accuracy %')\nplt.title('Delays vs Accuracy (Softmax)')\nfig = plt.gcf()\nfig.set_size_inches((12, 9))\nplt.legend()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/feateng/feateng.ipynb
apache-2.0
[ "<h1> Feature Engineering </h1>\n\nIn this notebook, you will learn how to incorporate feature engineering into your pipeline.\n<ul>\n<li> Working with feature columns </li>\n<li> Adding feature crosses in TensorFlow </li>\n<li> Reading data from BigQuery </li>\n<li> Creating datasets using Dataflow </li>\n<li> Using a wide-and-deep model </li>\n</ul>", "!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst\n\n!pip install --user apache-beam[gcp]==2.24.0 \n!pip install --user httplib2==0.12.0 ", "NOTE: In the output of the above cell you may ignore any WARNINGS or ERRORS related to the following: \"apache-beam\", \"pyarrow\", \"tensorflow-transform\", \"tensorflow-model-analysis\", \"tensorflow-data-validation\", \"joblib\", \"google-cloud-storage\" etc.\nIf you get any related errors mentioned above please rerun the above cell.\nNote: Restart your kernel to use updated packages.", "import tensorflow as tf\nimport apache_beam as beam\nimport shutil\nprint(tf.__version__)", "<h2> 1. Environment variables for project and bucket </h2>\n\n\nYour project id is the unique string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: <b>Project ID:</b> cloud-training-demos \nCloud training often involves saving and restoring model files. Therefore, we should <b>create a single-region bucket</b>. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available) \n<b>Change the cell below</b> to reflect your Project ID and bucket name.", "import os\nPROJECT = 'cloud-training-demos' # CHANGE THIS\nBUCKET = 'cloud-training-demos' # REPLACE WITH YOUR BUCKET NAME. Use a regional bucket in the region you selected.\nREGION = 'us-central1' # Choose an available region for Cloud AI Platform\n\n# for bash\nos.environ['PROJECT'] = PROJECT\nos.environ['BUCKET'] = BUCKET\nos.environ['REGION'] = REGION\nos.environ['TFVERSION'] = '2.6' \n\n## ensure we're using python3 env\nos.environ['CLOUDSDK_PYTHON'] = 'python3.7'\n\n%%bash\ngcloud config set project $PROJECT\ngcloud config set compute/region $REGION\n\n## ensure we predict locally with our current Python environment\ngcloud config set ml_engine/local_python `which python`", "<h2> 2. 
Specifying query to pull the data </h2>\n\nLet's pull out a few extra columns from the timestamp.", "def create_query(phase, EVERY_N):\n if EVERY_N == None:\n EVERY_N = 4 #use full dataset\n \n #select and pre-process fields\n base_query = \"\"\"\n#legacySQL\nSELECT\n (tolls_amount + fare_amount) AS fare_amount,\n DAYOFWEEK(pickup_datetime) AS dayofweek,\n HOUR(pickup_datetime) AS hourofday,\n pickup_longitude AS pickuplon,\n pickup_latitude AS pickuplat,\n dropoff_longitude AS dropofflon,\n dropoff_latitude AS dropofflat,\n passenger_count*1.0 AS passengers,\n CONCAT(STRING(pickup_datetime), STRING(pickup_longitude), STRING(pickup_latitude), STRING(dropoff_latitude), STRING(dropoff_longitude)) AS key\nFROM\n [nyc-tlc:yellow.trips]\nWHERE\n trip_distance > 0\n AND fare_amount >= 2.5\n AND pickup_longitude > -78\n AND pickup_longitude < -70\n AND dropoff_longitude > -78\n AND dropoff_longitude < -70\n AND pickup_latitude > 37\n AND pickup_latitude < 45\n AND dropoff_latitude > 37\n AND dropoff_latitude < 45\n AND passenger_count > 0\n \"\"\"\n \n #add subsampling criteria by modding with hashkey\n if phase == 'train': \n query = \"{} AND ABS(HASH(pickup_datetime)) % {} < 2\".format(base_query,EVERY_N)\n elif phase == 'valid': \n query = \"{} AND ABS(HASH(pickup_datetime)) % {} == 2\".format(base_query,EVERY_N)\n elif phase == 'test':\n query = \"{} AND ABS(HASH(pickup_datetime)) % {} == 3\".format(base_query,EVERY_N)\n return query\n \nprint(create_query('valid', 100)) #example query using 1% of data", "Try the query above in https://bigquery.cloud.google.com/table/nyc-tlc:yellow.trips if you want to see what it does (ADD LIMIT 10 to the query!)\n<h2> 3. Preprocessing Dataflow job from BigQuery </h2>\n\nThis code reads from BigQuery and saves the data as-is on Google Cloud Storage. We can do additional preprocessing and cleanup inside Dataflow, but then we'll have to remember to repeat that prepreprocessing during inference. It is better to use tf.transform which will do this book-keeping for you, or to do preprocessing within your TensorFlow model. We will look at this in future notebooks. For now, we are simply moving data from BigQuery to CSV using Dataflow.\nWhile we could read from BQ directly from TensorFlow (See: https://www.tensorflow.org/api_docs/python/tf/contrib/cloud/BigQueryReader), it is quite convenient to export to CSV and do the training off CSV. Let's use Dataflow to do this at scale.\nBecause we are running this on the Cloud, you should go to the GCP Console (https://console.cloud.google.com/dataflow) to look at the status of the job. It will take several minutes for the preprocessing job to launch.", "%%bash\nif gsutil ls | grep -q gs://${BUCKET}/taxifare/ch4/taxi_preproc/; then\n gsutil -m rm -rf gs://$BUCKET/taxifare/ch4/taxi_preproc/\nfi", "First, let's define a function for preprocessing the data", "import datetime\n\n####\n# Arguments:\n# -rowdict: Dictionary. The beam bigquery reader returns a PCollection in\n# which each row is represented as a python dictionary\n# Returns:\n# -rowstring: a comma separated string representation of the record with dayofweek\n# converted from int to string (e.g. 
3 --> Tue)\n####\ndef to_csv(rowdict):\n days = ['null', 'Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']\n CSV_COLUMNS = 'fare_amount,dayofweek,hourofday,pickuplon,pickuplat,dropofflon,dropofflat,passengers,key'.split(',')\n rowdict['dayofweek'] = days[rowdict['dayofweek']]\n rowstring = ','.join([str(rowdict[k]) for k in CSV_COLUMNS])\n return rowstring\n\n\n####\n# Arguments:\n# -EVERY_N: Integer. Sample one out of every N rows from the full dataset.\n# Larger values will yield smaller sample\n# -RUNNER: 'DirectRunner' or 'DataflowRunner'. Specify to run the pipeline\n# locally or on Google Cloud respectively. \n# Side-effects:\n# -Creates and executes dataflow pipeline. \n# See https://beam.apache.org/documentation/programming-guide/#creating-a-pipeline\n####\ndef preprocess(EVERY_N, RUNNER):\n job_name = 'preprocess-taxifeatures' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')\n print('Launching Dataflow job {} ... hang on'.format(job_name))\n OUTPUT_DIR = 'gs://{0}/taxifare/ch4/taxi_preproc/'.format(BUCKET)\n\n #dictionary of pipeline options\n options = {\n 'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),\n 'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),\n 'job_name': 'preprocess-taxifeatures' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S'),\n 'project': PROJECT,\n 'runner': RUNNER,\n 'num_workers' : 4,\n 'max_num_workers' : 5\n }\n #instantiate PipelineOptions object using options dictionary\n opts = beam.pipeline.PipelineOptions(flags=[], **options)\n #instantantiate Pipeline object using PipelineOptions\n with beam.Pipeline(options=opts) as p:\n for phase in ['train', 'valid']:\n query = create_query(phase, EVERY_N) \n outfile = os.path.join(OUTPUT_DIR, '{}.csv'.format(phase))\n (\n p | 'read_{}'.format(phase) >> beam.io.Read(beam.io.BigQuerySource(query=query))\n | 'tocsv_{}'.format(phase) >> beam.Map(to_csv)\n | 'write_{}'.format(phase) >> beam.io.Write(beam.io.WriteToText(outfile))\n )\n print(\"Done\")", "Now, let's run pipeline locally. This takes upto <b>5 minutes</b>. You will see a message \"Done\" when it is done.", "preprocess(50*10000, 'DirectRunner') \n\n%%bash\ngsutil ls gs://$BUCKET/taxifare/ch4/taxi_preproc/", "4. Run Beam pipeline on Cloud Dataflow\nRun pipeline on cloud on a larger sample size.", "%%bash\nif gsutil ls | grep -q gs://${BUCKET}/taxifare/ch4/taxi_preproc/; then\n gsutil -m rm -rf gs://$BUCKET/taxifare/ch4/taxi_preproc/\nfi", "The following step will take <b>10-15 minutes.</b> Monitor job progress on the Cloud Console in the Dataflow section.\nNote: If the error occurred regarding enabling of Dataflow API then disable and re-enable the Dataflow API and re-run the below cell.", "preprocess(50*100, 'DataflowRunner') \n", "Once the job completes, observe the files created in Google Cloud Storage", "%%bash\ngsutil ls -l gs://$BUCKET/taxifare/ch4/taxi_preproc/\n\n%%bash\n#print first 10 lines of first shard of train.csv\ngsutil cat \"gs://$BUCKET/taxifare/ch4/taxi_preproc/train.csv-00000-of-*\" | head", "5. 
Develop model with new inputs\nDownload the first shard of the preprocessed data to enable local development.", "%%bash\nif [ -d sample ]; then\n rm -rf sample\nfi\nmkdir sample\ngsutil cat \"gs://$BUCKET/taxifare/ch4/taxi_preproc/train.csv-00000-of-*\" > sample/train.csv\ngsutil cat \"gs://$BUCKET/taxifare/ch4/taxi_preproc/valid.csv-00000-of-*\" > sample/valid.csv", "We have two new inputs in the INPUT_COLUMNS, three engineered features, and the estimator involves bucketization and feature crosses.", "%%bash\ngrep -A 20 \"INPUT_COLUMNS =\" taxifare/trainer/model.py\n\n%%bash\ngrep -A 50 \"build_estimator\" taxifare/trainer/model.py\n\n%%bash\ngrep -A 15 \"add_engineered(\" taxifare/trainer/model.py", "Try out the new model on the local sample (this takes <b>5 minutes</b>) to make sure it works fine.", "%%bash\nrm -rf taxifare.tar.gz taxi_trained\nexport PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare\npython -m trainer.task \\\n --train_data_paths=${PWD}/sample/train.csv \\\n --eval_data_paths=${PWD}/sample/valid.csv \\\n --output_dir=${PWD}/taxi_trained \\\n --train_steps=10 \\\n --job-dir=/tmp\n\n%%bash\nls taxi_trained/export/exporter/", "You can use saved_model_cli to look at the exported signature. Note that the model doesn't need any of the engineered features as inputs. It will compute latdiff, londiff, euclidean from the provided inputs, thanks to the add_engineered call in the serving_input_fn.", "%%bash\nmodel_dir=$(ls ${PWD}/taxi_trained/export/exporter | tail -1)\nsaved_model_cli show --dir ${PWD}/taxi_trained/export/exporter/${model_dir} --all\n\n%%writefile /tmp/test.json\n{\"dayofweek\": \"Sun\", \"hourofday\": 17, \"pickuplon\": -73.885262, \"pickuplat\": 40.773008, \"dropofflon\": -73.987232, \"dropofflat\": 40.732403, \"passengers\": 2}\n\n%%bash\nmodel_dir=$(ls ${PWD}/taxi_trained/export/exporter)\ngcloud ai-platform local predict \\\n --model-dir=${PWD}/taxi_trained/export/exporter/${model_dir} \\\n --json-instances=/tmp/test.json", "6. Train on cloud\nThis will take <b> 10-15 minutes </b> even though the prompt immediately returns after the job is submitted. Monitor job progress on the Cloud Console, in the AI Platform section and wait for the training job to complete.", "%%bash\nOUTDIR=gs://${BUCKET}/taxifare/ch4/taxi_trained\nJOBNAME=lab4a_$(date -u +%y%m%d_%H%M%S)\necho $OUTDIR $REGION $JOBNAME\ngsutil -m rm -rf $OUTDIR\ngcloud ai-platform jobs submit training $JOBNAME \\\n --region=$REGION \\\n --module-name=trainer.task \\\n --package-path=${PWD}/taxifare/trainer \\\n --job-dir=$OUTDIR \\\n --staging-bucket=gs://$BUCKET \\\n --scale-tier=BASIC \\\n --runtime-version 2.3 \\\n --python-version 3.5 \\\n -- \\\n --train_data_paths=\"gs://$BUCKET/taxifare/ch4/taxi_preproc/train*\" \\\n --eval_data_paths=\"gs://${BUCKET}/taxifare/ch4/taxi_preproc/valid*\" \\\n --train_steps=5000 \\\n --output_dir=$OUTDIR", "The RMSE is now 8.33249, an improvement over the 9.3 that we were getting ... of course, we won't know until we train/validate on a larger dataset. Still, this is promising. But before we do that, let's do hyper-parameter tuning.\n<b>Use the Cloud Console link to monitor the job and wait till the job is done.</b>\nCopyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
crdietrich/sparklines
Pandas Sparklines Demo.ipynb
mit
[ "Sparklines in Pandas\nSparklines are small unlabeled plots, used to visually convey an idea in a small space. This script creates sparklines in a Pandas DataFrame which can then be displayed inline in a Jupyter Notebook or output to an HTML file. It does not annotate the figure, other columns of the DataFrame can be used to convey details about the sparklines.\nBackground:\nhttps://en.wikipedia.org/wiki/Sparkline\nForked and extended from:\nhttps://github.com/iiSeymour/sparkline-nb", "import numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport sparklines", "Create some plot data\nFunction assumes data to plot is an array-like object in a single cell per row.", "density_func = 78\nmean, var, skew, kurt = stats.chi.stats(density_func, moments='mvsk')\nx_chi = np.linspace(stats.chi.ppf(0.01, density_func),\n stats.chi.ppf(0.99, density_func), 100)\ny_chi = stats.chi.pdf(x_chi, density_func)\n\nx_expon = np.linspace(stats.expon.ppf(0.01), stats.expon.ppf(0.99), 100)\ny_expon = stats.expon.pdf(x_expon)\n\na_gamma = 1.99\nx_gamma = np.linspace(stats.gamma.ppf(0.01, a_gamma),\n stats.gamma.ppf(0.99, a_gamma), 100)\ny_gamma = stats.gamma.pdf(x_gamma, a_gamma)\n\nn = 100\n\nnp.random.seed(0) # keep generated data the same for git commit\n\ndata = [np.random.rand(n), \n np.random.randn(n), \n np.random.beta(2, 1, size=n), \n np.random.binomial(3.4, 0.22, size=n), \n np.random.exponential(size=n),\n np.random.geometric(0.5, size=n), \n np.random.laplace(size=n), \n y_chi, \n y_expon, \n y_gamma]\n\nfunction = ['rand',\n 'randn',\n 'beta',\n 'binomial',\n 'exponential',\n 'geometric',\n 'laplace',\n 'chi',\n 'expon',\n 'gamma']\n\ndf = pd.DataFrame(data)\ndf['function'] = function\n\ndf", "Define range of data to make sparklines\nNote: data must be row wise", "a = df.ix[:, 0:100]", "Output to new DataFrame of Sparklines", "df_out = pd.DataFrame()\ndf_out['sparkline'] = sparklines.create(data=a)\nsparklines.show(df_out[['sparkline']])", "Insert Sparklines into source DataFrame", "df['sparkline'] = sparklines.create(data=a)\nsparklines.show(df[['function', 'sparkline']])", "Detailed Formatting\nReturn only sparklines, format the line, fill and marker.", "df_out = pd.DataFrame()\ndf_out['sparkline'] = sparklines.create(data=a,\n color='#1b470a',\n fill_color='#99a894',\n fill_alpha=0.2,\n point_color='blue',\n point_fill='none',\n point_marker='*',\n point_size=3,\n figsize=(6, 0.25))\nsparklines.show(df_out[['sparkline']])", "Example Data and Sparklines Layout", "df_copy = df[['function', 'sparkline']].copy()\n\ndf_copy['value'] = df.ix[:, 100]\n\ndf_copy['change'] = df.ix[:,98] - df.ix[:,99]\n\ndf_copy['change_%'] = df_copy.change / df.ix[:,99]\n\nsparklines.show(df_copy)", "Export to HTML\nInline Jupyter Notebook", "sparklines.to_html(df_copy, 'pandas_sparklines_demo')", "HTML text for rendering elsewhere", "html = sparklines.to_html(df_copy)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
mit-eicu/eicu-code
notebooks/medication.ipynb
mit
[ "medication\nThe medications table reflects the active medication orders for patients. These are orders but do not necessarily reflect administration to the patient. For example, while existence of data in the infusionDrug table confirms a patient received a continuous infusion, existence of the same data in this table only indicates that the infusion was ordered for the patient. Most orders are fulfilled, but not all. Furthermore, many orders are done pro re nata, or PRN, which means \"when needed\". Administration of these orders is difficult to quantify.\nIn the US, all orders must be reviewed by a pharmacist. The majority of hospitals have an HL7 medication interface system in place which automatically synchronizes the orders with eCareManager (the source of this database) as they are verified by the pharmacist in the source pharmacy system. For hospitals without a medication interface, the eICU staff may enter a selection of medications to facilitate population management and completeness for reporting purposes.", "# Import libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport psycopg2\nimport getpass\nimport pdvega\n\n# for configuring connection \nfrom configobj import ConfigObj\nimport os\n\n%matplotlib inline\n\n# Create a database connection using settings from config file\nconfig='../db/config.ini'\n\n# connection info\nconn_info = dict()\nif os.path.isfile(config):\n config = ConfigObj(config)\n conn_info[\"sqluser\"] = config['username']\n conn_info[\"sqlpass\"] = config['password']\n conn_info[\"sqlhost\"] = config['host']\n conn_info[\"sqlport\"] = config['port']\n conn_info[\"dbname\"] = config['dbname']\n conn_info[\"schema_name\"] = config['schema_name']\nelse:\n conn_info[\"sqluser\"] = 'postgres'\n conn_info[\"sqlpass\"] = ''\n conn_info[\"sqlhost\"] = 'localhost'\n conn_info[\"sqlport\"] = 5432\n conn_info[\"dbname\"] = 'eicu'\n conn_info[\"schema_name\"] = 'public,eicu_crd'\n \n# Connect to the eICU database\nprint('Database: {}'.format(conn_info['dbname']))\nprint('Username: {}'.format(conn_info[\"sqluser\"]))\nif conn_info[\"sqlpass\"] == '':\n # try connecting without password, i.e. 
peer or OS authentication\n try:\n if (conn_info[\"sqlhost\"] == 'localhost') & (conn_info[\"sqlport\"]=='5432'):\n con = psycopg2.connect(dbname=conn_info[\"dbname\"],\n user=conn_info[\"sqluser\"]) \n else:\n con = psycopg2.connect(dbname=conn_info[\"dbname\"],\n host=conn_info[\"sqlhost\"],\n port=conn_info[\"sqlport\"],\n user=conn_info[\"sqluser\"])\n except:\n conn_info[\"sqlpass\"] = getpass.getpass('Password: ')\n\n con = psycopg2.connect(dbname=conn_info[\"dbname\"],\n host=conn_info[\"sqlhost\"],\n port=conn_info[\"sqlport\"],\n user=conn_info[\"sqluser\"],\n password=conn_info[\"sqlpass\"])\nquery_schema = 'set search_path to ' + conn_info['schema_name'] + ';'", "Examine a single patient", "patientunitstayid = 237395\n\nquery = query_schema + \"\"\"\nselect *\nfrom medication\nwhere patientunitstayid = {}\norder by drugorderoffset\n\"\"\".format(patientunitstayid)\n\ndf = pd.read_sql_query(query, con)\ndf.head()\n\ndf.columns\n\n# Look at a subset of columns\ncols = ['medicationid','patientunitstayid',\n 'drugorderoffset','drugorderoffset', 'drugstopoffset',\n 'drugivadmixture', 'drugordercancelled', 'drugname','drughiclseqno', 'gtc',\n 'dosage','routeadmin','loadingdose', 'prn']\ndf[cols].head().T", "Here we can see that, roughly on ICU admission, the patient had an order for vancomycin, aztreonam, and tobramycin.\nIdentifying patients admitted on a single drug\nLet's look for patients who have an order for vancomycin using exact text matching.", "drug = 'VANCOMYCIN'\nquery = query_schema + \"\"\"\nselect \n distinct patientunitstayid\nfrom medication\nwhere drugname like '%{}%'\n\"\"\".format(drug)\n\ndf_drug = pd.read_sql_query(query, con)\nprint('{} unit stays with {}.'.format(df_drug.shape[0], drug))", "Exact text matching is fairly weak, as there's no systematic reason to prefer upper case or lower case. Let's relax the case matching.", "drug = 'VANCOMYCIN'\nquery = query_schema + \"\"\"\nselect \n distinct patientunitstayid\nfrom medication\nwhere drugname ilike '%{}%'\n\"\"\".format(drug)\n\ndf_drug = pd.read_sql_query(query, con)\nprint('{} unit stays with {}.'.format(df_drug.shape[0], drug))", "HICL codes are used to group together drugs which have the same underlying ingredient (i.e. most frequently this is used to group brand name drugs with the generic name drugs). We can see above the HICL for vancomycin is 10093, so let's try grabbing that.", "hicl = 10093\nquery = query_schema + \"\"\"\nselect \n distinct patientunitstayid\nfrom medication\nwhere drughiclseqno = {}\n\"\"\".format(hicl)\n\ndf_hicl = pd.read_sql_query(query, con)\nprint('{} unit stays with HICL = {}.'.format(df_hicl.shape[0], hicl))", "No luck! I wonder what we missed? Let's go back to the original query, this time retaining HICL and the name of the drug.", "drug = 'VANCOMYCIN'\nquery = query_schema + \"\"\"\nselect \n drugname, drughiclseqno, count(*) as n\nfrom medication\nwhere drugname ilike '%{}%'\ngroup by drugname, drughiclseqno\norder by n desc\n\"\"\".format(drug)\n\ndf_drug = pd.read_sql_query(query, con)\ndf_drug.head()", "It appears there are more than one HICL - we can group by HICL in this query to get an idea.", "df_drug['drughiclseqno'].value_counts()", "Unfortunately, we can't be sure that these HICLs always identify only vancomycin. 
For example, let's look at drugnames for HICL = 1403.", "hicl = 1403\nquery = query_schema + \"\"\"\nselect \n drugname, count(*) as n\nfrom medication\nwhere drughiclseqno = {}\ngroup by drugname\norder by n desc\n\"\"\".format(hicl)\n\ndf_hicl = pd.read_sql_query(query, con)\ndf_hicl.head()", "This HICL seems more focused on the use of creams than on vancomycin. Let's instead inspect the top 3.", "for hicl in [4042, 10093, 37442]:\n query = query_schema + \"\"\"\n select \n drugname, count(*) as n\n from medication\n where drughiclseqno = {}\n group by drugname\n order by n desc\n \"\"\".format(hicl)\n\n df_hicl = pd.read_sql_query(query, con)\n print('HICL {}'.format(hicl))\n print('Number of rows: {}'.format(df_hicl['n'].sum()))\n print('Top 5 rows by frequency:')\n print(df_hicl.head())\n print()", "This is fairly convincing that these only refer to vancomycin. An alternative approach is to acquire the code book for HICL codes and look up vancomycin there.\nHospitals with data available", "query = query_schema + \"\"\"\nwith t as\n(\nselect distinct patientunitstayid\nfrom medication\n)\nselect \n pt.hospitalid\n , count(distinct pt.patientunitstayid) as number_of_patients\n , count(distinct t.patientunitstayid) as number_of_patients_with_tbl\nfrom patient pt\nleft join t\n on pt.patientunitstayid = t.patientunitstayid\ngroup by pt.hospitalid\n\"\"\".format(patientunitstayid)\n\ndf = pd.read_sql_query(query, con)\ndf['data completion'] = df['number_of_patients_with_tbl'] / df['number_of_patients'] * 100.0\ndf.sort_values('number_of_patients_with_tbl', ascending=False, inplace=True)\ndf.head(n=10)\n\ndf[['data completion']].vgplot.hist(bins=10,\n var_name='Number of hospitals',\n value_name='Percent of patients with data')", "Here we can see there are a few hospitals with no interface, and thus 0 patients, though the majority have >90% data completion." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
opengeostat/pygslib
pygslib/Ipython_templates/backtr_raw.ipynb
mit
[ "Testing the back normalscore transformation", "#general imports\nimport matplotlib.pyplot as plt \nimport pygslib \nfrom matplotlib.patches import Ellipse\nimport numpy as np\nimport pandas as pd\n\n#make the plots inline\n%matplotlib inline ", "Getting the data ready for work\nIf the data is in GSLIB format you can use the function gslib.read_gslib_file(filename) to import the data into a Pandas DataFrame.", "#get the data in gslib format into a pandas Dataframe\nmydata= pygslib.gslib.read_gslib_file('../data/cluster.dat') \n\n#view data in a 2D projection\nplt.scatter(mydata['Xlocation'],mydata['Ylocation'], c=mydata['Primary'])\nplt.colorbar()\nplt.grid(True)\nplt.show()", "The nscore transformation table function", "print (pygslib.gslib.__dist_transf.backtr.__doc__)", "Get the transformation table", "transin,transout, error = pygslib.gslib.__dist_transf.ns_ttable(mydata['Primary'],mydata['Declustering Weight'])\nprint ('there was any error?: ', error!=0)", "Get the normal score transformation\nNote that the declustering is applied on the transformation tables", "mydata['NS_Primary'] = pygslib.gslib.__dist_transf.nscore(mydata['Primary'],transin,transout,getrank=False)\n\nmydata['NS_Primary'].hist(bins=30)", "Doing the back transformation", "mydata['NS_Primary_BT'],error = pygslib.gslib.__dist_transf.backtr(mydata['NS_Primary'],\n transin,transout,\n ltail=1,utail=1,ltpar=0,utpar=60,\n zmin=0,zmax=60,getrank=False)\nprint ('there was any error?: ', error!=0, error)\n\nmydata[['Primary','NS_Primary_BT']].hist(bins=30)\n\nmydata[['Primary','NS_Primary_BT', 'NS_Primary']].head()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
henrysky/astroNN
demo_tutorial/VAE/variational_autoencoder_demo.ipynb
mit
[ "Variational Autoencoder demo with 1D data\nHere is astroNN, please take a look if you are interested in astronomy or how neural network applied in astronomy\n* Henry Leung - Astronomy student, University of Toronto - henrysky\n* Project advisor: Jo Bovy - Professor, Department of Astronomy and Astrophysics, University of Toronto - jobovy\n* Contact Henry: henrysky.leung [at] utoronto.ca\n* This tutorial is created on 13/Jan/2018 with Keras 2.1.2, Tensorflow 1.4.0, Nvidia CuDNN 6.1 for CUDA 8.0 (Optional), Python 3.6.3 Win10 x64\n* Updated on 31/Jan/2020 with Tensorflow 2.1.0\nImport everything we need first", "%matplotlib inline\n%config InlineBackend.figure_format='retina'\n\nimport numpy as np\nimport pylab as plt\nfrom scipy.stats import norm\n\nfrom tensorflow.keras.layers import Input, Dense, Lambda, Layer, Add, Multiply\nfrom tensorflow.keras.models import Model, Sequential\nfrom tensorflow.keras import regularizers\n\nimport tensorflow as tf\nfrom astroNN.nn.layers import KLDivergenceLayer\nfrom astroNN.nn.losses import nll", "Then define basic constant, function and define our neural network", "original_dim = 4000 # Our 1D images dimension, each image has 4000 pixel\nintermediate_dim = 256 # Number of neurone our fully connected neural net has\n\nbatch_size = 50\nepochs = 15\nepsilon_std = 1.0\n\n\ndef blackbox_image_generator(pixel, center, sigma):\n return norm.pdf(pixel, center, sigma)\n\n\ndef model_vae(latent_dim):\n \"\"\" \n Main Model + Encoder\n \"\"\"\n x = Input(shape=(original_dim,))\n h = Dense(intermediate_dim, activation='relu')(x)\n\n z_mu = Dense(latent_dim, kernel_regularizer=regularizers.l2(1e-4))(h)\n z_log_var = Dense(latent_dim)(h)\n\n z_mu, z_log_var = KLDivergenceLayer()([z_mu, z_log_var])\n z_sigma = Lambda(lambda t: tf.exp(.5*t))(z_log_var)\n\n eps = Input(tensor=tf.random.normal(mean=0, stddev=epsilon_std, shape=(tf.shape(x)[0], latent_dim)))\n \n z_eps = Multiply()([z_sigma, eps])\n z = Add()([z_mu, z_eps])\n \n decoder = Sequential()\n decoder.add(Dense(intermediate_dim, input_dim=latent_dim, activation='relu'))\n decoder.add(Dense(original_dim, activation='sigmoid'))\n \n x_pred = decoder(z)\n\n vae = Model(inputs=[x, eps], outputs=x_pred)\n \n encoder = Model(x, z_mu)\n \n return vae, encoder", "Now we will generate some true latent variable so we can pass them to a blackbox image generator to generate some 1D images.\nThe blackbox image generator (which is deterministic) will take two numbers and generate images in a predictable way. 
This is important because if the generator generate image in a random way, then there is nothing neural network can learn.\nBut for simplicity, we will fix the first latent variable of the blackbox image generator a constant and only use the second one to generate images.", "s_1 = np.random.normal(30, 1.5, 900)\ns_2 = np.random.normal(15, 1, 900)\ns_3 = np.random.normal(10, 1, 900)\n\ns = np.concatenate([s_1, s_2, s_3])\n\nplt.figure(figsize=(12, 12))\nplt.hist(s[:900], 70, density=1, facecolor='green', alpha=0.75, label='Population 1')\nplt.hist(s[900:1800], 70, density=1, facecolor='red', alpha=0.75, label='Population 2')\nplt.hist(s[1800:], 70, density=1, facecolor='blue', alpha=0.75, label='Population 3')\nplt.title('Disturbution of hidden variable used to generate data', fontsize=15)\nplt.xlabel('True Latent Variable Value', fontsize=15)\nplt.ylabel('Probability Density', fontsize=15)\nplt.tick_params(labelsize=12, width=1, length=10)\nplt.legend(loc='best', fontsize=15)\nplt.show()", "Now we will pass the true latent variable to the blackbox image generator to generate some images. Below are the example\nimages from the three populations. They may seems to have no difference but neural network will pick up some subtle features\nusually.", "# We have some images, each has 4000 pixels\nx_train = np.zeros((len(s), original_dim))\nfor counter, S in enumerate(s):\n xs = np.linspace(0, 40, original_dim)\n x_train[counter] = blackbox_image_generator(xs, 20, S)\n\n# Prevent nan causes error\nx_train[np.isnan(x_train.astype(float))] = 0\n\nx_train *= 10\n\n# Add some noise to our images\nx_train += np.random.normal(0, 0.2, x_train.shape)\n\nplt.figure(figsize=(8, 8))\nplt.title('Example image from Population 1', fontsize=15)\nplt.plot(x_train[500])\nplt.xlabel('Pixel', fontsize=15)\nplt.ylabel('Flux', fontsize=15)\nplt.tick_params(labelsize=12, width=1, length=10)\nplt.show()\n\nplt.figure(figsize=(8, 8))\nplt.title('Example image from Population 2', fontsize=15)\nplt.plot(x_train[1000])\nplt.xlabel('Pixel', fontsize=15)\nplt.ylabel('Flux', fontsize=15)\nplt.tick_params(labelsize=12, width=1, length=10)\nplt.show()\n\nplt.figure(figsize=(8, 8))\nplt.title('Example image from Population 3', fontsize=15)\nplt.plot(x_train[1600])\nplt.xlabel('Pixel', fontsize=15)\nplt.ylabel('Flux', fontsize=15)\nplt.tick_params(labelsize=12, width=1, length=10)\nplt.show()", "Now we will pass the images to the neural network and train with them.", "latent_dim = 1 # Dimension of our latent space\nvae, encoder = model_vae(latent_dim)\nvae.compile(optimizer='rmsprop', loss=nll, \n weighted_metrics=None,\n loss_weights=None,\n sample_weight_mode=None)\n\nvae.fit(x_train, x_train, shuffle=True, epochs=epochs, batch_size=batch_size, verbose=0)\n\nz_test = encoder.predict(x_train, batch_size=batch_size)\n\nplt.figure(figsize=(12, 12))\nplt.hist(z_test[:900], 70, density=1, facecolor='green', alpha=0.75, label='Population 1')\nplt.hist(z_test[900:1800], 70, density=1, facecolor='red', alpha=0.75, label='Population 2')\nplt.hist(z_test[1800:], 70, density=1, facecolor='blue', alpha=0.75, label='Population 3')\nplt.title('Disturbution of latent variable value from neural net', fontsize=15)\nplt.xlabel('Latent Variable Value from Neural Net', fontsize=15)\nplt.ylabel('Probability Density', fontsize=15)\nplt.tick_params(labelsize=12, width=1, length=10)\nplt.legend(loc='best', fontsize=15)\nplt.show()", "Yay!! Seems like the neural network recovered the three population successfully. 
Althought the recovered latent variable is not exactly the same as the original ones we generated (I mean at least the scale isn't the same), usually you won't expect the neural network can learn the real phyiscs. In this case, the latent variable is just some transformations from the original ones.\nYou should still remember that we have fixed the first latent variable of the blackbox image generator. What happes if we also generate 3 populations for the first latent variable, and the first latent variable will have no correlation with the second latent variable (Meaning if you know the first latent value of an object, you have no information gain on the second latent value of that object because the first and second have nothing to do with each other)", "m_1A = np.random.normal(28, 2, 300)\nm_1B = np.random.normal(19, 2, 300)\nm_1C = np.random.normal(12, 1, 300)\n\nm_2A = np.random.normal(28, 2, 300)\nm_2B = np.random.normal(19, 2, 300)\nm_2C = np.random.normal(12, 1, 300)\n\nm_3A = np.random.normal(28, 2, 300)\nm_3B = np.random.normal(19, 2, 300)\nm_3C = np.random.normal(12, 1, 300)\n\nm = np.concatenate([m_1A, m_1B, m_1C, m_2A, m_2B, m_2C, m_3A, m_3B, m_3C])\n\nx_train = np.zeros((len(s), original_dim))\nfor counter in range(len(s)):\n xs = np.linspace(0, 40, original_dim)\n x_train[counter] = blackbox_image_generator(xs, m[counter], s[counter])\n \n# Prevent nan causes error\nx_train[np.isnan(x_train.astype(float))] = 0\n\nx_train *= 10\n\n# Add some noise to our images\nx_train += np.random.normal(0, 0.1, x_train.shape) \n\n\nplt.figure(figsize=(12, 12))\nplt.hist(s[:900], 70, density=1, facecolor='green', alpha=0.75, label='Population 1')\nplt.hist(s[900:1800], 70, density=1, facecolor='red', alpha=0.75, label='Population 2')\nplt.hist(s[1800:], 70, density=1, facecolor='blue', alpha=0.75, label='Population 3')\nplt.title('Disturbution of hidden variable 1 used to generate data', fontsize=15)\nplt.xlabel('True Latent Variable Value', fontsize=15)\nplt.ylabel('Probability Density', fontsize=15)\nplt.tick_params(labelsize=12, width=1, length=10)\nplt.legend(loc='best', fontsize=15)\nplt.show()\n\nplt.figure(figsize=(12, 12))\nplt.hist(m[:900], 70, density=1, facecolor='green', alpha=0.75, label='Population 1')\nplt.hist(m[900:1800], 70, density=1, facecolor='red', alpha=0.75, label='Population 2')\nplt.hist(m[1800:], 70, density=1, facecolor='blue', alpha=0.75, label='Population 3')\nplt.title('Disturbution of hidden variable 2 used to generate data', fontsize=15)\nplt.xlabel('True Latent Variable Value', fontsize=15)\nplt.ylabel('Probability Density', fontsize=15)\nplt.tick_params(labelsize=12, width=1, length=10)\nplt.legend(loc='best', fontsize=15)\nplt.show()", "Since we have two independent variables to generate our images, what happened if you still try to force the neural network to explain the images with just one variable?\nBefore we run the training, we should think about what we expect first. Lets denate the first latent variable population as 1, 2 and 3 , while the second latent variable population as A, B and C. If we know an object is in population 2, it has equal chance that its in population A, B and C. With this logic, we should have 9 unique population in total (1A, 1B, 1C, 2A, 2B, 2C, 3A, 3B, 3C). 
If the neural network want to explain the images with 1 latent variable, it should has 9 peaks in the plot.", "latent_dim = 1 # Dimension of our latent space\nvae, encoder = model_vae(latent_dim)\nvae.compile(optimizer='rmsprop', loss=nll, \n weighted_metrics=None,\n loss_weights=None,\n sample_weight_mode=None)\n\nepochs = 15\n\nvae.fit(x_train, x_train, shuffle=True, epochs=epochs, batch_size=batch_size, verbose=0)\n\nz_test = encoder.predict(x_train, batch_size=batch_size)\n\nplt.figure(figsize=(12, 12))\n# plt.hist(z_test[:900], 70, density=1, facecolor='green', alpha=0.75, label='Population 1')\n# plt.hist(z_test[900:1800], 70, density=1, facecolor='red', alpha=0.75, label='Population 2')\n# plt.hist(z_test[1800:], 70, density=1, facecolor='blue', alpha=0.75, label='Population 3')\n\nplt.hist(z_test[:300], 70, density=1, alpha=0.75, label='Population 1A')\nplt.hist(z_test[300:600], 70, density=1, alpha=0.75, label='Population 1B')\nplt.hist(z_test[600:900], 70, density=1, alpha=0.75, label='Population 1C')\n\nplt.hist(z_test[900:1200], 70, density=1, alpha=0.75, label='Population 2A')\nplt.hist(z_test[1200:1500], 70, density=1, alpha=0.75, label='Population 2B')\nplt.hist(z_test[1500:1800], 70, density=1, alpha=0.75, label='Population 2C')\n\nplt.hist(z_test[1800:2100], 70, density=1, alpha=0.75, label='Population 3A')\nplt.hist(z_test[2100:2400], 70, density=1, alpha=0.75, label='Population 3B')\nplt.hist(z_test[2400:2700], 70, density=1, alpha=0.75, label='Population 3C')\n\nplt.title('Disturbution of latent variable value from neural net', fontsize=15)\nplt.xlabel('Latent Variable Value from Neural Net', fontsize=15)\nplt.ylabel('Probability Density', fontsize=15)\nplt.tick_params(labelsize=12, width=1, length=10)\nplt.legend(loc='best', fontsize=15)\nplt.show()", "By visual inspection, seems like the neural network only recovered 6 population :(\nWhat will happen if we increase the latent space of the nerual network to 2?", "latent_dim = 2 # Dimension of our latent space\nepochs = 40\n\nvae, encoder = model_vae(latent_dim)\nvae.compile(optimizer='rmsprop', loss=nll, \n weighted_metrics=None,\n loss_weights=None,\n sample_weight_mode=None)\nvae.fit(x_train, x_train, shuffle=True, epochs=epochs, batch_size=batch_size, verbose=0)\n\nz_test = encoder.predict(x_train, batch_size=batch_size)\n\nplt.figure(figsize=(12, 12))\nplt.scatter(z_test[:300, 0], z_test[:300, 1], s=4, label='Population 1A')\nplt.scatter(z_test[300:600, 0], z_test[300:600, 1], s=4, label='Population 1B')\nplt.scatter(z_test[600:900, 0], z_test[600:900, 1], s=4, label='Population 1C')\n\nplt.scatter(z_test[900:1200, 0], z_test[900:1200, 1], s=4, label='Population 2A')\nplt.scatter(z_test[1200:1500, 0], z_test[1200:1500, 1], s=4, label='Population 2B')\nplt.scatter(z_test[1500:1800, 0], z_test[1500:1800, 1], s=4, label='Population 2C')\n\nplt.scatter(z_test[1800:2100, 0], z_test[1800:2100, 1], s=4, label='Population 3A')\nplt.scatter(z_test[2100:2400, 0], z_test[2100:2400, 1], s=4, label='Population 3B')\nplt.scatter(z_test[2400:2700, 0], z_test[2400:2700, 1], s=4, label='Population 3C')\n\nplt.title('Latent Space (Middle layer of Neurones)', fontsize=15)\nplt.xlabel('Second Latent Variable (Neurone)', fontsize=15)\nplt.ylabel('First Latent Variable (Neurone)', fontsize=15)\nplt.tick_params(labelsize=12, width=1, length=10)\nplt.legend(loc='best', fontsize=15, markerscale=6)\nplt.show()", "Why using Mean-Square-Error as reconstruction loss is a bad idea?\nMean-Square-Error: $\\frac{1}{T} \\sum (\\hat 
x-x)^2$\nGaussian distribution: $P(x|\mu,\sigma)=\frac{1}{C} \exp(-\frac{(\mu-x)^2}{2\sigma^2})$\nLet's set $\mu$ to our reconstruction $\hat x$ and $\sigma$ to 1 for simplicity,\nso $p(x|\hat x) \propto \exp(-\frac{(\hat x-x)^2}{2})$\nTaking the log, $\log p(x|\hat x) \propto -\sum (\hat x-x)^2$\nThis means that minimizing the mean-square-error (MSE/L2) objective is equivalent to maximizing the log-likelihood of a Gaussian, i.e. we are implicitly assuming our data is Gaussian, which may or may not be true.\nThat's why reconstructed images are blurry when we use a mean-square-error loss: real-world data very likely comes from a multimodal distribution, yet the MSE objective fits a single Gaussian to it during optimization.\nIn the real world, even though the data distribution is multimodal, $P(x|z)$ can be unimodal and well fitted by a Gaussian if $z$ is informative enough, and as shown in this demo, $z$ can be quite informative.\nFor reference: arXiv:1511.05440, arXiv:1702.08658" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
turbomanage/training-data-analyst
blogs/goes16/maria/hurricanes2017.ipynb
apache-2.0
[ "2017 Hurricane Tracks\nDemonstrates how to plot all the North American hurricane tracks in 2017, starting from the BigQuery public dataset.", "%bash\napt-get update\napt-get -y install python-mpltoolkits.basemap \n\nfrom mpl_toolkits.basemap import Basemap\nimport google.datalab.bigquery as bq\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\nquery=\"\"\"\n#standardSQL\nSELECT\n name,\n latitude,\n longitude,\n iso_time,\n usa_sshs\nFROM\n `bigquery-public-data.noaa_hurricanes.hurricanes`\nWHERE\n basin = 'NA'\n AND season = '2017'\n\"\"\"\n\ndf = bq.Query(query).execute().result().to_dataframe()\n\ndf.head()", "Plot one of the hurricanes\nLet's just plot the track of Hurricane MARIA", "maria = df[df['name'] == 'MARIA'].sort_values('iso_time')\n\nm = Basemap(llcrnrlon=-100.,llcrnrlat=0.,urcrnrlon=-20.,urcrnrlat=57.,\n projection='lcc',lat_1=20.,lat_2=40.,lon_0=-60.,\n resolution ='l',area_thresh=1000.)\nx, y = m(maria['longitude'].values,maria['latitude'].values)\nm.plot(x,y,linewidth=5,color='r')\n\n# draw coastlines, meridians and parallels.\nm.drawcoastlines()\nm.drawcountries()\nm.drawmapboundary(fill_color='#99ffff')\nm.fillcontinents(color='#cc9966',lake_color='#99ffff')\nm.drawparallels(np.arange(10,70,20),labels=[1,1,0,0])\nm.drawmeridians(np.arange(-100,0,20),labels=[0,0,0,1])\nplt.title('Hurricane Maria (2017)');", "Plot all the hurricanes\nUse line thickness based on the maximum category reached by the hurricane", "names = df.name.unique()\nnames\n\nm = Basemap(llcrnrlon=-100.,llcrnrlat=0.,urcrnrlon=-20.,urcrnrlat=57.,\n projection='lcc',lat_1=20.,lat_2=40.,lon_0=-60.,\n resolution ='l',area_thresh=1000.)\n\nfor name in names:\n if name != 'NOT_NAMED':\n named = df[df['name'] == name].sort_values('iso_time')\n x, y = m(named['longitude'].values,named['latitude'].values)\n maxcat = max(named['usa_sshs'])\n m.plot(x,y,linewidth=maxcat,color='b')\n \n# draw coastlines, meridians and parallels.\nm.drawcoastlines()\nm.drawcountries()\nm.drawmapboundary(fill_color='#99ffff')\nm.fillcontinents(color='#cc9966',lake_color='#99ffff')\nm.drawparallels(np.arange(10,70,20),labels=[1,1,0,0])\nm.drawmeridians(np.arange(-100,0,20),labels=[0,0,0,1])\nplt.title('Named North-Atlantic hurricanes (2017)');", "Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the \\\"License\\\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \\\"AS IS\\\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
probml/pyprobml
notebooks/misc/linreg_divorce_numpyro.ipynb
mit
[ "<a href=\"https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/linreg_divorce_numpyro.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\nRobust linear regression\nWe illustrate linear using the \"waffle divorce\" example in sec 5.1 of Statistical Rethinking ed 2. \nThe numpyro code is from Du Phan's site", "!pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro\n!pip install -q arviz\n\nimport numpy as np\n\nnp.set_printoptions(precision=3)\nimport matplotlib.pyplot as plt\nimport math\nimport os\nimport warnings\nimport pandas as pd\n\nimport jax\n\nprint(\"jax version {}\".format(jax.__version__))\nprint(\"jax backend {}\".format(jax.lib.xla_bridge.get_backend().platform))\n\nimport jax.numpy as jnp\nfrom jax import random, vmap\n\nrng_key = random.PRNGKey(0)\nrng_key, rng_key_ = random.split(rng_key)\n\nimport numpyro\nimport numpyro.distributions as dist\nfrom numpyro.distributions import constraints\nfrom numpyro.distributions.transforms import AffineTransform\nfrom numpyro.diagnostics import hpdi, print_summary\nfrom numpyro.infer import Predictive, log_likelihood\nfrom numpyro.infer import MCMC, NUTS\nfrom numpyro.infer import SVI, Trace_ELBO, init_to_value\nfrom numpyro.infer.autoguide import AutoLaplaceApproximation\nimport numpyro.optim as optim\n\n\nimport arviz as az", "Data\nThe data records the divorce rate $D$, marriage rate $M$, and average age $A$ that people get married at for 50 US states.", "# load data and copy\nurl = \"https://raw.githubusercontent.com/fehiepsi/rethinking-numpyro/master/data/WaffleDivorce.csv\"\nWaffleDivorce = pd.read_csv(url, sep=\";\")\nd = WaffleDivorce\n\n# standardize variables\nd[\"A\"] = d.MedianAgeMarriage.pipe(lambda x: (x - x.mean()) / x.std())\nd[\"D\"] = d.Divorce.pipe(lambda x: (x - x.mean()) / x.std())\nd[\"M\"] = d.Marriage.pipe(lambda x: (x - x.mean()) / x.std())", "Model (Gaussian likelihood)\nWe predict divorce rate D given marriage rate M and age A.", "def model(M, A, D=None):\n a = numpyro.sample(\"a\", dist.Normal(0, 0.2))\n bM = numpyro.sample(\"bM\", dist.Normal(0, 0.5))\n bA = numpyro.sample(\"bA\", dist.Normal(0, 0.5))\n sigma = numpyro.sample(\"sigma\", dist.Exponential(1))\n mu = numpyro.deterministic(\"mu\", a + bM * M + bA * A)\n numpyro.sample(\"D\", dist.Normal(mu, sigma), obs=D)\n\n\nm5_3 = AutoLaplaceApproximation(model)\nsvi = SVI(model, m5_3, optim.Adam(1), Trace_ELBO(), M=d.M.values, A=d.A.values, D=d.D.values)\np5_3, losses = svi.run(random.PRNGKey(0), 1000)\npost = m5_3.sample_posterior(random.PRNGKey(1), p5_3, (1000,))\n\nparam_names = {\"a\", \"bA\", \"bM\", \"sigma\"}\nfor p in param_names:\n print(f\"posterior for {p}\")\n print_summary(post[p], 0.95, False)", "Posterior predicted vs actual", "# call predictive without specifying new data\n# so it uses original data\npost = m5_3.sample_posterior(random.PRNGKey(1), p5_3, (int(1e4),))\npost_pred = Predictive(m5_3.model, post)(random.PRNGKey(2), M=d.M.values, A=d.A.values)\nmu = post_pred[\"mu\"]\n\n# summarize samples across cases\nmu_mean = jnp.mean(mu, 0)\nmu_PI = jnp.percentile(mu, q=(5.5, 94.5), axis=0)\n\nax = plt.subplot(ylim=(float(mu_PI.min()), float(mu_PI.max())), xlabel=\"Observed divorce\", ylabel=\"Predicted divorce\")\nplt.plot(d.D, mu_mean, \"o\")\nx = jnp.linspace(mu_PI.min(), mu_PI.max(), 101)\nplt.plot(x, x, \"--\")\nfor i in range(d.shape[0]):\n plt.plot([d.D[i]] * 2, mu_PI[:, i], \"b\")\nfig = plt.gcf()\n\nfor i in 
range(d.shape[0]):\n if d.Loc[i] in [\"ID\", \"UT\", \"AR\", \"ME\"]:\n ax.annotate(d.Loc[i], (d.D[i], mu_mean[i]), xytext=(-25, -5), textcoords=\"offset pixels\")\nplt.tight_layout()\nplt.savefig(\"linreg_divorce_postpred.pdf\")\nplt.show()\nfig", "Per-point LOO scores\nWe compute the predicted probability of each point given the others, following\nsec 7.5.2 of Statistical Rethinking ed 2. \nThe numpyro code is from Du Phan's site", "# post = m5_3.sample_posterior(random.PRNGKey(24071847), p5_3, (1000,))\nlogprob = log_likelihood(m5_3.model, post, A=d.A.values, M=d.M.values, D=d.D.values)[\"D\"]\naz5_3 = az.from_dict(\n posterior={k: v[None, ...] for k, v in post.items()},\n log_likelihood={\"D\": logprob[None, ...]},\n)\n\nPSIS_m5_3 = az.loo(az5_3, pointwise=True, scale=\"deviance\")\nWAIC_m5_3 = az.waic(az5_3, pointwise=True, scale=\"deviance\")\npenalty = az5_3.log_likelihood.stack(sample=(\"chain\", \"draw\")).var(dim=\"sample\")\n\nfig, ax = plt.subplots()\nax.plot(PSIS_m5_3.pareto_k.values, penalty.D.values, \"o\", mfc=\"none\")\nax.set_xlabel(\"PSIS Pareto k\")\nax.set_ylabel(\"WAIC penalty\")\n\nplt.savefig(\"linreg_divorce_waic_vs_pareto.pdf\")\nplt.show()\nplt.show()\n\npareto = PSIS_m5_3.pareto_k.values\nwaic = penalty.D.values\nndx = np.where(pareto > 0.4)[0]\nfor i in ndx:\n print(d.Loc[i], pareto[i], waic[i])\n\n\nfor i in ndx:\n ax.annotate(d.Loc[i], (pareto[i], waic[i]), xytext=(5, 0), textcoords=\"offset pixels\")\nfig", "Student likelihood", "def model(M, A, D=None):\n a = numpyro.sample(\"a\", dist.Normal(0, 0.2))\n bM = numpyro.sample(\"bM\", dist.Normal(0, 0.5))\n bA = numpyro.sample(\"bA\", dist.Normal(0, 0.5))\n sigma = numpyro.sample(\"sigma\", dist.Exponential(1))\n # mu = a + bM * M + bA * A\n mu = numpyro.deterministic(\"mu\", a + bM * M + bA * A)\n numpyro.sample(\"D\", dist.StudentT(2, mu, sigma), obs=D)\n\n\nm5_3t = AutoLaplaceApproximation(model)\nsvi = SVI(model, m5_3t, optim.Adam(0.3), Trace_ELBO(), M=d.M.values, A=d.A.values, D=d.D.values)\np5_3t, losses = svi.run(random.PRNGKey(0), 1000)\n\n# call predictive without specifying new data\n# so it uses original data\npost_t = m5_3t.sample_posterior(random.PRNGKey(1), p5_3t, (int(1e4),))\npost_pred_t = Predictive(m5_3t.model, post_t)(random.PRNGKey(2), M=d.M.values, A=d.A.values)\nmu = post_pred_t[\"mu\"]\n\n# summarize samples across cases\nmu_mean = jnp.mean(mu, 0)\nmu_PI = jnp.percentile(mu, q=(5.5, 94.5), axis=0)\n\n\nax = plt.subplot(ylim=(float(mu_PI.min()), float(mu_PI.max())), xlabel=\"Observed divorce\", ylabel=\"Predicted divorce\")\nplt.plot(d.D, mu_mean, \"o\")\nx = jnp.linspace(mu_PI.min(), mu_PI.max(), 101)\nplt.plot(x, x, \"--\")\nfor i in range(d.shape[0]):\n plt.plot([d.D[i]] * 2, mu_PI[:, i], \"b\")\nfig = plt.gcf()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
saketkc/notebooks
python/coursera-BayesianML/04_mcmc_assignment.ipynb
bsd-2-clause
[ "<a href=\"https://colab.research.google.com/github/saketkc/notebooks/blob/master/python/coursera-BayesianML/04_mcmc_assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\nFirst things first\nClick File -> Save a copy in Drive and click Open in new tab in the pop-up window to save your progress in Google Drive.\nUsing PyMC3\nIn this assignment, we will learn how to use a library for probabilistic programming and inference called <a href=\"http://docs.pymc.io/\">PyMC3</a>.\nSetup\nLoading auxiliary files and importing the necessary libraries.", "pip install -U pymc3 arviz \n\n\ntry:\n import google.colab\n IN_COLAB = True\nexcept:\n IN_COLAB = False\nif IN_COLAB:\n print(\"Downloading Colab files\")\n ! shred -u setup_google_colab.py\n ! wget https://raw.githubusercontent.com/hse-aml/bayesian-methods-for-ml/master/setup_google_colab.py -O setup_google_colab.py\n import setup_google_colab\n setup_google_colab.load_data_week4()\n\nimport numpy as np\nimport pandas as pd\nimport numpy.random as rnd\nimport seaborn as sns\nfrom matplotlib import animation\nimport pymc3 as pm\nfrom w4_grader import MCMCGrader\n%pylab inline", "Grading\nWe will create a grader instance below and use it to collect your answers. Note that these outputs will be stored locally inside grader and will be uploaded to the platform only after running submitting function in the last part of this assignment. If you want to make a partial submission, you can run that cell anytime you want.", "grader = MCMCGrader()", "Task 1. Alice and Bob\nAlice and Bob are trading on the market. Both of them are selling the Thing and want to get as high profit as possible.\nEvery hour they check out with each other's prices and adjust their prices to compete on the market. Although they have different strategies for price setting.\nAlice: takes Bob's price during the previous hour, multiply by 0.6, add \\$90, add Gaussian noise from $N(0, 20^2)$.\nBob: takes Alice's price during the current hour, multiply by 1.2 and subtract \\$20, add Gaussian noise from $N(0, 10^2)$.\nThe problem is to find the joint distribution of Alice and Bob's prices after many hours of such an experiment.\nTask 1.1\nImplement the run_simulation function according to the description above.", "def run_simulation(alice_start_price=300.0, bob_start_price=300.0, seed=42, num_hours=10000, burnin=1000):\n \"\"\"Simulates an evolution of prices set by Bob and Alice.\n \n The function should simulate Alice and Bob behavior for `burnin' hours, then ignore the obtained\n simulation results, and then simulate it for `num_hours' more.\n The initial burnin (also sometimes called warmup) is done to make sure that the distribution stabilized.\n \n Please don't change the signature of the function.\n \n Returns:\n two lists, with Alice and with Bob prices. 
Both lists should be of length num_hours.\n \"\"\"\n np.random.seed(seed)\n\n alice_prices = [alice_start_price]\n bob_prices = [bob_start_price]\n \n #### YOUR CODE HERE ####\n for hour in range(burnin + num_hours - 1):\n #Alice: takes Bob's price during the previous hour, multiply by 0.6, add $90, add Gaussian noise from N(0,202) .\n #Bob: takes Alice's price during the current hour, multiply by 1.2 and subtract $20, add Gaussian noise from N(0,102) .\n alice_current = bob_prices[-1]*0.6 + 90 + np.random.normal(loc=0, scale=20)\n bob_current = alice_current*1.2 - 20 + np.random.normal(loc=0, scale=10)\n\n alice_prices.append(alice_current)\n bob_prices.append(bob_current)\n \n \n ### END OF YOUR CODE ###\n #print(len(alice_prices[burnin:]), len(bob_prices[burnin:]))\n return alice_prices[burnin:], bob_prices[burnin:]\n\nalice_prices, bob_prices = run_simulation(alice_start_price=300, bob_start_price=300, seed=42, num_hours=3, burnin=1)\nif len(alice_prices) != 3:\n raise RuntimeError('Make sure that the function returns `num_hours` data points.')\ngrader.submit_simulation_trajectory(alice_prices, bob_prices)", "Task 1.2\nWhat is the average price for Alice and Bob after the burn-in period? Whose prices are higher?", "#### YOUR CODE HERE ####\nalice_prices, bob_prices = run_simulation(alice_start_price=300, bob_start_price=300)\n\naverage_alice_price = np.mean(alice_prices)\naverage_bob_price = np.mean(bob_prices)\n### END OF YOUR CODE ###\ngrader.submit_simulation_mean(average_alice_price, average_bob_price)", "Task 1.3\nLet's look at the 2-d histogram of prices, computed using kernel density estimation.", "data = np.array(run_simulation())\nsns.jointplot(data[0, :], data[1, :], stat_func=None, kind='kde')", "Clearly, the prices of Bob and Alce are highly correlated. What is the Pearson correlation coefficient of Alice and Bob prices?", "#### YOUR CODE HERE ####\ncorrelation = np.corrcoef(alice_prices, bob_prices)[0,1]\n### END OF YOUR CODE ###\ngrader.submit_simulation_correlation(correlation)", "Task 1.4\nWe observe an interesting effect here: seems like the bivariate distribution of Alice and Bob prices converges to a correlated bivariate Gaussian distribution.\nLet's check, whether the results change if we use different random seed and starting points.", "# Pick different starting prices, e.g 10, 1000, 10000 for Bob and Alice. \n# Does the joint distribution of the two prices depend on these parameters?\nPOSSIBLE_ANSWERS = {\n 0: 'Depends on random seed and starting prices', \n 1: 'Depends only on random seed',\n 2: 'Depends only on starting prices',\n 3: 'Does not depend on random seed and starting prices'\n}\n\nidx = 3### TYPE THE INDEX OF THE CORRECT ANSWER HERE ###\nanswer = POSSIBLE_ANSWERS[idx]\ngrader.submit_simulation_depends(answer)", "Task 2. Logistic regression with PyMC3\nLogistic regression is a powerful model that allows you to analyze how a set of features affects some binary target label. Posterior distribution over the weights gives us an estimation of the influence of each particular feature on the probability of the target being equal to one. But most importantly, posterior distribution gives us the interval estimates for each weight of the model. 
This is very important for data analysis when you want to not only provide a good model but also estimate the uncertainty of your conclusions.\nIn this task, we will learn how to use PyMC3 library to perform approximate Bayesian inference for logistic regression.\nThis part of the assignment is based on the logistic regression tutorial by Peadar Coyle and J. Benjamin Cook.\nLogistic regression.\nThe problem here is to model how the probability that a person has salary $\\geq$ \\$50K is affected by his/her age, education, sex and other features.\nLet $y_i = 1$ if i-th person's salary is $\\geq$ \\$50K and $y_i = 0$ otherwise. Let $x_{ij}$ be $j$-th feature of $i$-th person.\nLogistic regression models this probabilty in the following way:\n$$p(y_i = 1 \\mid \\beta) = \\sigma (\\beta_1 x_{i1} + \\beta_2 x_{i2} + \\dots + \\beta_k x_{ik} ), $$\nwhere $\\sigma(t) = \\frac1{1 + e^{-t}}$\nOdds ratio.\nLet's try to answer the following question: does the gender of a person affects his or her salary? To do it we will use the concept of odds.\nIf we have a binary random variable $y$ (which may indicate whether a person makes \\$50K) and if the probabilty of the positive outcome $p(y = 1)$ is for example 0.8, we will say that the odds are 4 to 1 (or just 4 for short), because succeding is 4 time more likely than failing $\\frac{p(y = 1)}{p(y = 0)} = \\frac{0.8}{0.2} = 4$.\nNow, let's return to the effect of gender on the salary. Let's compute the ratio between the odds of a male having salary $\\geq $ \\$50K and the odds of a female (with the same level of education, experience and everything else) having salary $\\geq$ \\$50K. The first feature of each person in the dataset is gender. Specifically, $x_{i1} = 0$ if the person is female and $x_{i1} = 1$ otherwise. Consider two people $i$ and $j$ having all but one features the same with the only difference in $x_{i1} \\neq x_{j1}$.\nIf the logistic regression model above estimates the probabilities exactly, the odds for a male will be (check it!):\n$$\n\\frac{p(y_i = 1 \\mid x_{i1}=1, x_{i2}, \\ldots, x_{ik})}{p(y_i = 0 \\mid x_{i1}=1, x_{i2}, \\ldots, x_{ik})} = \\frac{\\sigma(\\beta_1 + \\beta_2 x_{i2} + \\ldots)}{1 - \\sigma(\\beta_1 + \\beta_2 x_{i2} + \\ldots)} = \\exp(\\beta_1 + \\beta_2 x_{i2} + \\ldots)\n$$\nNow the ratio of the male and female odds will be:\n$$\n\\frac{\\exp(\\beta_1 \\cdot 1 + \\beta_2 x_{i2} + \\ldots)}{\\exp(\\beta_1 \\cdot 0 + \\beta_2 x_{i2} + \\ldots)} = \\exp(\\beta_1)\n$$\nSo given the correct logistic regression model, we can estimate odds ratio for some feature (gender in this example) by just looking at the corresponding coefficient. But of course, even if all the logistic regression assumptions are met we cannot estimate the coefficient exactly from real-world data, it's just too noisy. So it would be really nice to build an interval estimate, which would tell us something along the lines \"with probability 0.95 the odds ratio is greater than 0.8 and less than 1.2, so we cannot conclude that there is any gender discrimination in the salaries\" (or vice versa, that \"with probability 0.95 the odds ratio is greater than 1.5 and less than 1.9 and the discrimination takes place because a male has at least 1.5 higher probability to get >$50k than a female with the same level of education, age, etc.\"). In Bayesian statistics, this interval estimate is called credible interval.\nUnfortunately, it's impossible to compute this credible interval analytically. 
So let's use MCMC for that!\nCredible interval\nA credible interval for the value of $\\exp(\\beta_1)$ is an interval $[a, b]$ such that $p(a \\leq \\exp(\\beta_1) \\leq b \\mid X_{\\text{train}}, y_{\\text{train}})$ is $0.95$ (or some other predefined value). To compute the interval, we need access to the posterior distribution $p(\\exp(\\beta_1) \\mid X_{\\text{train}}, y_{\\text{train}})$.\nLets for simplicity focus on the posterior on the parameters $p(\\beta_1 \\mid X_{\\text{train}}, y_{\\text{train}})$ since if we compute it, we can always find $[a, b]$ such that $p(\\log a \\leq \\beta_1 \\leq \\log b \\mid X_{\\text{train}}, y_{\\text{train}}) = p(a \\leq \\exp(\\beta_1) \\leq b \\mid X_{\\text{train}}, y_{\\text{train}}) = 0.95$\nTask 2.1 MAP inference\nLet's read the dataset. This is a post-processed version of the UCI Adult dataset.", "data = pd.read_csv(\"adult_us_postprocessed.csv\")\ndata.head()", "Each row of the dataset is a person with his (her) features. The last column is the target variable $y$. One indicates that this person's annual salary is more than $50K.\nFirst of all let's set up a Bayesian logistic regression model (i.e. define priors on the parameters $\\alpha$ and $\\beta$ of the model) that predicts the value of \"income_more_50K\" based on person's age and education:\n$$\np(y = 1 \\mid \\alpha, \\beta_1, \\beta_2) = \\sigma(\\alpha + \\beta_1 x_1 + \\beta_2 x_2) \\ \n\\alpha \\sim N(0, 100^2) \\\n\\beta_1 \\sim N(0, 100^2) \\\n\\beta_2 \\sim N(0, 100^2), \\\n$$\nwhere $x_1$ is a person's age, $x_2$ is his/her level of education, y indicates his/her level of income, $\\alpha$, $\\beta_1$ and $\\beta_2$ are paramters of the model.", "with pm.Model() as manual_logistic_model:\n # Declare pymc random variables for logistic regression coefficients with uninformative \n # prior distributions N(0, 100^2) on each weight using pm.Normal. \n # Don't forget to give each variable a unique name.\n \n #### YOUR CODE HERE ####\n\n alpha = pm.Normal('alpha', mu=0, sigma=100)\n beta1 = pm.Normal('beta1', mu=0, sigma=100)\n beta2 = pm.Normal('beta2', mu=0, sigma=100)\n \n ### END OF YOUR CODE ###\n \n # Transform these random variables into vector of probabilities p(y_i=1) using logistic regression model specified \n # above. PyMC random variables are theano shared variables and support simple mathematical operations.\n # For example:\n # z = pm.Normal('x', 0, 1) * np.array([1, 2, 3]) + pm.Normal('y', 0, 1) * np.array([4, 5, 6])`\n # is a correct PyMC expression.\n # Use pm.invlogit for the sigmoid function.\n \n #### YOUR CODE HERE ####\n \n prob = pm.invlogit(alpha + beta1*data['age'] + beta2*data['educ'] )\n ### END OF YOUR CODE ###\n \n # Declare PyMC Bernoulli random vector with probability of success equal to the corresponding value\n # given by the sigmoid function.\n # Supply target vector using \"observed\" argument in the constructor.\n\n #### YOUR CODE HERE ####\n likelihood = pm.Bernoulli('likelihood', p=prob, observed=data['income_more_50K'])\n ### END OF YOUR CODE ###\n \n # Use pm.find_MAP() to find the maximum a-posteriori estimate for the vector of logistic regression weights.\n map_estimate = pm.find_MAP()\n print(map_estimate)\n\n", "Sumbit MAP estimations of corresponding coefficients:", "with pm.Model() as logistic_model:\n # There's a simpler interface for generalized linear models in pymc3. 
\n # Try to train the same model using pm.glm.GLM.from_formula.\n # Do not forget to specify that the target variable is binary (and hence follows Binomial distribution).\n \n #### YOUR CODE HERE ####\n formula = 'income_more_50K ~ age + educ'\n likelihood = pm.glm.GLM.from_formula(formula, data, family=pm.glm.families.Binomial())\n ### END OF YOUR CODE ###\n map_estimate = pm.find_MAP()\n print(map_estimate)\n\nbeta_age_coefficient = 0.04348259### TYPE MAP ESTIMATE OF THE AGE COEFFICIENT HERE ###\nbeta_education_coefficient = 0.36210894### TYPE MAP ESTIMATE OF THE EDUCATION COEFFICIENT HERE ###\ngrader.submit_pymc_map_estimates(beta_age_coefficient, beta_education_coefficient)", "Task 2.2 MCMC\nTo find credible regions let's perform MCMC inference.", "# You will need the following function to visualize the sampling process.\n# You don't need to change it.\ndef plot_traces(traces, burnin=200):\n ''' \n Convenience function:\n Plot traces with overlaid means and values\n '''\n \n ax = pm.traceplot(traces[burnin:], figsize=(12,len(traces.varnames)*1.5),\n lines={k: v['mean'] for k, v in pm.summary(traces[burnin:]).iterrows()})\n\n for i, mn in enumerate(pm.summary(traces[burnin:])['mean']):\n ax[i,0].annotate('{:.2f}'.format(mn), xy=(mn,0), xycoords='data'\n ,xytext=(5,10), textcoords='offset points', rotation=90\n ,va='bottom', fontsize='large', color='#AA0022')", "Metropolis-Hastings\nLet's use the Metropolis-Hastings algorithm for finding the samples from the posterior distribution.\nOnce you wrote the code, explore the hyperparameters of Metropolis-Hastings such as the proposal distribution variance to speed up the convergence. You can use plot_traces function in the next cell to visually inspect the convergence.\nYou may also use MAP-estimate to initialize the sampling scheme to speed things up. This will make the warmup (burn-in) period shorter since you will start from a probable point.", "with pm.Model() as logistic_model:\n # Since it is unlikely that the dependency between the age and salary is linear, we will include age squared\n # into features so that we can model dependency that favors certain ages.\n # Train Bayesian logistic regression model on the following features: sex, age, age^2, educ, hours\n # Use pm.sample to run MCMC to train this model.\n # To specify the particular sampler method (Metropolis-Hastings) to pm.sample,\n # use `pm.Metropolis`.\n # Train your model for 400 samples.\n # Save the output of pm.sample to a variable: this is the trace of the sampling procedure and will be used\n # to estimate the statistics of the posterior distribution.\n \n #### YOUR CODE HERE ####\n data['age_squared'] = data['age'] ** 2\n formula = 'income_more_50K ~ sex + age + age_squared + educ + hours'\n likelihood = pm.glm.GLM.from_formula(formula, data, family=pm.glm.families.Binomial())\n trace = pm.sample(400, step=pm.Metropolis(), chains=1)\n ### END OF YOUR CODE ###\n\npm.__version__\n\nplot_traces(trace)", "NUTS sampler\nUse pm.sample without specifying a particular sampling method (pymc3 will choose it automatically).\nThe sampling algorithm that will be used in this case is NUTS, which is a form of Hamiltonian Monte Carlo, in which parameters are tuned automatically. 
This is an advanced method that we hadn't cover in the lectures, but it usually converges faster and gives less correlated samples compared to vanilla Metropolis-Hastings.", "with pm.Model() as logistic_model:\n # Train Bayesian logistic regression model on the following features: sex, age, age_squared, educ, hours\n # Use pm.sample to run MCMC to train this model.\n # Train your model for 400 samples.\n # Training can take a while, so relax and wait :)\n \n #### YOUR CODE HERE ####\n formula = 'income_more_50K ~ sex + age + age_squared + educ + hours'\n likelihood = pm.glm.GLM.from_formula(formula, data, family=pm.glm.families.Binomial())\n trace = pm.sample(400, step=pm.NUTS(), chains=1)\n ### END OF YOUR CODE ###\n\nplot_traces(trace)", "Estimating the odds ratio\nNow, let's build the posterior distribution on the odds ratio given the dataset (approximated by MCMC).", "# We don't need to use a large burn-in here, since we initialize sampling\n# from a good point (from our approximation of the most probable\n# point (MAP) to be more precise).\nburnin = 100\nb = trace['sex[T. Male]'][burnin:]\nplt.hist(np.exp(b), bins=20, density=True)\nplt.xlabel(\"Odds Ratio\")\nplt.show()", "Finally, we can find a credible interval (recall that credible intervals are Bayesian and confidence intervals are frequentist) for this quantity. This may be the best part about Bayesian statistics: we get to interpret credibility intervals the way we've always wanted to interpret them. We are 95% confident that the odds ratio lies within our interval!", "lb, ub = np.percentile(b, 2.5), np.percentile(b, 97.5)\nprint(\"P(%.3f < Odds Ratio < %.3f) = 0.95\" % (np.exp(lb), np.exp(ub)))\n\n# Submit the obtained credible interval.\ngrader.submit_pymc_odds_ratio_interval(np.exp(lb), np.exp(ub))", "Task 2.3 interpreting the results", "# Does the gender affects salary in the provided dataset?\n# (Note that the data is from 1996 and maybe not representative\n# of the current situation in the world.)\nPOSSIBLE_ANSWERS = {\n 0: 'No, there is certainly no discrimination',\n 1: 'We cannot say for sure',\n 2: 'Yes, we are 95% sure that a female is *less* likely to get >$50K than a male with the same age, level of education, etc.', \n 3: 'Yes, we are 95% sure that a female is *more* likely to get >$50K than a male with the same age, level of education, etc.', \n}\n\nidx = 2### TYPE THE INDEX OF THE CORRECT ANSWER HERE ###\nanswer = POSSIBLE_ANSWERS[idx]\ngrader.submit_is_there_discrimination(answer)", "Authorization & Submission\nTo submit assignment parts to Cousera platform, please, enter your e-mail and token into variables below. You can generate a token on this programming assignment's page. <b>Note:</b> The token expires 30 minutes after generation.", "STUDENT_EMAIL = '[email protected]'\nSTUDENT_TOKEN = '6r463miiML4NWB9M'\ngrader.status()", "If you want to submit these answers, run cell below", "grader.submit(STUDENT_EMAIL, STUDENT_TOKEN)", "(Optional) generating videos of sampling process\nIn this part you will generate videos showing the sampling process.\nSetting things up\nYou don't need to modify the code below, it sets up the plotting functions. 
The code is based on MCMC visualization tutorial.", "from IPython.display import HTML\n\n# Number of MCMC iteration to animate.\nsamples = 400\n\nfigsize(6, 6)\nfig = plt.figure()\ns_width = (0.81, 1.29)\na_width = (0.11, 0.39)\nsamples_width = (0, samples)\nax1 = fig.add_subplot(221, xlim=s_width, ylim=samples_width)\nax2 = fig.add_subplot(224, xlim=samples_width, ylim=a_width)\nax3 = fig.add_subplot(223, xlim=s_width, ylim=a_width,\n xlabel='male coef',\n ylabel='educ coef')\nfig.subplots_adjust(wspace=0.0, hspace=0.0)\nline1, = ax1.plot([], [], lw=1)\nline2, = ax2.plot([], [], lw=1)\nline3, = ax3.plot([], [], 'o', lw=2, alpha=.1)\nline4, = ax3.plot([], [], lw=1, alpha=.3)\nline5, = ax3.plot([], [], 'k', lw=1)\nline6, = ax3.plot([], [], 'k', lw=1)\nax1.set_xticklabels([])\nax2.set_yticklabels([])\nlines = [line1, line2, line3, line4, line5, line6]\n\ndef init():\n for line in lines:\n line.set_data([], [])\n return lines\n\ndef animate(i):\n with logistic_model:\n if i == 0:\n # Burnin\n for j in range(samples): iter_sample.__next__() \n trace = iter_sample.__next__()\n line1.set_data(trace['sex[T. Male]'][::-1], range(len(trace['sex[T. Male]'])))\n line2.set_data(range(len(trace['educ'])), trace['educ'][::-1])\n line3.set_data(trace['sex[T. Male]'], trace['educ'])\n line4.set_data(trace['sex[T. Male]'], trace['educ'])\n male = trace['sex[T. Male]'][-1]\n educ = trace['educ'][-1]\n line5.set_data([male, male], [educ, a_width[1]])\n line6.set_data([male, s_width[1]], [educ, educ])\n return lines", "Animating Metropolis-Hastings", "with pm.Model() as logistic_model:\n # Again define Bayesian logistic regression model on the following features: sex, age, age_squared, educ, hours\n \n #### YOUR CODE HERE ####\n \n ### END OF YOUR CODE ###\n step = pm.Metropolis()\n iter_sample = pm.iter_sample(2 * samples, step, start=map_estimate)\nanim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=samples, interval=5, blit=True)\nHTML(anim.to_html5_video())\n# Note that generating the video may take a while.", "Animating NUTS\nNow rerun the animation providing the NUTS sampling method as the step argument." ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
karlstroetmann/Formal-Languages
Ply/Conflicts-Resolved.ipynb
gpl-2.0
[ "from IPython.core.display import HTML\nwith open (\"../style.css\", \"r\") as file:\n css = file.read()\nHTML(css)", "Resolving Conflicts Using Precedence Declarations\nThis file shows how shift/reduce and reduce/reduce conflicts can be resolved using operator precedence declarations.\nThe following grammar is ambiguous because it does not specify the precedence of the arithmetical operators:\nexpr : expr '+' expr\n | expr '-' expr\n | expr '*' expr\n | expr '/' expr\n | expr '^' expr\n | '(' expr ')'\n | NUMBER \n ;\nWe will see how the use of precedence declarations can be used to resolve shift/reduce-conflicts.\nSpecification of the Scanner\nWe implement a minimal scanner for arithmetic expressions.", "import ply.lex as lex\n\ntokens = [ 'NUMBER' ]\n\ndef t_NUMBER(t):\n r'0|[1-9][0-9]*'\n t.value = int(t.value)\n return t\n\nliterals = ['+', '-', '*', '/', '^', '(', ')']\n\nt_ignore = ' \\t'\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += t.value.count('\\n')\n\ndef t_error(t):\n print(f\"Illegal character '{t.value[0]}'\")\n t.lexer.skip(1)\n\n__file__ = 'main'\n\nlexer = lex.lex()", "Specification of the Parser", "import ply.yacc as yacc", "The start variable of our grammar is expr, but we don't have to specify that. The default\nstart variable is the first variable that is defined.", "start = 'expr'", "The following operator precedence declarations declare that the operators '+'and '-' have a lower precedence than the operators '*'and '/'. The operator '^' has the highest precedence. Furthermore, the declarations specify that the operators '+', '-', '*', and '/' are left associative, while the operator '^' is declared as right associative using the keyword right.\nOperators can also be defined as being non-associative using the keyword nonassoc.", "precedence = (\n ('left', '+', '-') , # precedence 1\n ('left', '*', '/'), # precedence 2\n ('right', '^') # precedence 3\n)\n\ndef p_expr_plus(p):\n \"expr : expr '+' expr\"\n p[0] = ('+', p[1], p[3])\n \ndef p_expr_minus(p):\n \"expr : expr '-' expr\"\n p[0] = ('-', p[1], p[3])\n \ndef p_expr_mult(p): \n \"expr : expr '*' expr\"\n p[0] = ('*', p[1], p[3])\n \ndef p_expr_div(p): \n \"expr : expr '/' expr\"\n p[0] = ('/', p[1], p[3]) \n\ndef p_expr_power(p): \n \"expr : expr '^' expr\"\n p[0] = ('^', p[1], p[3])\n\ndef p_expr_paren(p): \n \"expr : '(' expr ')'\"\n p[0] = p[2]\n \ndef p_expr_NUMBER(p):\n \"expr : NUMBER\"\n p[0] = p[1]\n\ndef p_error(p):\n if p:\n print(f\"Syntax error at character number {p.lexer.lexpos} at token '{p.value}' in line {p.lexer.lineno}.\")\n else:\n print('Syntax error at end of input.')", "Setting the optional argument write_tables to False <B style=\"color:red\">is required</B> to prevent an obscure bug where the parser generator tries to read an empty parse table.", "parser = yacc.yacc(write_tables=False, debug=True)", "As there are no warnings all conflicts have been resolved using the precedence declarations.\nLet's look at the action table that is generated.", "!type parser.out\n\n!cat parser.out\n\n%run ../ANTLR4-Python/AST-2-Dot.ipynb", "The function test(s) takes a string s as its argument an tries to parse this string. If all goes well, an abstract syntax tree is returned.\nIf the string can't be parsed, an error message is printed by the parser.", "def test(s):\n t = yacc.parse(s)\n d = tuple2dot(t)\n display(d)\n return t\n\ntest('2^3*4+5')\n\ntest('1+2*3^4')\n\ntest('1 + 2 * -3^4')" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
willettk/insight
notebooks/neural_networks_and_deep_learning.ipynb
apache-2.0
[ "Work with http://neuralnetworksanddeeplearning.com/", "%matplotlib inline\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport random", "Chapter 1", "def sigmoid(z):\n \n return 1./(1. + np.exp(-z))\n\ndef sigmoid_vector(w,x,b):\n \n return 1./(1. + np.exp(-1 * np.sum(w * x) - b))\n\ndef sigmoid_prime(z):\n \n return sigmoid(z) * (1 - sigmoid(z))\n\n# Plot behavior of sigmoid. Continuous symmetric function, \n# asymptotically bounded by [0,1] in x = [-inf, inf]\n\nx = np.linspace(-10,10)\nplt.plot(x,sigmoid(x))\nplt.ylim(-0.05,1.05);\n\n# Test the vectorized output\n\nw = np.array([1,2,3])\nx = np.array([0.5,0.5,0.7])\nb = 0\n\nprint sigmoid_vector(w,x,b)", "Exercises\nTake all the weights and biases in a network of perceptrons and multiply them by a positive constant $c > 0$. Show that the behavior of the network doesn't change. \nInput: $[x_1,x_2,\\ldots,x_j]$\nOld behavior\n\n\nWeights: $[w_1,w_2,\\ldots,w_j]$\n\n\nBias: $b$\n\n\nPerceptron output: \n\noutput = 0 if $w \\cdot x + b \\leq 0$\noutput = 1 if $w \\cdot x + b > 0$\n\nNew input: \n\n\n$w_\\mathrm{new} = [c w_1,c w_2,\\ldots,c w_j]$\n\n\n$b_\\mathrm{new} = c b$\n\n\nNew output of the perceptron:\n$w_\\mathrm{new} \\cdot x + b_\\mathrm{new} = c w \\cdot x + c b = c (w \\cdot x + b)$. \nThis is just a positive scaling, so $w_\\mathrm{new} \\cdot x + b_\\mathrm{new} = w \\cdot x + b$ at 0 and keeps the same sign on either side since $c > 0$. So the behavior of the perceptron network doesn't change.\nTake a network of perceptrons and fix the input $\\boldsymbol{x}$. Assume $\\boldsymbol{w}\\cdot\\boldsymbol{x} + b \\neq 0$ for all perceptrons. \nOriginal output:\n\n0 if $(w \\cdot x + b) < 0$\n1 if $(w \\cdot x + b) > 0$\n\nReplace perceptrons with sigmoid functions and multiply both weights and biases by a constant $c > 0$. \n\n\n$w_\\mathrm{new} = [c w_1,c w_2,\\ldots,c w_j]$\n\n\n$b_\\mathrm{new} = c b$\n\n\nNew output:\n$\\sigma[c\\boldsymbol{w},\\boldsymbol{x},c b] \\equiv \\frac{1}{1 + \\exp{\\left(-\\sum_j{(c w_j) x_j} - c b\\right)}} = \\frac{1}{1 + \\exp{\\left(c(-\\sum_j{w_j x_j} - b)\\right)}}$\nAs $c \\rightarrow \\infty$, the term $\\exp{\\left(c(-\\sum_j{w_j x_j} - b)\\right)}$ becomes $\\infty$ if $(-\\sum_j{w_j x_j} - b) > 0$, and so $\\sigma \\rightarrow 0$. This is equivalent to $(\\sum_j{w_j x_j} + b) < 0$, or the same as the first output of the perceptron. Similarly, if $(-\\sum_j{w_j x_j} - b) < 0$, then the term goes to 0 and $\\sigma \\rightarrow 1$. So the behavior of the sigmoid network is the same as perceptrons is the same for very large $c$. \nIf $w \\cdot x + b = 0$ for one of the perceptrons, then $\\sigma=1/2$ regardless of the value of $c$. So the sigmoid approximation will fail to match the perceptron output. 
\nDesign a set of weights and biases such that digits are converted to their bitwise representation.", "# One set of possible weights and a bias; infinite amount\n# of legal combinations\n\ndigits = np.identity(10) * 0.99 + 0.005\n\nweights = np.ones((10,4)) * -1\n\nweights[1::2,0] = 3\nweights[2::4,1] = 3\nweights[3::4,1] = 3\nweights[4:8,2] = 3\nweights[8:10,3] = 3\nweights[0,1:3] = -2\n\nbias = -2\n\nprint \"Weights: \\n{}\".format(weights)\n\nprint \"Bias: {}\".format(bias)\n\nprint \"Bitwise output: \\n{}\".format((np.sign(np.dot(digits,weights) + bias).astype(int) + 1) / 2)\n\n# Initialize the network object\n\nclass Network(object):\n \n def __init__(self,sizes):\n # Initialize the Network object with random (normal) biases, weights\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y,1) for y in sizes[1:]]\n self.weights = [np.random.randn(y,x) for x,y in zip(sizes[:-1],sizes[1:])]\n\n def feedforward(self,a):\n # Return the output of the network\n \n for b,w in zip(self.biases, self.weights):\n a = sigmoid(np.dot(w,a) + b)\n return a\n \n def SGD(self, training_data, epochs, mini_batch_size,\n eta, test_data=None):\n \n if test_data:\n n_test = len(test_data)\n n = len(training_data)\n \n for j in xrange(epochs):\n random.shuffle(training_data)\n mini_batches = [training_data[k:k+mini_batch_size] for k in xrange(0,n,mini_batch_size)]\n for mini_batch in mini_batches:\n self.update_mini_batch(mini_batch,eta)\n if test_data:\n print \"Epoch {}: {} / {}\".format(j,self.evaluate(test_data),n_test)\n else:\n print \"Epoch {} complete.\".format(j)\n \n def update_mini_batch(self,mini_batch,eta):\n \n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n \n for x,y in mini_batch:\n delta_nabla_b, delta_nabla_w = self.backprop(x,y)\n nabla_b = [nb + dnb for nb,dnb in zip(nabla_b,delta_nabla_b)]\n nabla_w = [nw + dnw for nw,dnw in zip(nabla_w,delta_nabla_w)]\n \n self.weights = [w - (eta/len(mini_batch))*nw for w,nw in zip(self.weights,nabla_w)]\n self.biases = [b - (eta/len(mini_batch))*nb for b,nb in zip(self.biases,nabla_b)]\n \n def evaluate(self, test_data):\n \n test_results = [(np.argmax(self.feedforward(x)),y) for (x,y) in test_data]\n \n return sum(int(x == y) for (x,y) in test_results)\n \n def backprop(self, x, y):\n\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n # feedforward\n activation = x\n activations = [x] # list to store all the activations, layer by layer\n zs = [] # list to store all the z vectors, layer by layer\n for b, w in zip(self.biases, self.weights):\n z = np.dot(w, activation)+b\n zs.append(z)\n activation = sigmoid(z)\n activations.append(activation)\n # backward pass\n delta = self.cost_derivative(activations[-1], y) * \\\n sigmoid_prime(zs[-1])\n nabla_b[-1] = delta\n nabla_w[-1] = np.dot(delta, activations[-2].transpose())\n\n for l in xrange(2, self.num_layers):\n z = zs[-l]\n sp = sigmoid_prime(z)\n delta = np.dot(self.weights[-l+1].transpose(), delta) * sp\n nabla_b[-l] = delta\n nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())\n return (nabla_b, nabla_w)\n \n def cost_derivative(self,output_activations,y):\n \n return (output_activations - y)\n \n", "Load the MNIST data", "import cPickle as pickle\nimport gzip\n\ndef load_data():\n \n with gzip.open(\"neural-networks-and-deep-learning/data/mnist.pkl.gz\",\"rb\") as f:\n training_data,validation_data,test_data = pickle.load(f)\n \n return 
training_data,validation_data,test_data\n\ndef load_data_wrapper():\n \n tr_d,va_d,te_d = load_data()\n \n training_inputs = [np.reshape(x,(784,1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n training_data = zip(training_inputs,training_results)\n \n validation_inputs = [np.reshape(x,(784,1)) for x in va_d[0]]\n validation_data = zip(validation_inputs,va_d[1])\n \n test_inputs = [np.reshape(x,(784,1)) for x in te_d[0]]\n test_data = zip(test_inputs,te_d[1])\n \n return (training_data,validation_data,test_data)\n\ndef vectorized_result(j):\n \n e = np.zeros((10,1))\n e[j] = 1.0\n \n return e", "Run the network", "training_data,validation_data,test_data = load_data_wrapper()\n\nnet = Network([784,30,10])\nnet.SGD(training_data,30,10,3.0,test_data = test_data)\n\nnet100 = Network([784,100,10])\nnet100.SGD(training_data,30,10,3.0,test_data=test_data)\n\nnet2 = Network([784,10])\nnet2.SGD(training_data,30,10,3.0,test_data=test_data)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
nagordon/mechpy
tutorials/Composite_Plate_Mechanics_with_Python_Theory.ipynb
mit
[ "Mechpy Tutorials\na mechanical engineering toolbox\nsource code - https://github.com/nagordon/mechpy\ndocumentation - https://nagordon.github.io/mechpy/web/ \n\nNeal Gordon\n2017-02-20 \n\nComposite Plate Mechanics with Python\nreference: hyer page 584. 617\nThe motivation behind this talk is to explore the capability of python as a scientific computation tool as well as solve a typical calcuation that could either be done by hand, or coded. I find coding to be a convient way to learn challenging mathmatics because I can easily replicate my work in the future when I can't remember the details of the calcuation or, if there are any errors, they can typically be easily fixed and the other calcuations re-ran without a lot of duplcation of effort.\nComposite mechanics can be very iterative by nature and is easiest to employ linear algebra to find displacements, strains and stresses of composites. Coding solutions is also handy when visualizations are required.\nFor this example, we are interested in calcuating the stress critical ply in a simple asymteric composite plate with a pressure load applied. We can chooose a variety of boundary conditions of our plate, but this solution is limited to 2 dimensional displacements, x and z. If we are interested in 3 dimensional displacements, the problem becomes much more challenging as partial differentiation of the governing equations gives us a PDE, which is more challenging to solve. \nThe steps to solving are \n- Identify governing and equilibrium equations\n- import python required libraries \n- declare symbolic variables\n- declare numeric variables, including material properties, plate dimensions, and plate pressure\n- solve 4th order differntial equation with 7 constants\n- apply plate boundary conditions and acquire u(x) and w(x) displacement functions\n- acquire strain equations from displacement\n- acquire stress equations from strain\n- determine critical ply from highest ply stress ratio", "# Import Python modules and \nimport numpy as np\nfrom sympy import *\nfrom pprint import pprint\n\n# printing and plotting settings \ninit_printing(use_latex='mathjax')\nget_ipython().magic('matplotlib inline') # inline plotting\n\nx,y,q = symbols('x,y,q')", "As mentioned before, if we want to perform a 3 dimensional displacement model of the composite plate, we would have 6 reaction forces that are a function of x and y. Those 6 reaction forces are related by 3 equalibrium equations", "# # hyer page 584\n# # Equations of equilibrium\n# Nxf = Function('N_x')(x,y)\n# Nyf = Function('N_y')(x,y)\n# Nxyf = Function('N_xy')(x,y)\n# Mxf = Function('M_x')(x,y)\n# Myf = Function('M_y')(x,y)\n# Mxyf = Function('M_xy')(x,y)\n\n# symbols for force and moments\nNx,Ny,Nxy,Mx,My,Mxy = symbols('N_x,N_y,N_xy,M_x,M_y,M_xy')\nNxf,Nyf,Nxyf,Mxf,Myf,Mxyf = symbols('Nxf,Nyf,Nxyf,Mxf,Myf,Mxyf')\n\nEq(0,diff(Nx(x,y), x)+diff(Nxy(x,y),y))\n\nEq(0,diff(Nxy(x,y), x)+diff(Ny(x,y),y))\n\nEq(0, diff(Mx(x,y),x,2) + 2*diff(Mxy(x,y),x,y) + diff(My(x,y) ,y,2)+ q )", "What makes composite plates special is the fact that they typically not isotropic. 
This is handled by the 6x6 ABD matrix that defines the composites properties axially, in bending, and the coupling between the two.", "# composite properties \nA11,A22,A66,A12,A16,A26,A66 = symbols('A11,A22,A66,A12,A16,A26,A66')\nB11,B22,B66,B12,B16,B26,B66 = symbols('B11,B22,B66,B12,B16,B26,B66')\nD11,D22,D66,D12,D16,D26,D66 = symbols('D11,D22,D66,D12,D16,D26,D66')\n\n## constants of integration when solving differential equation\nC1,C2,C3,C4,C5,C6 = symbols('C1,C2,C3,C4,C5,C6')\n\n# plate and composite parameters\nth,a,b = symbols('th,a,b')\n\n# displacement functions\nu0 = Function('u0')(x,y)\nv0 = Function('v0')(x,y)\nw0 = Function('w0')(x,y)", "Let's compute our 6 displacement conditions which is where our PDE's show up", "Nxf = A11*diff(u0,x) + A12*diff(v0,y) + A16*(diff(u0,y) + diff(v0,x)) - B11*diff(w0,x,2) - B12*diff(w0,y,2) - 2*B16*diff(w0,x,y)\nEq(Nx, Nxf)\n\nNyf = A12*diff(u0,x) + A22*diff(v0,y) + A26*(diff(u0,y) + diff(v0,x)) - B12*diff(w0,x,2) - B22*diff(w0,y,2) - 2*B26*diff(w0,x,y)\nEq(Ny,Nyf)\n\nNxyf = A16*diff(u0,x) + A26*diff(v0,y) + A66*(diff(u0,y) + diff(v0,x)) - B16*diff(w0,x,2) - B26*diff(w0,y,2) - 2*B66*diff(w0,x,y) \nEq(Nxy,Nxyf)\n\nMxf = B11*diff(u0,x) + B12*diff(v0,y) + B16*(diff(u0,y) + diff(v0,x)) - D11*diff(w0,x,2) - D12*diff(w0,y,2) - 2*D16*diff(w0,x,y)\nEq(Mx,Mxf)\n\nMyf = B12*diff(u0,x) + B22*diff(v0,y) + B26*(diff(u0,y) + diff(v0,x)) - D12*diff(w0,x,2) - D22*diff(w0,y,2) - 2*D26*diff(w0,x,y)\nEq(My,Myf)\n\nMxyf = B16*diff(u0,x) + B26*diff(v0,y) + B66*(diff(u0,y) + diff(v0,x)) - D16*diff(w0,x,2) - D26*diff(w0,y,2) - 2*D66*diff(w0,x,y)\nEq(Mxy,Mxyf)", "Now, combine our 6 displacement conditions with our 3 equalibrium equations to get three goverening equations", "eq1 = diff(Nxf,x) + diff(Nxf,y)\neq1\n\neq2 = diff(Nxyf,x) + diff(Nyf,y)\neq2\n\neq3 = diff(Mxf,x,2) + 2*diff(Mxyf,x,y) + diff(Myf,y,2) + q\neq3", "Yikes, I do not want to solve that (at least right now). If we make the assumption that the plate has equal displacement of y in the x and y direction, then we can simply things ALOT! These simplifications are valid for cross ply unsymmetric laminates plate, Hyer pg 616. This is applied by setting some of our material properties to zero. $ A16=A26=D16=D26=B16=B26=B12=B66=0 $\nAlmost like magic, we now have some equations that aren't so scary.", "u0 = Function('u0')(x)\nv0 = Function('v0')(x)\nw0 = Function('w0')(x)\n\nNxf = A11*diff(u0,x) + A12*diff(v0,y) - B11*diff(w0,x,2)\nEq(Nx, Nxf)\n\nNyf = A12*diff(u0,x) + A22*diff(v0,y) - B22*diff(w0,y,2)\nEq(Ny,Nyf)\n\nNxyf = A66*(diff(u0,y) + diff(v0,x))\nEq(Nxy,Nxyf)\n\nMxf = B11*diff(u0,x) - D11*diff(w0,x,2) - D12*diff(w0,y,2)\nEq(Mx,Mxf)\n\nMyf = B22*diff(v0,y) - D12*diff(w0,x,2) - D22*diff(w0,y,2)\nEq(My,Myf)\n\nMxyf = 0\nEq(Mxy,Mxyf)", "Now we are getting somewhere. Finally we can solve the differential equations", "dsolve(diff(Nx(x)))\n\ndsolve(diff(Mx(x),x,2)+q)", "Now solve for u0 and w0 with some pixie dust", "eq4 = (Nxf-C1)\neq4\n\neq5 = Mxf -( -q*x**2 + C2*x + C3 )\neq5\n\neq6 = Eq(solve(eq4,diff(u0,x))[0] , solve(eq5, diff(u0,x))[0])\neq6\n\nw0f = dsolve(eq6, w0)\nw0f\n\neq7 = Eq(solve(eq6, diff(w0,x,2))[0] , solve(eq4,diff(w0,x,2))[0])\neq7\n\nu0f = dsolve(eq7)\nu0f", "" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
pligor/predicting-future-product-prices
04_time_series_prediction/24_price_history_seq2seq-full_dataset_testing.ipynb
agpl-3.0
[ "# -*- coding: UTF-8 -*-\n#%load_ext autoreload\n%reload_ext autoreload\n%autoreload 2\n\nfrom __future__ import division\nimport tensorflow as tf\nfrom os import path, remove\nimport numpy as np\nimport pandas as pd\nimport csv\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom time import time\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom mylibs.jupyter_notebook_helper import show_graph, renderStatsList, renderStatsCollection, \\\n renderStatsListWithLabels, renderStatsCollectionOfCrossValids, plot_res_gp, my_plot_convergence\nfrom tensorflow.contrib import rnn\nfrom tensorflow.contrib import learn\nimport shutil\nfrom tensorflow.contrib.learn.python.learn import learn_runner\nfrom mylibs.tf_helper import getDefaultGPUconfig\nfrom sklearn.metrics import r2_score\nfrom mylibs.py_helper import factors\nfrom fastdtw import fastdtw\nfrom collections import OrderedDict\nfrom scipy.spatial.distance import euclidean\nfrom statsmodels.tsa.stattools import coint\nfrom common import get_or_run_nn\nfrom data_providers.price_history_seq2seq_data_provider import PriceHistorySeq2SeqDataProvider\nfrom data_providers.price_history_dataset_generator import PriceHistoryDatasetGenerator\nfrom sklearn.metrics import mean_squared_error\nfrom skopt.space.space import Integer, Real\nfrom skopt import gp_minimize\nfrom skopt.plots import plot_convergence\nimport pickle\nimport inspect\nimport dill\nimport sys\nfrom models.model_21_price_history_seq2seq_dyn_dec_ins import PriceHistorySeq2SeqDynDecIns\nfrom gp_opt.price_history_23_gp_opt import PriceHistory23GpOpt\nfrom os.path import isdir\nfrom cost_functions.huber_loss import huber_loss\n\ndtype = tf.float32\nseed = 16011984\nrandom_state = np.random.RandomState(seed=seed)\nconfig = getDefaultGPUconfig()\nn_jobs = 1\n%matplotlib inline", "Step 0 - hyperparams\nvocab_size is all the potential words you could have (classification for translation case)\nand max sequence length are the SAME thing\ndecoder RNN hidden units are usually same size as encoder RNN hidden units in translation but for our case it does not seem really to be a relationship there but we can experiment and find out later, not a priority thing right now", "num_units = 400 #state size\n\ninput_len = 60\ntarget_len = 30\n\nbatch_size = 64\nwith_EOS = False\n\ntotal_size = 57994\ntrain_size = 46400\ntest_size = 11584", "Once generate data", "data_folder = '../../../../Dropbox/data'\n\n\nph_data_path = '../data/price_history'\n\nnpz_full = ph_data_path + '/price_history_dp_60to30_57994.npz'\nnpz_train = ph_data_path + '/price_history_dp_60to30_57994_46400_train.npz'\nnpz_test = ph_data_path + '/price_history_dp_60to30_57994_11584_test.npz'", "Step 1 - collect data", "# dp = PriceHistorySeq2SeqDataProvider(npz_path=npz_train, batch_size=batch_size, with_EOS=with_EOS)\n# dp.inputs.shape, dp.targets.shape\n\n# aa, bb = dp.next()\n# aa.shape, bb.shape", "Step 2 - Build model", "model = PriceHistorySeq2SeqDynDecIns(rng=random_state, dtype=dtype, config=config, with_EOS=with_EOS)\n\n# graph = model.getGraph(batch_size=batch_size,\n# num_units=num_units,\n# input_len=input_len,\n# target_len=target_len)\n\n#show_graph(graph)", "Step 3 training the network", "best_params = [500,\n tf.nn.tanh,\n 0.0001,\n 0.62488034788862112,\n 0.001]\n\nnum_units, activation, lamda2, keep_prob_input, learning_rate = best_params\n\nbatch_size\n\ndef experiment():\n return model.run(npz_path=npz_train,\n npz_test = npz_test,\n epochs=100,\n batch_size = batch_size,\n num_units = 
num_units,\n input_len=input_len,\n target_len=target_len,\n learning_rate = learning_rate,\n preds_gather_enabled=True,\n batch_norm_enabled = True,\n activation = activation,\n decoder_first_input = PriceHistorySeq2SeqDynDecIns.DECODER_FIRST_INPUT.ZEROS,\n keep_prob_input = keep_prob_input,\n lamda2 = lamda2,\n )\n\n#%%time\ndyn_stats, preds_dict, targets = get_or_run_nn(experiment, filename='024_seq2seq_60to30_002',\n nn_runs_folder= data_folder + '/nn_runs')", "One epoch takes approximately 268 secs\nIf we want to let it run for ~8 hours = 8 * 3600 / 268 ~= 107 epochs\nSo let it run for 100 epochs and see how it behaves", "dyn_stats.plotStats()\nplt.show()\n\ndata_len = len(targets)\n\nmses = np.empty(data_len)\nfor ii, (pred, target) in enumerate(zip(preds_dict.values(), targets.values())):\n mses[ii] = mean_squared_error(pred, target)\n\nnp.mean(mses)\n\nhuber_losses = np.empty(data_len)\nfor ii, (pred, target) in enumerate(zip(preds_dict.values(), targets.values())):\n huber_losses[ii] = np.mean(huber_loss(pred, target))\n\nnp.mean(huber_losses)\n\ntargets_arr = np.array(targets.values())\ntargets_arr.shape\n\npreds_arr = np.array(preds_dict.values())\npreds_arr.shape\n\nnp.mean(huber_loss(y_true=targets_arr, y_pred=preds_arr))\n\nr2_scores = [r2_score(y_true=targets[ind], y_pred=preds_dict[ind])\n for ind in range(len(targets))]\n\nind = np.argmin(r2_scores)\nind\n\nreals = targets[ind]\npreds = preds_dict[ind]\n\nr2_score(y_true=reals, y_pred=preds)\n\n#sns.tsplot(data=dp.inputs[ind].flatten())\n\nfig = plt.figure(figsize=(15,6))\nplt.plot(reals, 'b')\nplt.plot(preds, 'g')\nplt.legend(['reals','preds'])\nplt.show()\n\n%%time\ndtw_scores = [fastdtw(targets[ind], preds_dict[ind])[0]\n for ind in range(len(targets))]\n\nnp.mean(dtw_scores)\n\ncoint(preds, reals)\n\ncur_ind = np.random.randint(len(targets))\nreals = targets[cur_ind]\npreds = preds_dict[cur_ind]\nfig = plt.figure(figsize=(15,6))\nplt.plot(reals, 'b')\nplt.plot(preds, 'g')\nplt.legend(['reals','preds'])\nplt.show()", "Conclusion\nWe have managed to make it work. We have done better in MSE and Huber Loss metrics but the DTW is still comparable with the baseline and a little above it. We need to add more information to the model in order to improve it" ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
tuanavu/coursera-university-of-washington
machine_learning/3_classification/assigment/week2/module-3-linear-classifier-learning-assignment-blank.ipynb
mit
[ "Implementing logistic regression from scratch\nThe goal of this notebook is to implement your own logistic regression classifier. You will:\n\nExtract features from Amazon product reviews.\nConvert an SFrame into a NumPy array.\nImplement the link function for logistic regression.\nWrite a function to compute the derivative of the log likelihood function with respect to a single coefficient.\nImplement gradient ascent.\nGiven a set of coefficients, predict sentiments.\nCompute classification accuracy for the logistic regression model.\n\nLet's get started!\nFire up GraphLab Create\nMake sure you have the latest version of GraphLab Create. Upgrade by\npip install graphlab-create --upgrade\nSee this page for detailed instructions on upgrading.", "import graphlab", "Load review dataset\nFor this assignment, we will use a subset of the Amazon product review dataset. The subset was chosen to contain similar numbers of positive and negative reviews, as the original dataset consisted primarily of positive reviews.", "products = graphlab.SFrame('amazon_baby_subset.gl/')", "One column of this dataset is 'sentiment', corresponding to the class label with +1 indicating a review with positive sentiment and -1 indicating one with negative sentiment.", "products['sentiment']", "Let us quickly explore more of this dataset. The 'name' column indicates the name of the product. Here we list the first 10 products in the dataset. We then count the number of positive and negative reviews.", "products.head(10)['name']\n\nprint '# of positive reviews =', len(products[products['sentiment']==1])\nprint '# of negative reviews =', len(products[products['sentiment']==-1])", "Note: For this assignment, we eliminated class imbalance by choosing \na subset of the data with a similar number of positive and negative reviews. \nApply text cleaning on the review data\nIn this section, we will perform some simple feature cleaning using SFrames. The last assignment used all words in building bag-of-words features, but here we limit ourselves to 193 words (for simplicity). We compiled a list of 193 most frequent words into a JSON file. \nNow, we will load these words from this JSON file:", "import json\nwith open('important_words.json', 'r') as f: # Reads the list of most frequent words\n important_words = json.load(f)\nimportant_words = [str(s) for s in important_words]\n\nprint important_words", "Now, we will perform 2 simple data transformations:\n\nRemove punctuation using Python's built-in string functionality.\nCompute word counts (only for important_words)\n\nWe start with Step 1 which can be done as follows:", "def remove_punctuation(text):\n import string\n return text.translate(None, string.punctuation) \n\nproducts['review_clean'] = products['review'].apply(remove_punctuation)", "Now we proceed with Step 2. For each word in important_words, we compute a count for the number of times the word occurs in the review. We will store this count in a separate column (one for each word). The result of this feature processing is a single column for each word in important_words which keeps a count of the number of times the respective word occurs in the review text.\nNote: There are several ways of doing this. In this assignment, we use the built-in count function for Python lists. 
Each review string is first split into individual words and the number of occurances of a given word is counted.", "for word in important_words:\n products[word] = products['review_clean'].apply(lambda s : s.split().count(word))", "The SFrame products now contains one column for each of the 193 important_words. As an example, the column perfect contains a count of the number of times the word perfect occurs in each of the reviews.", "products['perfect']", "Now, write some code to compute the number of product reviews that contain the word perfect.\nHint: \n* First create a column called contains_perfect which is set to 1 if the count of the word perfect (stored in column perfect) is >= 1.\n* Sum the number of 1s in the column contains_perfect.\nQuiz Question. How many reviews contain the word perfect?\nConvert SFrame to NumPy array\nAs you have seen previously, NumPy is a powerful library for doing matrix manipulation. Let us convert our data to matrices and then implement our algorithms with matrices.\nFirst, make sure you can perform the following import.", "import numpy as np", "We now provide you with a function that extracts columns from an SFrame and converts them into a NumPy array. Two arrays are returned: one representing features and another representing class labels. Note that the feature matrix includes an additional column 'intercept' to take account of the intercept term.", "def get_numpy_data(data_sframe, features, label):\n data_sframe['intercept'] = 1\n features = ['intercept'] + features\n features_sframe = data_sframe[features]\n feature_matrix = features_sframe.to_numpy()\n label_sarray = data_sframe[label]\n label_array = label_sarray.to_numpy()\n return(feature_matrix, label_array)", "Let us convert the data into NumPy arrays.", "# Warning: This may take a few minutes...\nfeature_matrix, sentiment = get_numpy_data(products, important_words, 'sentiment') ", "Are you running this notebook on an Amazon EC2 t2.micro instance? (If you are using your own machine, please skip this section)\nIt has been reported that t2.micro instances do not provide sufficient power to complete the conversion in acceptable amount of time. For interest of time, please refrain from running get_numpy_data function. Instead, download the binary file containing the four NumPy arrays you'll need for the assignment. To load the arrays, run the following commands:\narrays = np.load('module-3-assignment-numpy-arrays.npz')\nfeature_matrix, sentiment = arrays['feature_matrix'], arrays['sentiment']", "feature_matrix.shape", "Quiz Question: How many features are there in the feature_matrix?\n Quiz Question: Assuming that the intercept is present, how does the number of features in feature_matrix relate to the number of features in the logistic regression model?\nNow, let us see what the sentiment column looks like:", "sentiment", "Estimating conditional probability with link function\nRecall from lecture that the link function is given by:\n$$\nP(y_i = +1 | \\mathbf{x}_i,\\mathbf{w}) = \\frac{1}{1 + \\exp(-\\mathbf{w}^T h(\\mathbf{x}_i))},\n$$\nwhere the feature vector $h(\\mathbf{x}_i)$ represents the word counts of important_words in the review $\\mathbf{x}_i$. 
Complete the following function that implements the link function:", "'''\nproduces probablistic estimate for P(y_i = +1 | x_i, w).\nestimate ranges between 0 and 1.\n'''\ndef predict_probability(feature_matrix, coefficients):\n # Take dot product of feature_matrix and coefficients \n # YOUR CODE HERE\n ...\n \n # Compute P(y_i = +1 | x_i, w) using the link function\n # YOUR CODE HERE\n predictions = ...\n \n # return predictions\n return predictions", "Aside. How the link function works with matrix algebra\nSince the word counts are stored as columns in feature_matrix, each $i$-th row of the matrix corresponds to the feature vector $h(\\mathbf{x}_i)$:\n$$\n[\\text{feature_matrix}] =\n\\left[\n\\begin{array}{c}\nh(\\mathbf{x}_1)^T \\\nh(\\mathbf{x}_2)^T \\\n\\vdots \\\nh(\\mathbf{x}_N)^T\n\\end{array}\n\\right] =\n\\left[\n\\begin{array}{cccc}\nh_0(\\mathbf{x}_1) & h_1(\\mathbf{x}_1) & \\cdots & h_D(\\mathbf{x}_1) \\\nh_0(\\mathbf{x}_2) & h_1(\\mathbf{x}_2) & \\cdots & h_D(\\mathbf{x}_2) \\\n\\vdots & \\vdots & \\ddots & \\vdots \\\nh_0(\\mathbf{x}_N) & h_1(\\mathbf{x}_N) & \\cdots & h_D(\\mathbf{x}_N)\n\\end{array}\n\\right]\n$$\nBy the rules of matrix multiplication, the score vector containing elements $\\mathbf{w}^T h(\\mathbf{x}_i)$ is obtained by multiplying feature_matrix and the coefficient vector $\\mathbf{w}$.\n$$\n[\\text{score}] =\n[\\text{feature_matrix}]\\mathbf{w} =\n\\left[\n\\begin{array}{c}\nh(\\mathbf{x}_1)^T \\\nh(\\mathbf{x}_2)^T \\\n\\vdots \\\nh(\\mathbf{x}_N)^T\n\\end{array}\n\\right]\n\\mathbf{w}\n= \\left[\n\\begin{array}{c}\nh(\\mathbf{x}_1)^T\\mathbf{w} \\\nh(\\mathbf{x}_2)^T\\mathbf{w} \\\n\\vdots \\\nh(\\mathbf{x}_N)^T\\mathbf{w}\n\\end{array}\n\\right]\n= \\left[\n\\begin{array}{c}\n\\mathbf{w}^T h(\\mathbf{x}_1) \\\n\\mathbf{w}^T h(\\mathbf{x}_2) \\\n\\vdots \\\n\\mathbf{w}^T h(\\mathbf{x}_N)\n\\end{array}\n\\right]\n$$\nCheckpoint\nJust to make sure you are on the right track, we have provided a few examples. If your predict_probability function is implemented correctly, then the outputs will match:", "dummy_feature_matrix = np.array([[1.,2.,3.], [1.,-1.,-1]])\ndummy_coefficients = np.array([1., 3., -1.])\n\ncorrect_scores = np.array( [ 1.*1. + 2.*3. + 3.*(-1.), 1.*1. + (-1.)*3. + (-1.)*(-1.) ] )\ncorrect_predictions = np.array( [ 1./(1+np.exp(-correct_scores[0])), 1./(1+np.exp(-correct_scores[1])) ] )\n\nprint 'The following outputs must match '\nprint '------------------------------------------------'\nprint 'correct_predictions =', correct_predictions\nprint 'output of predict_probability =', predict_probability(dummy_feature_matrix, dummy_coefficients)", "Compute derivative of log likelihood with respect to a single coefficient\nRecall from lecture:\n$$\n\\frac{\\partial\\ell}{\\partial w_j} = \\sum_{i=1}^N h_j(\\mathbf{x}_i)\\left(\\mathbf{1}[y_i = +1] - P(y_i = +1 | \\mathbf{x}_i, \\mathbf{w})\\right)\n$$\nWe will now write a function that computes the derivative of log likelihood with respect to a single coefficient $w_j$. The function accepts two arguments:\n* errors vector containing $\\mathbf{1}[y_i = +1] - P(y_i = +1 | \\mathbf{x}_i, \\mathbf{w})$ for all $i$.\n* feature vector containing $h_j(\\mathbf{x}_i)$ for all $i$. \nComplete the following code block:", "def feature_derivative(errors, feature): \n # Compute the dot product of errors and feature\n derivative = ...\n \n # Return the derivative\n return derivative", "In the main lecture, our focus was on the likelihood. 
In the advanced optional video, however, we introduced a transformation of this likelihood---called the log likelihood---that simplifies the derivation of the gradient and is more numerically stable. Due to its numerical stability, we will use the log likelihood instead of the likelihood to assess the algorithm.\nThe log likelihood is computed using the following formula (see the advanced optional video if you are curious about the derivation of this equation):\n$$\\ell\\ell(\\mathbf{w}) = \\sum_{i=1}^N \\Big( (\\mathbf{1}[y_i = +1] - 1)\\mathbf{w}^T h(\\mathbf{x}_i) - \\ln\\left(1 + \\exp(-\\mathbf{w}^T h(\\mathbf{x}_i))\\right) \\Big) $$\nWe provide a function to compute the log likelihood for the entire dataset.", "def compute_log_likelihood(feature_matrix, sentiment, coefficients):\n indicator = (sentiment==+1)\n scores = np.dot(feature_matrix, coefficients)\n logexp = np.log(1. + np.exp(-scores))\n \n # Simple check to prevent overflow\n mask = np.isinf(logexp)\n logexp[mask] = -scores[mask]\n \n lp = np.sum((indicator-1)*scores - logexp)\n return lp", "Checkpoint\nJust to make sure we are on the same page, run the following code block and check that the outputs match.", "dummy_feature_matrix = np.array([[1.,2.,3.], [1.,-1.,-1]])\ndummy_coefficients = np.array([1., 3., -1.])\ndummy_sentiment = np.array([-1, 1])\n\ncorrect_indicators = np.array( [ -1==+1, 1==+1 ] )\ncorrect_scores = np.array( [ 1.*1. + 2.*3. + 3.*(-1.), 1.*1. + (-1.)*3. + (-1.)*(-1.) ] )\ncorrect_first_term = np.array( [ (correct_indicators[0]-1)*correct_scores[0], (correct_indicators[1]-1)*correct_scores[1] ] )\ncorrect_second_term = np.array( [ np.log(1. + np.exp(-correct_scores[0])), np.log(1. + np.exp(-correct_scores[1])) ] )\n\ncorrect_ll = sum( [ correct_first_term[0]-correct_second_term[0], correct_first_term[1]-correct_second_term[1] ] ) \n\nprint 'The following outputs must match '\nprint '------------------------------------------------'\nprint 'correct_log_likelihood =', correct_ll\nprint 'output of compute_log_likelihood =', compute_log_likelihood(dummy_feature_matrix, dummy_sentiment, dummy_coefficients)", "Taking gradient steps\nNow we are ready to implement our own logistic regression. All we have to do is to write a gradient ascent function that takes gradient steps towards the optimum. \nComplete the following function to solve the logistic regression model using gradient ascent:", "from math import sqrt\n\ndef logistic_regression(feature_matrix, sentiment, initial_coefficients, step_size, max_iter):\n coefficients = np.array(initial_coefficients) # make sure it's a numpy array\n for itr in xrange(max_iter):\n\n # Predict P(y_i = +1|x_i,w) using your predict_probability() function\n # YOUR CODE HERE\n predictions = ...\n \n # Compute indicator value for (y_i = +1)\n indicator = (sentiment==+1)\n \n # Compute the errors as indicator - predictions\n errors = indicator - predictions\n for j in xrange(len(coefficients)): # loop over each coefficient\n \n # Recall that feature_matrix[:,j] is the feature column associated with coefficients[j].\n # Compute the derivative for coefficients[j]. 
Save it in a variable called derivative\n # YOUR CODE HERE\n derivative = ...\n \n # add the step size times the derivative to the current coefficient\n ## YOUR CODE HERE\n ...\n \n # Checking whether log likelihood is increasing\n if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \\\n or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:\n lp = compute_log_likelihood(feature_matrix, sentiment, coefficients)\n print 'iteration %*d: log likelihood of observed labels = %.8f' % \\\n (int(np.ceil(np.log10(max_iter))), itr, lp)\n return coefficients", "Now, let us run the logistic regression solver.", "coefficients = logistic_regression(feature_matrix, sentiment, initial_coefficients=np.zeros(194),\n step_size=1e-7, max_iter=301)", "Quiz question: As each iteration of gradient ascent passes, does the log likelihood increase or decrease?\nPredicting sentiments\nRecall from lecture that class predictions for a data point $\\mathbf{x}$ can be computed from the coefficients $\\mathbf{w}$ using the following formula:\n$$\n\\hat{y}_i = \n\\left{\n\\begin{array}{ll}\n +1 & \\mathbf{x}_i^T\\mathbf{w} > 0 \\\n -1 & \\mathbf{x}_i^T\\mathbf{w} \\leq 0 \\\n\\end{array} \n\\right.\n$$\nNow, we will write some code to compute class predictions. We will do this in two steps:\n* Step 1: First compute the scores using feature_matrix and coefficients using a dot product.\n* Step 2: Using the formula above, compute the class predictions from the scores.\nStep 1 can be implemented as follows:", "# Compute the scores as a dot product between feature_matrix and coefficients.\nscores = np.dot(feature_matrix, coefficients)", "Now, complete the following code block for Step 2 to compute the class predictions using the scores obtained above:\n Quiz question: How many reviews were predicted to have positive sentiment?\nMeasuring accuracy\nWe will now measure the classification accuracy of the model. Recall from the lecture that the classification accuracy can be computed as follows:\n$$\n\\mbox{accuracy} = \\frac{\\mbox{# correctly classified data points}}{\\mbox{# total data points}}\n$$\nComplete the following code block to compute the accuracy of the model.", "num_mistakes = ... # YOUR CODE HERE\naccuracy = ... # YOUR CODE HERE\nprint \"-----------------------------------------------------\"\nprint '# Reviews correctly classified =', len(products) - num_mistakes\nprint '# Reviews incorrectly classified =', num_mistakes\nprint '# Reviews total =', len(products)\nprint \"-----------------------------------------------------\"\nprint 'Accuracy = %.2f' % accuracy", "Quiz question: What is the accuracy of the model on predictions made above? (round to 2 digits of accuracy)\nWhich words contribute most to positive & negative sentiments?\nRecall that in Module 2 assignment, we were able to compute the \"most positive words\". These are words that correspond most strongly with positive reviews. In order to do this, we will first do the following:\n* Treat each coefficient as a tuple, i.e. (word, coefficient_value).\n* Sort all the (word, coefficient_value) tuples by coefficient_value in descending order.", "coefficients = list(coefficients[1:]) # exclude intercept\nword_coefficient_tuples = [(word, coefficient) for word, coefficient in zip(important_words, coefficients)]\nword_coefficient_tuples = sorted(word_coefficient_tuples, key=lambda x:x[1], reverse=True)", "Now, word_coefficient_tuples contains a sorted list of (word, coefficient_value) tuples. 
The first 10 elements in this list correspond to the words that are most positive.\nTen \"most positive\" words\nNow, we compute the 10 words that have the most positive coefficient values. These words are associated with positive sentiment.\n Quiz question: Which word is not present in the top 10 \"most positive\" words?\nTen \"most negative\" words\nNext, we repeat this exercise on the 10 most negative words. That is, we compute the 10 words that have the most negative coefficient values. These words are associated with negative sentiment.\n Quiz question: Which word is not present in the top 10 \"most negative\" words?" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
gtesei/DeepExperiments
MNIST_for_beginners_noNN_noCONV_0.12.0-rc1.ipynb
apache-2.0
[ "MNIST For ML Beginners\nA very simple MNIST classifier. See extensive documentation at http://tensorflow.org/tutorials/mnist/beginners/index.md", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\n\nimport argparse\nimport sys\nimport datetime\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport tensorflow as tf\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_string('data_dir', './', 'Directory to put the training data.')\n\n\n##\niterations = 1000\nbatch_size = 100\n\nprint(\"********* META ***********\")\nprint(\"TensorFlow version: \"+str(tf.__version__))\nprint(\"Date: \"+str(datetime.datetime.now()))\nprint(\"**************************\")", "Visualizing data", "mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\nbatch_xs, batch_ys = mnist.train.next_batch(batch_size)\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nplt.imshow(batch_xs[0].reshape(28, 28))\n\nbatch_ys[0]\n\nplt.imshow(batch_xs[10].reshape(28, 28))\n\nbatch_ys[10]\n\nplt.imshow(batch_xs[60].reshape(28, 28))\n\nbatch_ys[60]", "The current state of the art in classifying these digits can be found here: http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#4d4e495354\nModel", "def main(_):\n # Import data \n mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n\n # Create the model \n x = tf.placeholder(tf.float32, [None, 784])\n W = tf.Variable(tf.zeros([784, 10]))\n b = tf.Variable(tf.zeros([10]))\n y = tf.matmul(x, W) + b\n\n # Define loss and optimizer \n y_ = tf.placeholder(tf.float32, [None, 10])\n \n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))\n train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n #sess = tf.InteractiveSession()\n with tf.Session() as sess:\n tf.global_variables_initializer().run() \n #init = tf.initialize_all_variables()\n #sess.run(init)\n # Train \n for _ in range(iterations):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\n # Test trained model \n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n print(\">>> Test Accuracy::\"+str(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})))\n\n\n\nmain(_)", "TensorBoard: Visualizing Learning", "from tensorflow.contrib.tensorboard.plugins import projector\n\ndef variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar(var.name+'_mean', mean)\n #tf.scalar_summary(var.name+'_mean', mean)\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar(var.name+'_stddev', stddev)\n #tf.scalar_summary(var.name+'_stddev', stddev)\n tf.summary.scalar(var.name+'_max', tf.reduce_max(var))\n #tf.scalar_summary(var.name+'_max', tf.reduce_max(var))\n tf.summary.scalar(var.name+'_min', tf.reduce_min(var))\n #tf.histogram_summary( var.name, var)\n tf.summary.histogram( var.name, var)\n \n\ndef main2(_):\n # Import data \n mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n \n #config = tf.contrib.tensorboard.plugins.projector.ProjectorConfig()\n\n # input images\n with tf.name_scope('input'):\n # None -> batch size can be any size, 784 -> flattened mnist image\n x = 
tf.placeholder(tf.float32, shape=[None, 784], name=\"x-input\") \n \n xs = tf.Variable(tf.zeros([batch_size, 784]) , name=\"x-input-slice1\")\n xs = tf.slice(x, [0, 0], [batch_size, 784] , name=\"x-input-slice2\")\n \n variable_summaries(xs)\n \n #emb1 = config.embeddings.add()\n #emb1.tensor_name = xs.name\n #emb1.metadata_path = os.path.join(FLAGS.data_dir + '/_logs', 'metadata.tsv')\n \n # target 10 output classes\n y_ = tf.placeholder(tf.float32, shape=[None, 10], name=\"y-input\")\n #variable_summaries(y_)\n \n with tf.name_scope('input_image'):\n image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])\n tf.summary.image('image', image_shaped_input, 10)\n \n with tf.name_scope('W'):\n W = tf.Variable(tf.zeros([784, 10]))\n variable_summaries(W)\n \n with tf.name_scope('b'):\n b = tf.Variable(tf.zeros([10]))\n variable_summaries(b)\n \n with tf.name_scope('y'):\n y = tf.matmul(x, W) + b\n variable_summaries(y)\n\n with tf.name_scope('cross_entropy'):\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))\n tf.summary.scalar('cross_entropy', cross_entropy)\n #tf.scalar_summary('cross_entropy', cross_entropy)\n \n with tf.name_scope('train_step'):\n train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n \n # Test trained model \n with tf.name_scope('accuracy-scope'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy-val', accuracy)\n #tf.scalar_summary('accuracy', accuracy)\n \n #######\n #init = tf.initialize_all_variables()\n\n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n #sess = tf.InteractiveSession()\n with tf.Session() as sess:\n tf.global_variables_initializer().run() \n\n # Merge all the summaries and write them out to ./logs (by default)\n #merged = tf.merge_all_summaries()\n merged = tf.summary.merge_all()\n #writer = tf.train.SummaryWriter(FLAGS.data_dir + '/_logs',sess.graph)\n writer = tf.summary.FileWriter(FLAGS.data_dir + '/_logs',sess.graph)\n \n #projector.visualize_embeddings(writer, config)\n\n #sess.run(init)\n\n # Train \n for i in range(iterations):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n\n if i % 100 == 0 or i == (iterations-1):\n summary = sess.run(merged, feed_dict={x: batch_xs, y_: batch_ys})\n writer.add_summary(summary, i)\n summary, acc = sess.run([merged, accuracy], feed_dict={x: mnist.test.images,y_: mnist.test.labels})\n writer.add_summary(summary, i)\n writer.flush()\n \n checkpoint_file = os.path.join(FLAGS.data_dir + '/_logs', 'checkpoint')\n saver.save(sess, checkpoint_file, global_step=i)\n \n print('>>> Test Accuracy [%s/%s]: %s' % (i,iterations,acc))\n\nmain2(_)", "To run TensorBoard, use the following command (alternatively python -m tensorflow.tensorboard)\n\ntensorboard --logdir=_logs\n\nCross Entropy on training set by step\n<img src=\"images/cross_entropy_train.png\" />\nAccuracy on test set by step (learning curve)\n<img src=\"images/accuracy_test_set.png\" />\nComputation Graph\n<img src=\"images/mnist_1_graph.png\" />\nDistribution of weights\n<img src=\"images/distr1.png\" />\nHistogram of weights\n<img src=\"images/hist1.png\" />\nImages\n<img src=\"images/digit_image.JPG\" />" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
jrg365/gpytorch
examples/01_Exact_GPs/Spectral_Delta_GP_Regression.ipynb
mit
[ "Spectral GP Learning with Deltas\nIn this paper, we demonstrate another approach to spectral learning with GPs, learning a spectral density as a simple mixture of deltas. This has been explored, for example, as early as Lázaro-Gredilla et al., 2010.\nCompared to learning Gaussian mixtures as in the SM kernel, this approach has a number of pros and cons. In its favor, it is often very robust and does not have as severe issues with local optima, as it is easier to make progress when performing gradient descent on 1 of 1000 deltas compared to the parameters of 1 of 3 Gaussians. Additionally, implemented using CG in GPyTorch, this approach affords linear time and space in the number of data points N. Against it, it has significantly more parameters which can take many more iterations of training to learn, and it corresponds to a finite basis expansion and is therefore a parametric model.", "import gpytorch\nimport torch", "Load Data\nFor this notebook, we'll be using a sample set of timeseries data of BART ridership on the 5 most commonly traveled stations in San Francisco. This subsample of data was selected and processed from Pyro's examples http://docs.pyro.ai/en/stable/_modules/pyro/contrib/examples/bart.html", "import os\nimport urllib.request\n\nsmoke_test = ('CI' in os.environ)\n\nif not smoke_test and not os.path.isfile('../BART_sample.pt'):\n print('Downloading \\'BART\\' sample dataset...')\n urllib.request.urlretrieve('https://drive.google.com/uc?export=download&id=1A6LqCHPA5lHa5S3lMH8mLMNEgeku8lRG', '../BART_sample.pt')\n torch.manual_seed(1)\n \nif smoke_test:\n train_x, train_y, test_x, test_y = torch.randn(2, 100, 1), torch.randn(2, 100), torch.randn(2, 100, 1), torch.randn(2, 100)\nelse:\n train_x, train_y, test_x, test_y = torch.load('../BART_sample.pt', map_location='cpu')\n\nif torch.cuda.is_available():\n train_x, train_y, test_x, test_y = train_x.cuda(), train_y.cuda(), test_x.cuda(), test_y.cuda()\n\nprint(train_x.shape, train_y.shape, test_x.shape, test_y.shape)\n\ntrain_x_min = train_x.min()\ntrain_x_max = train_x.max()\n\ntrain_x = train_x - train_x_min\ntest_x = test_x - train_x_min\n\ntrain_y_mean = train_y.mean(dim=-1, keepdim=True)\ntrain_y_std = train_y.std(dim=-1, keepdim=True)\n\ntrain_y = (train_y - train_y_mean) / train_y_std\n\ntest_y = (test_y - train_y_mean) / train_y_std", "Define a Model\nThe only thing of note here is the use of the kernel. 
For this example, we'll learn a kernel with 2048 deltas in the mixture, and initialize by sampling directly from the empirical spectrum of the data.", "class SpectralDeltaGP(gpytorch.models.ExactGP):\n def __init__(self, train_x, train_y, num_deltas, noise_init=None):\n likelihood = gpytorch.likelihoods.GaussianLikelihood(noise_constraint=gpytorch.constraints.GreaterThan(1e-11))\n likelihood.register_prior(\"noise_prior\", gpytorch.priors.HorseshoePrior(0.1), \"noise\")\n likelihood.noise = 1e-2\n\n super(SpectralDeltaGP, self).__init__(train_x, train_y, likelihood)\n self.mean_module = gpytorch.means.ConstantMean()\n base_covar_module = gpytorch.kernels.SpectralDeltaKernel(\n num_dims=train_x.size(-1),\n num_deltas=num_deltas,\n )\n base_covar_module.initialize_from_data(train_x[0], train_y[0])\n self.covar_module = gpytorch.kernels.ScaleKernel(base_covar_module)\n\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\n\nmodel = SpectralDeltaGP(train_x, train_y, num_deltas=1500)\n\nif torch.cuda.is_available():\n model = model.cuda()", "Train", "model.train()\nmll = gpytorch.mlls.ExactMarginalLogLikelihood(model.likelihood, model)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01)\nscheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=[40])\n\nnum_iters = 1000 if not smoke_test else 4\n\nwith gpytorch.settings.max_cholesky_size(0): # Ensure we dont try to use Cholesky\n for i in range(num_iters):\n optimizer.zero_grad()\n output = model(train_x)\n loss = -mll(output, train_y)\n if train_x.dim() == 3:\n loss = loss.mean()\n loss.backward()\n optimizer.step()\n\n if i % 10 == 0:\n print(f'Iteration {i} - loss = {loss:.2f} - noise = {model.likelihood.noise.item():e}')\n\n scheduler.step()\n\n# Get into evaluation (predictive posterior) mode\nmodel.eval()\n\n# Test points are regularly spaced along [0,1]\n# Make predictions by feeding model through likelihood\nwith torch.no_grad(), gpytorch.settings.max_cholesky_size(0), gpytorch.settings.fast_pred_var():\n test_x_f = torch.cat([train_x, test_x], dim=-2)\n observed_pred = model.likelihood(model(test_x_f))\n varz = observed_pred.variance", "Plot Results", "from matplotlib import pyplot as plt\n\n%matplotlib inline\n\n_task = 3\n\nplt.subplots(figsize=(15, 15), sharex=True, sharey=True)\nfor _task in range(2):\n ax = plt.subplot(3, 1, _task + 1)\n\n with torch.no_grad():\n # Initialize plot\n# f, ax = plt.subplots(1, 1, figsize=(16, 12))\n\n # Get upper and lower confidence bounds\n lower = observed_pred.mean - varz.sqrt() * 1.98\n upper = observed_pred.mean + varz.sqrt() * 1.98\n lower = lower[_task] # + weight * test_x_f.squeeze()\n upper = upper[_task] # + weight * test_x_f.squeeze()\n\n # Plot training data as black stars\n ax.plot(train_x[_task].detach().cpu().numpy(), train_y[_task].detach().cpu().numpy(), 'k*')\n ax.plot(test_x[_task].detach().cpu().numpy(), test_y[_task].detach().cpu().numpy(), 'r*')\n # Plot predictive means as blue line\n ax.plot(test_x_f[_task].detach().cpu().numpy(), (observed_pred.mean[_task]).detach().cpu().numpy(), 'b')\n # Shade between the lower and upper confidence bounds\n ax.fill_between(test_x_f[_task].detach().cpu().squeeze().numpy(), lower.detach().cpu().numpy(), upper.detach().cpu().numpy(), alpha=0.5)\n # ax.set_ylim([-3, 3])\n ax.legend(['Training Data', 'Test Data', 'Mean', '95% Confidence'], fontsize=16)\n ax.tick_params(axis='both', which='major', labelsize=16)\n 
ax.tick_params(axis='both', which='minor', labelsize=16)\n ax.set_ylabel('Passenger Volume (Normalized)', fontsize=16)\n ax.set_xlabel('Hours (Zoomed to Test)', fontsize=16)\n ax.set_xticks([])\n \n plt.xlim([1250, 1680])\n\nplt.tight_layout()" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
balarsen/pymc_learning
Counting/Poisson and exponential.ipynb
bsd-3-clause
[ "Go from exponential to Poisson\nAlso look to: Adams RP, Murray I, MacKay DJC. Tractable nonparametric Bayesian inference in Poisson processes with Gaussian process intensities. Proceedings of the 26th Annual International Conference on Machine Learning; Montreal, Quebec, Canada. 1553376: ACM; 2009. p. 9-16.\nSome thoughts 20171018\n\nPoisson process under the hood, so the time between is Exponential\nWe can then derive the probability of missing a count due to time based on the probability between\nCan we then use this to figure out how many were likely missed?", "%matplotlib inline\n\nfrom pprint import pprint\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pymc3 as mc\nimport spacepy.toolbox as tb\nimport spacepy.plot as spp\nimport tqdm\nfrom scipy import stats\nimport seaborn as sns\nsns.set(font_scale=1.5)\n# matplotlib.pyplot.rc('figure', figsize=(10,10))\n# matplotlib.pyplot.rc('lines', lw=3)\n# matplotlib.pyplot.rc('font', size=20)\n\n\n\n%matplotlib inline", "Generate Poisson process data and generate exponential\nFor each interval choose $n$ events from a Poisson. Then draw from a uniform the location in the interval for each of the events.", "np.random.seed(8675309)\nnT = 400\ncts = np.random.poisson(20, size=nT)\nedata = []\nfor i in range(nT):\n edata.extend(i + np.sort(np.random.uniform(low=0, high=1, size=cts[i])))\nedata = np.asarray(edata)\nedata.shape\n\nplt.plot(edata, np.arange(len(edata)))\nplt.xlabel('Time of event')\nplt.ylabel('Event number')\nplt.title(\"Modeled underlying data\")\n\nwith mc.Model() as model:\n lam = mc.Uniform('lambda', 0, 1000) # this is the exponential parameter\n meas = mc.Exponential('meas', lam, observed=np.diff(edata))\n lam2 = mc.Uniform('lam2', 0, 1000)\n poi = mc.Poisson('Poisson', lam2, observed=cts)\n start = mc.find_MAP()\n trace = mc.sample(10000, start=start, njobs=8)\n\nmc.traceplot(trace, combined=True, lines={'lambda':20, 'lam2':20})\nmc.summary(trace)\n\nfig, ax = plt.subplots(ncols=1, nrows=2, sharex=True)\nsns.distplot(trace['lambda'], ax=ax[0])\nsns.distplot(trace['lam2'], ax=ax[1])\nplt.xlabel('Lambda')\nax[0].set_ylabel('Exp')\nax[1].set_ylabel('Poisson')\nax[0].axvline(20, c='r', lw=1)\nax[1].axvline(20, c='r', lw=1)\nplt.tight_layout()", "This is consistent with a Poisson of parameter 20! But there seems to be an under prediction going on, wonder why?\nGo through Posterior Predictive Checks (http://docs.pymc.io/notebooks/posterior_predictive.html) and see if we are reprodicting the mean and variance.", "ppc = mc.sample_ppc(trace, samples=500, model=model, size=100)\n\n\nax = plt.subplot()\nsns.distplot([n.mean() for n in ppc['Poisson']], kde=False, ax=ax)\nax.axvline(cts.mean())\nax.set(title='Posterior predictive of the mean (Poisson)', xlabel='mean(x)', ylabel='Frequency');\n\nax = plt.subplot()\nsns.distplot([n.var() for n in ppc['Poisson']], kde=False, ax=ax)\nax.axvline(cts.var())\nax.set(title='Posterior predictive of the variance (Poisson)', xlabel='var(x)', ylabel='Frequency');\n\nax = plt.subplot()\nsns.distplot([n.mean() for n in ppc['meas']], kde=False, ax=ax)\nax.axvline(np.diff(edata).mean())\nax.set(title='Posterior predictive of the mean (Exponential)', xlabel='mean(x)', ylabel='Frequency');\n\nax = plt.subplot()\nsns.distplot([n.var() for n in ppc['meas']], kde=False, ax=ax)\nax.axvline(np.diff(edata).var())\nax.set(title='Posterior predictive of the variance (Exponential)', xlabel='var(x)', ylabel='Frequency');", "We are reprodicting well. 
\nGiven the data we generated that will be treated as truth, what would we measure with various deadtime and does teh corection match what we think it should?\nCorrection should look like $n_1 = \\frac{R_1}{1-R_1 \\tau}$ where $n_1$ is real rate, $R_1$ is observed rate, and $\\tau$ is the dead time. \nTake edata from above and strep through from beginning to end only keeping points that are dead time away from the previous point.", "deadtime1 = 0.005 # small dead time\ndeadtime2 = 0.1 # large dead time\n\nedata_td1 = []\nedata_td1.append(edata[0])\nedata_td2 = []\nedata_td2.append(edata[0])\n\nfor ii, v in enumerate(edata[1:], 1): # stop one shy to not run over the end, start enumerate at 1\n if v - edata_td1[-1] >= deadtime1:\n edata_td1.append(v)\n if v - edata_td2[-1] >= deadtime2:\n edata_td2.append(v)\n \nedata_td1 = np.asarray(edata_td1) \nedata_td2 = np.asarray(edata_td2) \n \n\n\n\n\nplt.figure(figsize=(8,6))\nplt.plot(edata, np.arange(len(edata)), label='Real data')\n\nplt.plot(edata_td1, np.arange(len(edata_td1)), label='Small dead time')\nplt.plot(edata_td2, np.arange(len(edata_td2)), label='Large dead time')\n\nplt.xlabel('Time of event')\nplt.ylabel('Event number')\nplt.title(\"Modeled underlying data\")\n\nplt.legend(bbox_to_anchor=(1, 1))", "And plot the rates per unit time", "plt.figure(figsize=(8,6))\nh1, b1 = np.histogram(edata, np.arange(1000))\nplt.plot(tb.bin_edges_to_center(b1), h1, label='Real data', c='k')\n\nh2, b2 = np.histogram(edata_td1, np.arange(1000))\nplt.plot(tb.bin_edges_to_center(b2), h2, label='Small dead time', c='r')\n\nh3, b3 = np.histogram(edata_td2, np.arange(1000))\nplt.plot(tb.bin_edges_to_center(b3), h3, label='Large dead time')\n\n\nplt.legend(bbox_to_anchor=(1, 1))\nplt.xlim((0,400))\nplt.ylabel('Rate')\nplt.xlabel('Time')", "Can we use $n_1 = \\frac{R_1}{1-R_1 \\tau}$ to derive the relation and spread in the dist of R?\nAlgerbra changes math to: $R_1=\\frac{n_1}{1+n_1\\tau}$\nUse the small dead time", "# assume R1 is Poisson\n\nwith mc.Model() as model:\n tau = deadtime1\n obsRate = mc.Uniform('obsRate', 0, 1000, shape=1)\n obsData = mc.Poisson('obsData', obsRate, observed=h2[:400], shape=1)\n realRate = mc.Deterministic('realRate', obsData/(1-obsData*tau))\n start = mc.find_MAP()\n trace = mc.sample(10000, start=start, njobs=8)\n\nmc.traceplot(trace, combined=True, varnames=('obsRate', ))\nmc.summary(trace, varnames=('obsRate', ))\n\nsns.distplot(trace['realRate'].mean(axis=0), bins=10)\nplt.xlabel('realRate')\nplt.ylabel('Density')\n\ndt1_bounds = np.percentile(trace['realRate'], (2.5, 50, 97.5))\nprint('The estimate of the real rate given that we know the dead time is:', dt1_bounds, \n (dt1_bounds[2]-dt1_bounds[0])/dt1_bounds[1])\n\ndat_bounds = np.percentile(h1[:400], (2.5, 50, 97.5))\nprint(\"This compares with if we measured without dead time as:\", dat_bounds, \n (dat_bounds[2]-dat_bounds[0])/dat_bounds[1])\n\n", "Use the large dead time", "# assume R1 is Poisson\n\nwith mc.Model() as model:\n tau = deadtime2\n obsRate = mc.Uniform('obsRate', 0, 1000)\n obsData = mc.Poisson('obsData', obsRate, observed=h3[:400])\n realRate = mc.Deterministic('realRate', obsData/(1-obsData*tau))\n start = mc.find_MAP()\n trace = mc.sample(10000, start=start, njobs=8)\n\nmc.traceplot(trace, combined=True, varnames=('obsRate', ))\nmc.summary(trace, varnames=('obsRate', ))\n\nsns.distplot(trace['realRate'].mean(axis=0))\nplt.xlabel('realRate')\nplt.ylabel('Density')\n\ndt2_bounds = np.percentile(trace['realRate'], (2.5, 50, 97.5))\nprint('The estimate of 
the real rate given that we know the dead time is:', dt2_bounds, \n (dt2_bounds[2]-dt2_bounds[0])/dt2_bounds[1])\n\ndat_bounds = np.percentile(h1[:400], (2.5, 50, 97.5))\nprint(\"This compares with if we measured without dead time as:\", dat_bounds, \n (dat_bounds[2]-dat_bounds[0])/dat_bounds[1])\n\n", "But this is totally broken!!!\nOutput data files for each", "real = pd.Series(edata)\ntd1 = pd.Series(edata_td1)\ntd2 = pd.Series(edata_td2)\n\nreal.to_csv('no_deadtime_times.csv')\ntd1.to_csv('small_deadtime_times.csv')\ntd2.to_csv('large_deadtime_times.csv')\n\n\n\nreal = pd.Series(h1[h1>0])\ntd1 = pd.Series(h2[h2>0])\ntd2 = pd.Series(h3[h3>0])\n\nreal.to_csv('no_deadtime_rates.csv')\ntd1.to_csv('small_deadtime_rates.csv')\ntd2.to_csv('large_deadtime_rates.csv')", "Work on the random thoughts", "with mc.Model() as model:\n BoundedExp = mc.Bound(mc.Exponential, lower=deadtime2, upper=None)\n \n # we observe the following time between counts\n lam = mc.Uniform('lam', 0, 1000)\n time_between = BoundedExp('tb_ob', lam, observed=np.diff(edata_td2))\n start = mc.find_MAP()\n trace = mc.sample(10000, njobs=8, start=start)\n \n " ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
bt3gl/Machine-Learning-Resources
ml_notebooks/synthetic_features_and_outliers.ipynb
gpl-2.0
[ "Copyright 2017 Google LLC.", "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "Synthetic Features and Outliers\nLearning Objectives:\n * Create a synthetic feature that is the ratio of two other features\n * Use this new feature as an input to a linear regression model\n * Improve the effectiveness of the model by identifying and clipping (removing) outliers out of the input data\nLet's revisit our model from the previous First Steps with TensorFlow exercise. \nFirst, we'll import the California housing data into a pandas DataFrame:\nSetup", "from __future__ import print_function\n\nimport math\n\nfrom IPython import display\nfrom matplotlib import cm\nfrom matplotlib import gridspec\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport sklearn.metrics as metrics\nimport tensorflow as tf\nfrom tensorflow.python.data import Dataset\n\ntf.logging.set_verbosity(tf.logging.ERROR)\npd.options.display.max_rows = 10\npd.options.display.float_format = '{:.1f}'.format\n\ncalifornia_housing_dataframe = pd.read_csv(\"https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv\", sep=\",\")\n\ncalifornia_housing_dataframe = california_housing_dataframe.reindex(\n np.random.permutation(california_housing_dataframe.index))\ncalifornia_housing_dataframe[\"median_house_value\"] /= 1000.0\ncalifornia_housing_dataframe", "Next, we'll set up our input function, and define the function for model training:", "def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):\n \"\"\"Trains a linear regression model of one feature.\n \n Args:\n features: pandas DataFrame of features\n targets: pandas DataFrame of targets\n batch_size: Size of batches to be passed to the model\n shuffle: True or False. Whether to shuffle the data.\n num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely\n Returns:\n Tuple of (features, labels) for next data batch\n \"\"\"\n \n # Convert pandas data into a dict of np arrays.\n features = {key:np.array(value) for key,value in dict(features).items()} \n \n # Construct a dataset, and configure batching/repeating.\n ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit\n ds = ds.batch(batch_size).repeat(num_epochs)\n \n # Shuffle the data, if specified.\n if shuffle:\n ds = ds.shuffle(buffer_size=10000)\n \n # Return the next batch of data.\n features, labels = ds.make_one_shot_iterator().get_next()\n return features, labels\n\ndef train_model(learning_rate, steps, batch_size, input_feature):\n \"\"\"Trains a linear regression model.\n \n Args:\n learning_rate: A `float`, the learning rate.\n steps: A non-zero `int`, the total number of training steps. 
A training step\n consists of a forward and backward pass using a single batch.\n batch_size: A non-zero `int`, the batch size.\n input_feature: A `string` specifying a column from `california_housing_dataframe`\n to use as input feature.\n \n Returns:\n A Pandas `DataFrame` containing targets and the corresponding predictions done\n after training the model.\n \"\"\"\n \n periods = 10\n steps_per_period = steps / periods\n\n my_feature = input_feature\n my_feature_data = california_housing_dataframe[[my_feature]].astype('float32')\n my_label = \"median_house_value\"\n targets = california_housing_dataframe[my_label].astype('float32')\n\n # Create input functions.\n training_input_fn = lambda: my_input_fn(my_feature_data, targets, batch_size=batch_size)\n predict_training_input_fn = lambda: my_input_fn(my_feature_data, targets, num_epochs=1, shuffle=False)\n \n # Create feature columns.\n feature_columns = [tf.feature_column.numeric_column(my_feature)]\n \n # Create a linear regressor object.\n my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)\n linear_regressor = tf.estimator.LinearRegressor(\n feature_columns=feature_columns,\n optimizer=my_optimizer\n )\n\n # Set up to plot the state of our model's line each period.\n plt.figure(figsize=(15, 6))\n plt.subplot(1, 2, 1)\n plt.title(\"Learned Line by Period\")\n plt.ylabel(my_label)\n plt.xlabel(my_feature)\n sample = california_housing_dataframe.sample(n=300)\n plt.scatter(sample[my_feature], sample[my_label])\n colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]\n\n # Train the model, but do so inside a loop so that we can periodically assess\n # loss metrics.\n print(\"Training model...\")\n print(\"RMSE (on training data):\")\n root_mean_squared_errors = []\n for period in range (0, periods):\n # Train the model, starting from the prior state.\n linear_regressor.train(\n input_fn=training_input_fn,\n steps=steps_per_period,\n )\n # Take a break and compute predictions.\n predictions = linear_regressor.predict(input_fn=predict_training_input_fn)\n predictions = np.array([item['predictions'][0] for item in predictions])\n \n # Compute loss.\n root_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(predictions, targets))\n # Occasionally print the current loss.\n print(\" period %02d : %0.2f\" % (period, root_mean_squared_error))\n # Add the loss metrics from this period to our list.\n root_mean_squared_errors.append(root_mean_squared_error)\n # Finally, track the weights and biases over time.\n # Apply some math to ensure that the data and line are plotted neatly.\n y_extents = np.array([0, sample[my_label].max()])\n \n weight = linear_regressor.get_variable_value('linear/linear_model/%s/weights' % input_feature)[0]\n bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')\n \n x_extents = (y_extents - bias) / weight\n x_extents = np.maximum(np.minimum(x_extents,\n sample[my_feature].max()),\n sample[my_feature].min())\n y_extents = weight * x_extents + bias\n plt.plot(x_extents, y_extents, color=colors[period]) \n print(\"Model training finished.\")\n\n # Output a graph of loss metrics over periods.\n plt.subplot(1, 2, 2)\n plt.ylabel('RMSE')\n plt.xlabel('Periods')\n plt.title(\"Root Mean Squared Error vs. 
Periods\")\n plt.tight_layout()\n plt.plot(root_mean_squared_errors)\n\n # Create a table with calibration data.\n calibration_data = pd.DataFrame()\n calibration_data[\"predictions\"] = pd.Series(predictions)\n calibration_data[\"targets\"] = pd.Series(targets)\n display.display(calibration_data.describe())\n\n print(\"Final RMSE (on training data): %0.2f\" % root_mean_squared_error)\n \n return calibration_data", "Task 1: Try a Synthetic Feature\nBoth the total_rooms and population features count totals for a given city block.\nBut what if one city block were more densely populated than another? We can explore how block density relates to median house value by creating a synthetic feature that's a ratio of total_rooms and population.\nIn the cell below, create a feature called rooms_per_person, and use that as the input_feature to train_model().\nWhat's the best performance you can get with this single feature by tweaking the learning rate? (The better the performance, the better your regression line should fit the data, and the lower\nthe final RMSE should be.)\nNOTE: You may find it helpful to add a few code cells below so you can try out several different learning rates and compare the results. To add a new code cell, hover your cursor directly below the center of this cell, and click CODE.", "#\n# YOUR CODE HERE\n#\ncalifornia_housing_dataframe[\"rooms_per_person\"] =\n\ncalibration_data = train_model(\n learning_rate=0.00005,\n steps=500,\n batch_size=5,\n input_feature=\"rooms_per_person\"\n)", "Solution\nClick below for a solution.", "california_housing_dataframe[\"rooms_per_person\"] = (\n california_housing_dataframe[\"total_rooms\"] / california_housing_dataframe[\"population\"])\n\ncalibration_data = train_model(\n learning_rate=0.05,\n steps=500,\n batch_size=5,\n input_feature=\"rooms_per_person\")", "Task 2: Identify Outliers\nWe can visualize the performance of our model by creating a scatter plot of predictions vs. target values. Ideally, these would lie on a perfectly correlated diagonal line.\nUse Pyplot's scatter() to create a scatter plot of predictions vs. targets, using the rooms-per-person model you trained in Task 1.\nDo you see any oddities? Trace these back to the source data by looking at the distribution of values in rooms_per_person.", "# YOUR CODE HERE", "Solution\nClick below for the solution.", "plt.figure(figsize=(15, 6))\nplt.subplot(1, 2, 1)\nplt.scatter(calibration_data[\"predictions\"], calibration_data[\"targets\"])", "The calibration data shows most scatter points aligned to a line. The line is almost vertical, but we'll come back to that later. Right now let's focus on the ones that deviate from the line. We notice that they are relatively few in number.\nIf we plot a histogram of rooms_per_person, we find that we have a few outliers in our input data:", "plt.subplot(1, 2, 2)\n_ = california_housing_dataframe[\"rooms_per_person\"].hist()", "Task 3: Clip Outliers\nSee if you can further improve the model fit by setting the outlier values of rooms_per_person to some reasonable minimum or maximum.\nFor reference, here's a quick example of how to apply a function to a Pandas Series:\nclipped_feature = my_dataframe[\"my_feature_name\"].apply(lambda x: max(x, 0))\n\nThe above clipped_feature will have no values less than 0.", "# YOUR CODE HERE", "Solution\nClick below for the solution.\nThe histogram we created in Task 2 shows that the majority of values are less than 5. 
Let's clip rooms_per_person to 5, and plot a histogram to double-check the results.", "california_housing_dataframe[\"rooms_per_person\"] = (\n california_housing_dataframe[\"rooms_per_person\"]).apply(lambda x: min(x, 5))\n\n_ = california_housing_dataframe[\"rooms_per_person\"].hist()", "To verify that clipping worked, let's train again and print the calibration data once more:", "calibration_data = train_model(\n learning_rate=0.05,\n steps=500,\n batch_size=5,\n input_feature=\"rooms_per_person\")\n\n_ = plt.scatter(calibration_data[\"predictions\"], calibration_data[\"targets\"])" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
ishank26/nn_from_scratch
.ipynb_checkpoints/mlnn-checkpoint.ipynb
gpl-3.0
[ "import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport sklearn as skl\nimport sklearn.datasets\nimport sklearn.linear_model\n%matplotlib inline\n\n# Generate data\nX, y = sklearn.datasets.make_moons(300, noise=0.22)\nplt.figure(figsize=(7, 5))\nplt.scatter(X[:, 0], X[:, 1], s=15, c=y, cmap=plt.cm.Spectral)\nplt.show()", "&nbsp;\nFeedforward Neural Network", "# import feedforward neural net \nfrom mlnn import neural_net", "<script type=\"text/javascript\" src=\"https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML\"></script>\nLet's build a 4-layer neural network. Our network has one input layer, two hidden layer and one output layer. Our model can be represented as a directed acyclic graph wherein each node in a layer is connected all other nodes in its succesive layer. The neural net is shown below-\n\nEach node in the hidden layer uses a nonlinear activation function $f(x)$, which computes the outputs from its inputs and transfer these outputs to successive layers. Here we've used $f(x)= tanh(x)$, as our non-linear activation. Its derivative is given by- $f'(x)= 1-tanh(x)^2$. \nOur network graph can be represented as-\n\n| Layer No. | Notation | Value | Variable | \n|----------:|-----------:|---------------------------------------------:|----------:|\n|&nbsp;&nbsp;1 | &nbsp;&nbsp; X | &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; $X$| X|\n|&nbsp;&nbsp; 2 |&nbsp;&nbsp; W1(~)+b1 | &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp;&nbsp; $W1X+b1$|&nbsp;&nbsp; pre_act1|\n| &nbsp;&nbsp;2 |&nbsp;&nbsp; tanh | &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp;&nbsp; $tanh(W1X+b1)$| &nbsp;&nbsp; act1|\n| &nbsp;&nbsp;3 |&nbsp;&nbsp; W2(~)+b2 | &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp;&nbsp; $W2(tanh(W1X +b1))+b2$| &nbsp;&nbsp; pre_act2|\n|&nbsp;&nbsp; 3 |&nbsp;&nbsp; tanh | &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;$tanh(W2(tanhW1X+b1))+b2)$| &nbsp;&nbsp; act2|\n| &nbsp;&nbsp;4 |&nbsp;&nbsp; W3(~)+b3 |&nbsp;&nbsp;&nbsp;$W3(tanh(W2(tanhW1X+b1))+b2)+b3$ | &nbsp;&nbsp; pre_act3|\n| &nbsp;&nbsp;4 |&nbsp;&nbsp; softmax |$softmax(W3(tanh(W2(tanhW1X+b1)+b2))+b3)$ </br>| &nbsp;&nbsp; act3| \nBackpropagation\nNow we formulate the backpropagation algorithm or backprop for training the network. For derivation of the backprop, please see Dr. Hugo Larochelle's excellent course on neural networks. 
\n$ \\large\\frac{\\partial L}{\\partial Pred} = \\frac{\\partial L}{\\partial L} * \\frac{\\partial L}{\\partial Pred} $\n$ \\large\\frac{\\partial L}{\\partial act3} = \\frac{\\partial L}{\\partial Pred} * \\frac{\\partial Pred}{\\partial act3} $\n$ \\large\\frac{\\partial L}{\\partial pre_act3} = \\frac{\\partial L}{\\partial act3} * \\frac{\\partial act3}{\\partial pre_act3}= \\delta4$ \n$ \\large\\frac{\\partial L}{\\partial act2} = \\frac{\\partial L}{\\partial pre_act3} * \\frac{\\partial pre_act3}{\\partial act2} $\n$ \\large\\frac{\\partial L}{\\partial pre_act2} = \\frac{\\partial L}{\\partial act2} * \\frac{\\partial act2}{\\partial pre_act2}= \\delta3$ \n$ \\large\\frac{\\partial L}{\\partial act1} = \\frac{\\partial L}{\\partial pre_act2} * \\frac{\\partial pre_act2}{\\partial act1} $\n$ \\large\\frac{\\partial L}{\\partial pre_act1} = \\frac{\\partial L}{\\partial act1} * \\frac{\\partial act1}{\\partial pre_act1}= \\delta2$ \n$ \\large\\frac{\\partial L}{\\partial W3} = \\delta4 * \\frac{\\partial pre_act3}{\\partial W3}$ \n$ \\large\\frac{\\partial L}{\\partial W2} = \\delta3 * \\frac{\\partial pre_act2}{\\partial W2}$ \n$ \\large\\frac{\\partial L}{\\partial W1} = \\delta2 * \\frac{\\partial pre_act1}{\\partial W1}$", "# Visualize tanh and its derivative\nx = np.linspace(-np.pi, np.pi, 120)\nplt.figure(figsize=(8, 3))\nplt.subplot(1, 2, 1)\nplt.plot(x, np.tanh(x))\nplt.title(\"tanh(x)\")\nplt.xlim(-3, 3)\nplt.subplot(1, 2, 2)\nplt.plot(x, 1 - np.square(np.tanh(x)))\nplt.xlim(-3, 3)\nplt.title(\"tanh\\'(x)\")\nplt.show()", "It can be seen from the above figure that as we increase our input, our activation starts to saturate, which can in turn kill gradients. This can be mitigated using rectified activation functions. Another problem that we encounter in training deep neural networks during backpropagation is vanishing gradients and gradient explosion. It can be observed that the derivative of our nth activation, $\\large\\frac{\\partial act_n}{\\partial pre_act_n}$, is fairly large only near zero. Let's assume that the weights are $< 1$; this will usually satisfy $|w_{i}*tanh'(x)| < 1$. The successive product of such values in each layer will exponentially decrease the computed product, leading to a vanishing gradient. This is not a rigorous explanation of the vanishing gradient problem. For more information refer to this article. \nSimilarly, if the weights are large (e.g. 100, 40, ...), we can formulate the gradient explosion problem.", "# Training the neural network\n\nmy_nn = neural_net([2, 4, 2]) # [2,4,2] = [input nodes, hidden nodes, output nodes]\n\nmy_nn.train(X, y, 0.001, 0.0001) # weights regularization lambda= 0.001 , epsilon= 0.0001\n\n### visualize predictions\nmy_nn.visualize_preds(X ,y)", "Animate Training:", "X_, y_ = sklearn.datasets.make_circles(n_samples=400, noise=0.18, factor=0.005, random_state=1)\nplt.figure(figsize=(7, 5))\nplt.scatter(X_[:, 0], X_[:, 1], s=15, c=y_, cmap=plt.cm.Spectral)\nplt.show() \n\n'''\nUncomment the code below to see classification process for above data.\nTo stop training early reduce no. of iterations.\n'''\n\n#new_nn = neural_net([2, 6, 2])\n#new_nn.animate_preds(X_, y_, 0.001, 0.0001) # max iterations = 35000 " ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
vadim-ivlev/STUDY
handson-data-science-python/DataScience-Python3/.ipynb_checkpoints/MeanMedianMode-checkpoint.ipynb
mit
[ "Mean, Median, Mode, and introducing NumPy\nMean vs. Median\nLet's create some fake income data, centered around 27,000 with a normal distribution and standard deviation of 15,000, with 10,000 data points. (We'll discuss those terms more later, if you're not familiar with them.)\nThen, compute the mean (average) - it should be close to 27,000:", "import numpy as np\n\nincomes = np.random.normal(27000, 15000, 10000)\nnp.mean(incomes)", "We can segment the income data into 50 buckets, and plot it as a histogram:", "%matplotlib inline\n# %config InlineBackend.figure_format='retina'\n# import seaborn as sns\n\n# sns.set_context(\"paper\")\n# sns.set_style(\"white\")\n# sns.set()\n\nimport matplotlib.pyplot as plt\nplt.hist(incomes, 50)\nplt.show()", "Now compute the median - since we have a nice, even distribution it too should be close to 27,000:", "np.median(incomes)", "Now we'll add Donald Trump into the mix. Darn income inequality!", "incomes = np.append(incomes, [1000000000])", "The median won't change much, but the mean does:", "np.median(incomes)\n\nnp.mean(incomes)", "Mode\nNext, let's generate some fake age data for 500 people:", "ages = np.random.randint(18, high=90, size=500)\nages\n\nfrom scipy import stats\nstats.mode(ages)" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
NYUDataBootcamp/Projects
UG_F16/RodriguezBallve-Spain's_Labor_Market.ipynb
mit
[ "Exploring Spain's Broken Labor Market\nAuthor Bosco Rodríguez Ballvé\nDate Fall 2016\nClass Data Bootcamp @ NYU Stern \nInstructors Coleman, Lyon\nAbstract\nA successful economy in the 21st century, in which the mix of products and services is changing constantly, requires a dynamic labor market as a mechanism to shift capital and labor. The ability of an economy to reallocate jobs across firms, industries, and geographical areas is, perhaps, even more important than capital. \nDecades of persistently high unemployment in Spain, regardless of business cycle fluctuations, suggest that lack labor market dynamism has hindered Spain’s economy. Currently, almost a decade after the Great Recession, with economic recovery underway, Spain’s unemployment rate remains stubbornly high. Particularly amongst the youth. Chronic unemployment suggests deep rooted, structural causes that go beyond demand-deficient or cyclical unemployment. \nThe aim of this project is to compile and process data to shed ight on the relationship between education levels, age and structural unemployment in Spain.\nLoading the modules", "import pandas as pd\nfrom pandas_datareader import data, wb # we will be working with World Bank Data \nimport wbdata\nimport pandas\nimport matplotlib.pyplot as plt\nimport sys \nimport matplotlib as mpl \nimport matplotlib.pyplot as plt \nimport datetime as dt \n%matplotlib inline ", "Data extraction and clean up\nMy first data source is the World Bank. We will access World Bank data by using 'Wbdata', Wbdata is a simple python interface to find and request information from the World Bank's various databases, either as a dictionary containing full metadata or as a pandas DataFrame. Currently, wbdata wraps most of the World Bank API, and also adds some convenience functions for searching and retrieving information.\nDocumentation is available at http://wbdata.readthedocs.org/ \nWe install it with 'pip install wbdata'\nCredits go to:\nSherouse, Oliver (2014). Wbdata. Arlington, VA. Available from http://github.com/OliverSherouse/wbdata.\nLet's get to it.", "wb.search('gdp.*capita.*const') # we use this function to search for GDP related indicators\n\nwb.search('employment') # we use this function to search for employment related indicators\n\nwb.search('unemployment') # we use this function to search for unemployment related indicators\n\n#I have identified the relevant variables in the three fields\n#To download data for multiple indicators, I specify them as a list \n#ESP is the ISO code for Spain\n#I equalize the start and end dates\nwb.download( indicator=['NY.GDP.PCAP.CD','SL.UEM.TOTL.ZS','SL.UEM.1524.ZS',\n 'SL.UEM.PRIM.ZS', 'SL.UEM.SECO.ZS','SL.UEM.TERT.ZS','SL.UEM.NEET.MA.ZS','SL.UEM.NEET.MA.ZS'], \n country=['ESP'], start=1990, end=2015)\n#Construct the dataframe\ndata = wb.download(indicator=['NY.GDP.PCAP.CD','SL.UEM.TOTL.ZS','SL.UEM.1524.ZS',\n 'SL.UEM.PRIM.ZS', 'SL.UEM.SECO.ZS','SL.UEM.TERT.ZS','SL.UEM.NEET.MA.ZS','SL.UEM.NEET.MA.ZS'], \n country=['ESP'], start=1990, end=2015)\nesplbr = pd.DataFrame(data)\n#Rename the columns for clarity \nesplbr.columns = [\"GDP/capita(US$ 2016)\", \"UnemploymentRate\", \"YouthUnempRate\", \"UnempW/PrimEd.\", \"UnempW/SecEd\",\"UnempW/TertEd\", \"Ni-nis\"]\nesplbr\n#What on earth are Ni-nis? 
A Spanish neologism for \"ni estudia, ni trabaja\": percentage of youth \"not working, not studying\"\n#A cultural and socioeconomic phenomenon\n\n# Wbata renders a complex multi-index, which I convert to old-school columns that are easier to work with\nesplbr.reset_index(inplace=True) \nesplbr\n\nesplbr.columns\n\n# housekeeping for column names \nesplbr.columns = [\"Country\", \"Year\", \"GDP/capita(US$ 2016)\", \"UnemploymentRate\", \"YouthUnempRate\", \"UnempW/PrimEd.\", \"UnempW/SecEd\",\"UnempW/TertEd\", \"Ni-nis\"]\nesplbr\n\n\n# we know we are dealing exclusively with Spain, so we drop the reduntdant 'Country' column\nesplbr.drop('Country', axis=1, inplace=True)\nesplbr\n\n# what do I have in my hands?\nesplbr.dtypes\n\nesplbr.index", "Plotting the data", "# with a clean and orthodox Dataframe, I can start to do some graphics\nimport matplotlib.pyplot as plt\n%matplotlib inline\n# we invert the x axis. Never managed to make 'Year' the X axis, lost a lot of hair in the process :(\nplt.gca().invert_xaxis() # Came up with this solution\n# and add the indicators \nplt.plot(esplbr.index, esplbr['UnemploymentRate'])\nplt.plot(esplbr.index, esplbr['YouthUnempRate'])\nplt.plot(esplbr.index, esplbr['Ni-nis'])\n# and modify the plot\nplt.title('Labor Market in Spain', fontsize=14, loc='left') # add title\nplt.ylabel('Percentage Unemployed') # y axis label \nplt.legend(['UnemploymentRate', 'YouthUnempRate','Ni-nis'], fontsize=8, loc=0) ", "Observations\n\n\nSpain has recently lived through a depression without precedent, yet unemployment rates above 20% are nothing new: there is a large structural component in addition to the demand-deficient factor.\n\n\nYouth unemployment is particuarly bad, which is the norm elsewhere too, but the spread is accentuated in Spain. Deductively, this hints at labor market duality between bullet-proof contracts and part-time or 'indefinite' contracts.", "# let's take a look at unemployment by education level\nimport matplotlib.pyplot as plt\n%matplotlib inline\n# we invert the x axis\nplt.gca().invert_xaxis()\n#we add the variables \nplt.plot(esplbr.index, esplbr['UnempW/PrimEd.'])\nplt.plot(esplbr.index, esplbr['UnempW/SecEd'])\nplt.plot(esplbr.index, esplbr['UnempW/TertEd'])\nplt.plot(esplbr.index, esplbr['Ni-nis'])\n# we modify the plot\nplt.title('Education and Employment Outcomes', fontsize=14, loc='left')\nplt.ylabel('Percentage Unemployed') \nplt.legend(['UnempW/PrimEd.', 'UnempW/SecEd','UnempW/TertEd', 'Ni-nis'], fontsize=7, loc=0) ", "Observations\n\nThose unemployed with only primary education completed and ni-nis start to rise hand in hand ten years ago, when the crisis hits. This suggests overlap between the two groups.\nThe elephant in the room a massive construction bubble that made Spain's variant of the crisis particularly brutal. For decades, a debt-fueled bubble in real estate signaled youngsters to drop the books and pick up the bricks.\nThe labor market now faces the painful readjustment of the economy's productive model, from \"deuda y ladrillo\" (debt and brick) to exports, that account for Spain's recent growth\n\nP.S.: if you ever need to investigate (how not to execute) a Keynesian stimulus plan, check out how the government's Plan E added fuel to malinvestments http://www.economist.com/node/13611650 \nDigging for more\nI'm interested in measuring structural unemployment. 
Ideally, I would build an unemployment model myself based on separation and accession rates to arrive at the Natural Rate of Unemployment, as we see in one of my three bibles:\nhttp://www.stern.nyu.edu/sites/default/files/assets/documents/The_Global_Economy_Amazon_Digital%20%282%29.pdf\nIn the interest of time, I sought an indicator that acts as a proxy for structural unemployment. The NAIRU and NAWRU come to mind, but they are not reported by the World Bank. \nAnd so I became acquainted with Quandl's API and proceeded to dig through several economic databases, and landed at the notorious OECD database: I suspect Quandl and I are going to become good friends moving forward. \nLoad the modules", "# Don't forget the DMV paperwork\nimport quandl # Quandl package\nquandl.ApiConfig.api_key = '3w_GYBRfX3ZxG7my_vhs' # register for a key and unlimited number of requests \n# Playing it safe\nimport sys # system module\nimport pandas as pd # data package\nimport matplotlib.pyplot as plt # graphics module \nimport datetime as dt # date and time module\nimport numpy as np\n%matplotlib inline \n", "Data extraction and clean up\nWe're going to be comparing Spain's NAIRU to that of Denmark. Don't tell Sanders, but Denmark is well known for having one of the most 'flexible' labor markets in Europe.", "# We extract the indicators and print the dataframe\nNAIRU = quandl.get((['OECD/EO91_INTERNET_ESP_NAIRU_A','OECD/EO91_INTERNET_DNK_NAIRU_A']), #We call for both\n start_date = \"1990-12-31\", end_date = \"2013-12-31\") # And limit the time horizon \nNAIRU\n\n# What do we have here?\ntype(NAIRU)\n\nNAIRU.columns \n\n# Dataframe housekeeping \nNAIRU.columns = ['NAIRU Spain', 'NAIRU Denmark']\nNAIRU\n\n# Nice and polished\nNAIRU.columns\n\nplt.style.available #Take a look at the menu\n\n# We are ready to plot\nimport matplotlib.pyplot as plt\n%matplotlib inline\n#we add the variables \nplt.plot(NAIRU.index, NAIRU['NAIRU Spain'])\nplt.plot(NAIRU.index, NAIRU['NAIRU Denmark'])\n#We modify the plot\nplt.title('Measuring Structural Unemployment ESP v DEN', fontsize=15, loc='left') # add title\nplt.ylabel('Percentage Unemployed') # y axis label \nplt.legend(['NAIRU Spain', 'NAIRU Denmark'], fontsize=8, loc=2) # legend with more descriptive variable names\nplt.style.use(\"bmh\")", "Observations\n\nAlthough the NAIRU is not a perfect proxy for structural unemployment, it's a good place to start.\nAgain, we witness how Spain's unemployment problem is almost ingrained in its 'production function'.\n\n\nThis project has left me at the doors of great questions,\nthat this course has given me the tools to answer,\nand for that I thank you,\nBosco Rodríguez Ballvé" ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]