index (int64, 0-0) | repo_id (string, 21-232 chars) | file_path (string, 34-259 chars) | content (string, 1-14.1M chars) | __index_level_0__ (int64, 0-10k) |
---|---|---|---|---|
0 | kubeflow_public_repos/kfp-tekton-backend/frontend/src/components | kubeflow_public_repos/kfp-tekton-backend/frontend/src/components/__snapshots__/Router.test.tsx.snap | // Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`Router initial render 1`] = `
<SideNavLayout>
<Switch>
<Route
exact={true}
path="/"
render={[Function]}
/>
<Route
exact={true}
key="0"
path="/start"
render={[Function]}
/>
<Route
exact={true}
key="1"
path="/archive/runs"
render={[Function]}
/>
<Route
exact={true}
key="2"
path="/archive/experiments"
render={[Function]}
/>
<Route
exact={true}
key="3"
path="/artifacts"
render={[Function]}
/>
<Route
exact={false}
key="4"
path="/artifacts/:id"
render={[Function]}
/>
<Route
exact={true}
key="5"
path="/executions"
render={[Function]}
/>
<Route
exact={true}
key="6"
path="/executions/:id"
render={[Function]}
/>
<Route
exact={true}
key="7"
path="/experiments"
render={[Function]}
/>
<Route
exact={true}
key="8"
path="/experiments/details/:eid"
render={[Function]}
/>
<Route
exact={true}
key="9"
path="/experiments/new"
render={[Function]}
/>
<Route
exact={true}
key="10"
path="/pipeline_versions/new"
render={[Function]}
/>
<Route
exact={true}
key="11"
path="/runs/new"
render={[Function]}
/>
<Route
exact={true}
key="12"
path="/pipelines"
render={[Function]}
/>
<Route
exact={true}
key="13"
path="/pipelines/details/:pid/version/:vid?"
render={[Function]}
/>
<Route
exact={true}
key="14"
path="/pipelines/details/:pid?"
render={[Function]}
/>
<Route
exact={true}
key="15"
path="/runs"
render={[Function]}
/>
<Route
exact={true}
key="16"
path="/recurringrun/details/:rid"
render={[Function]}
/>
<Route
exact={true}
key="17"
path="/runs/details/:rid"
render={[Function]}
/>
<Route
exact={true}
key="18"
path="/compare"
render={[Function]}
/>
<Route>
<RoutedPage />
</Route>
</Switch>
</SideNavLayout>
`;
| 8,000 |
0 | kubeflow_public_repos/kfp-tekton-backend/frontend/src/components | kubeflow_public_repos/kfp-tekton-backend/frontend/src/components/__snapshots__/Trigger.test.tsx.snap | // Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`Trigger enables a single day on click 1`] = `
<div>
<Input
label="Trigger type"
onChange={[Function]}
required={true}
select={true}
value={1}
variant="outlined"
>
<WithStyles(MenuItem)
key="0"
value={0}
>
Periodic
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="1"
value={1}
>
Cron
</WithStyles(MenuItem)>
</Input>
<div>
<Input
label="Maximum concurrent runs"
onChange={[Function]}
required={true}
value="10"
variant="outlined"
/>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label="Has start date"
/>
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="Start date"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="date"
value="2018-12-21"
variant="outlined"
width={160}
/>
<Separator />
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="Start time"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="time"
value="07:53"
variant="outlined"
width={120}
/>
</div>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label="Has end date"
/>
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="End date"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="date"
value="2018-12-21"
variant="outlined"
width={160}
/>
<Separator />
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="End time"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="time"
value="07:53"
variant="outlined"
width={120}
/>
</div>
<span
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={true}
color="primary"
onClick={[Function]}
/>
}
label="Catchup"
/>
<HelpButton
helpText={
<div>
<p>
Whether the recurring run should catch up if behind schedule. Defaults to true.
</p>
<p>
For example, if the recurring run is paused for a while and re-enabled afterwards. If catchup=true, the scheduler will catch up on (backfill) each missed interval. Otherwise, it only schedules the latest interval if more than one interval is ready to be scheduled.
</p>
<p>
Usually, if your pipeline handles backfill internally, you should turn catchup off to avoid duplicate backfill.
</p>
</div>
}
/>
</span>
<span
className="flex"
>
Run every
<Separator />
<Input
height={30}
onChange={[Function]}
required={true}
select={true}
value="Week"
variant="outlined"
width={95}
>
<WithStyles(MenuItem)
key="0"
value="Minute"
>
Minute
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="1"
value="Hour"
>
Hour
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="2"
value="Day"
>
Day
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="3"
value="Week"
>
Week
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="4"
value="Month"
>
Month
</WithStyles(MenuItem)>
</Input>
</span>
</div>
<div>
<div>
<span>
On:
</span>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label="All"
/>
<Separator />
<WithStyles(Button)
color="primary"
key="0"
mini={true}
onClick={[Function]}
variant="fab"
>
S
</WithStyles(Button)>
<WithStyles(Button)
color="secondary"
key="1"
mini={true}
onClick={[Function]}
variant="fab"
>
M
</WithStyles(Button)>
<WithStyles(Button)
color="primary"
key="2"
mini={true}
onClick={[Function]}
variant="fab"
>
T
</WithStyles(Button)>
<WithStyles(Button)
color="secondary"
key="3"
mini={true}
onClick={[Function]}
variant="fab"
>
W
</WithStyles(Button)>
<WithStyles(Button)
color="primary"
key="4"
mini={true}
onClick={[Function]}
variant="fab"
>
T
</WithStyles(Button)>
<WithStyles(Button)
color="primary"
key="5"
mini={true}
onClick={[Function]}
variant="fab"
>
F
</WithStyles(Button)>
<WithStyles(Button)
color="primary"
key="6"
mini={true}
onClick={[Function]}
variant="fab"
>
S
</WithStyles(Button)>
</div>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label={
<span>
Allow editing cron expression. ( format is specified
<a
href="https://godoc.org/github.com/robfig/cron#hdr-CRON_Expression_Format"
>
here
</a>
)
</span>
}
/>
</div>
<Input
disabled={true}
label="cron expression"
onChange={[Function]}
value="0 0 0 ? * 0,2,4,5,6"
variant="outlined"
width={300}
/>
<div>
Note: Start and end dates/times are handled outside of cron.
</div>
</div>
</div>
`;
exports[`Trigger renders all week days enabled 1`] = `
<div>
<Input
label="Trigger type"
onChange={[Function]}
required={true}
select={true}
value={1}
variant="outlined"
>
<WithStyles(MenuItem)
key="0"
value={0}
>
Periodic
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="1"
value={1}
>
Cron
</WithStyles(MenuItem)>
</Input>
<div>
<Input
label="Maximum concurrent runs"
onChange={[Function]}
required={true}
value="10"
variant="outlined"
/>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label="Has start date"
/>
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="Start date"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="date"
value="2018-12-21"
variant="outlined"
width={160}
/>
<Separator />
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="Start time"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="time"
value="07:53"
variant="outlined"
width={120}
/>
</div>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label="Has end date"
/>
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="End date"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="date"
value="2018-12-21"
variant="outlined"
width={160}
/>
<Separator />
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="End time"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="time"
value="07:53"
variant="outlined"
width={120}
/>
</div>
<span
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={true}
color="primary"
onClick={[Function]}
/>
}
label="Catchup"
/>
<HelpButton
helpText={
<div>
<p>
Whether the recurring run should catch up if behind schedule. Defaults to true.
</p>
<p>
For example, if the recurring run is paused for a while and re-enabled afterwards. If catchup=true, the scheduler will catch up on (backfill) each missed interval. Otherwise, it only schedules the latest interval if more than one interval is ready to be scheduled.
</p>
<p>
Usually, if your pipeline handles backfill internally, you should turn catchup off to avoid duplicate backfill.
</p>
</div>
}
/>
</span>
<span
className="flex"
>
Run every
<Separator />
<Input
height={30}
onChange={[Function]}
required={true}
select={true}
value="Week"
variant="outlined"
width={95}
>
<WithStyles(MenuItem)
key="0"
value="Minute"
>
Minute
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="1"
value="Hour"
>
Hour
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="2"
value="Day"
>
Day
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="3"
value="Week"
>
Week
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="4"
value="Month"
>
Month
</WithStyles(MenuItem)>
</Input>
</span>
</div>
<div>
<div>
<span>
On:
</span>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label="All"
/>
<Separator />
<WithStyles(Button)
color="secondary"
key="0"
mini={true}
onClick={[Function]}
variant="fab"
>
S
</WithStyles(Button)>
<WithStyles(Button)
color="secondary"
key="1"
mini={true}
onClick={[Function]}
variant="fab"
>
M
</WithStyles(Button)>
<WithStyles(Button)
color="secondary"
key="2"
mini={true}
onClick={[Function]}
variant="fab"
>
T
</WithStyles(Button)>
<WithStyles(Button)
color="secondary"
key="3"
mini={true}
onClick={[Function]}
variant="fab"
>
W
</WithStyles(Button)>
<WithStyles(Button)
color="secondary"
key="4"
mini={true}
onClick={[Function]}
variant="fab"
>
T
</WithStyles(Button)>
<WithStyles(Button)
color="secondary"
key="5"
mini={true}
onClick={[Function]}
variant="fab"
>
F
</WithStyles(Button)>
<WithStyles(Button)
color="secondary"
key="6"
mini={true}
onClick={[Function]}
variant="fab"
>
S
</WithStyles(Button)>
</div>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label={
<span>
Allow editing cron expression. ( format is specified
<a
href="https://godoc.org/github.com/robfig/cron#hdr-CRON_Expression_Format"
>
here
</a>
)
</span>
}
/>
</div>
<Input
disabled={true}
label="cron expression"
onChange={[Function]}
value="0 0 0 ? *"
variant="outlined"
width={300}
/>
<div>
Note: Start and end dates/times are handled outside of cron.
</div>
</div>
</div>
`;
exports[`Trigger renders periodic schedule controls for initial render 1`] = `
<div>
<Input
label="Trigger type"
onChange={[Function]}
required={true}
select={true}
value={0}
variant="outlined"
>
<WithStyles(MenuItem)
key="0"
value={0}
>
Periodic
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="1"
value={1}
>
Cron
</WithStyles(MenuItem)>
</Input>
<div>
<Input
label="Maximum concurrent runs"
onChange={[Function]}
required={true}
value="10"
variant="outlined"
/>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label="Has start date"
/>
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="Start date"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="date"
value="2018-12-21"
variant="outlined"
width={160}
/>
<Separator />
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="Start time"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="time"
value="07:53"
variant="outlined"
width={120}
/>
</div>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label="Has end date"
/>
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="End date"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="date"
value="2018-12-21"
variant="outlined"
width={160}
/>
<Separator />
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="End time"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="time"
value="07:53"
variant="outlined"
width={120}
/>
</div>
<span
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={true}
color="primary"
onClick={[Function]}
/>
}
label="Catchup"
/>
<HelpButton
helpText={
<div>
<p>
Whether the recurring run should catch up if behind schedule. Defaults to true.
</p>
<p>
For example, if the recurring run is paused for a while and re-enabled afterwards. If catchup=true, the scheduler will catch up on (backfill) each missed interval. Otherwise, it only schedules the latest interval if more than one interval is ready to be scheduled.
</p>
<p>
Usually, if your pipeline handles backfill internally, you should turn catchup off to avoid duplicate backfill.
</p>
</div>
}
/>
</span>
<span
className="flex"
>
Run every
<div
className="flex"
>
<Separator />
<Input
error={false}
height={30}
onChange={[Function]}
required={true}
type="number"
value={1}
variant="outlined"
width={65}
/>
</div>
<Separator />
<Input
height={30}
onChange={[Function]}
required={true}
select={true}
value="Minute"
variant="outlined"
width={95}
>
<WithStyles(MenuItem)
key="0"
value="Minute"
>
Minutes
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="1"
value="Hour"
>
Hours
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="2"
value="Day"
>
Days
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="3"
value="Week"
>
Weeks
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="4"
value="Month"
>
Months
</WithStyles(MenuItem)>
</Input>
</span>
</div>
</div>
`;
exports[`Trigger renders periodic schedule controls if the trigger type is CRON 1`] = `
<div>
<Input
label="Trigger type"
onChange={[Function]}
required={true}
select={true}
value={1}
variant="outlined"
>
<WithStyles(MenuItem)
key="0"
value={0}
>
Periodic
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="1"
value={1}
>
Cron
</WithStyles(MenuItem)>
</Input>
<div>
<Input
label="Maximum concurrent runs"
onChange={[Function]}
required={true}
value="10"
variant="outlined"
/>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label="Has start date"
/>
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="Start date"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="date"
value="2018-12-21"
variant="outlined"
width={160}
/>
<Separator />
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="Start time"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="time"
value="07:53"
variant="outlined"
width={120}
/>
</div>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label="Has end date"
/>
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="End date"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="date"
value="2018-12-21"
variant="outlined"
width={160}
/>
<Separator />
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="End time"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="time"
value="07:53"
variant="outlined"
width={120}
/>
</div>
<span
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={true}
color="primary"
onClick={[Function]}
/>
}
label="Catchup"
/>
<HelpButton
helpText={
<div>
<p>
Whether the recurring run should catch up if behind schedule. Defaults to true.
</p>
<p>
For example, if the recurring run is paused for a while and re-enabled afterwards. If catchup=true, the scheduler will catch up on (backfill) each missed interval. Otherwise, it only schedules the latest interval if more than one interval is ready to be scheduled.
</p>
<p>
Usually, if your pipeline handles backfill internally, you should turn catchup off to avoid duplicate backfill.
</p>
</div>
}
/>
</span>
<span
className="flex"
>
Run every
<Separator />
<Input
height={30}
onChange={[Function]}
required={true}
select={true}
value="Minute"
variant="outlined"
width={95}
>
<WithStyles(MenuItem)
key="0"
value="Minute"
>
Minute
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="1"
value="Hour"
>
Hour
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="2"
value="Day"
>
Day
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="3"
value="Week"
>
Week
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="4"
value="Month"
>
Month
</WithStyles(MenuItem)>
</Input>
</span>
</div>
<div>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label={
<span>
Allow editing cron expression. ( format is specified
<a
href="https://godoc.org/github.com/robfig/cron#hdr-CRON_Expression_Format"
>
here
</a>
)
</span>
}
/>
</div>
<Input
disabled={true}
label="cron expression"
onChange={[Function]}
value="0 * * * * ?"
variant="outlined"
width={300}
/>
<div>
Note: Start and end dates/times are handled outside of cron.
</div>
</div>
</div>
`;
exports[`Trigger renders week days if the trigger type is CRON and interval is weekly 1`] = `
<div>
<Input
label="Trigger type"
onChange={[Function]}
required={true}
select={true}
value={1}
variant="outlined"
>
<WithStyles(MenuItem)
key="0"
value={0}
>
Periodic
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="1"
value={1}
>
Cron
</WithStyles(MenuItem)>
</Input>
<div>
<Input
label="Maximum concurrent runs"
onChange={[Function]}
required={true}
value="10"
variant="outlined"
/>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label="Has start date"
/>
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="Start date"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="date"
value="2018-12-21"
variant="outlined"
width={160}
/>
<Separator />
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="Start time"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="time"
value="07:53"
variant="outlined"
width={120}
/>
</div>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label="Has end date"
/>
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="End date"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="date"
value="2018-12-21"
variant="outlined"
width={160}
/>
<Separator />
<Input
InputLabelProps={
Object {
"classes": Object {
"outlined": "noMargin",
},
"shrink": true,
}
}
label="End time"
onChange={[Function]}
style={
Object {
"visibility": "hidden",
}
}
type="time"
value="07:53"
variant="outlined"
width={120}
/>
</div>
<span
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={true}
color="primary"
onClick={[Function]}
/>
}
label="Catchup"
/>
<HelpButton
helpText={
<div>
<p>
Whether the recurring run should catch up if behind schedule. Defaults to true.
</p>
<p>
For example, if the recurring run is paused for a while and re-enabled afterwards. If catchup=true, the scheduler will catch up on (backfill) each missed interval. Otherwise, it only schedules the latest interval if more than one interval is ready to be scheduled.
</p>
<p>
Usually, if your pipeline handles backfill internally, you should turn catchup off to avoid duplicate backfill.
</p>
</div>
}
/>
</span>
<span
className="flex"
>
Run every
<Separator />
<Input
height={30}
onChange={[Function]}
required={true}
select={true}
value="Week"
variant="outlined"
width={95}
>
<WithStyles(MenuItem)
key="0"
value="Minute"
>
Minute
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="1"
value="Hour"
>
Hour
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="2"
value="Day"
>
Day
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="3"
value="Week"
>
Week
</WithStyles(MenuItem)>
<WithStyles(MenuItem)
key="4"
value="Month"
>
Month
</WithStyles(MenuItem)>
</Input>
</span>
</div>
<div>
<div>
<span>
On:
</span>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={true}
color="primary"
onClick={[Function]}
/>
}
label="All"
/>
<Separator />
<WithStyles(Button)
color="primary"
key="0"
mini={true}
onClick={[Function]}
variant="fab"
>
S
</WithStyles(Button)>
<WithStyles(Button)
color="primary"
key="1"
mini={true}
onClick={[Function]}
variant="fab"
>
M
</WithStyles(Button)>
<WithStyles(Button)
color="primary"
key="2"
mini={true}
onClick={[Function]}
variant="fab"
>
T
</WithStyles(Button)>
<WithStyles(Button)
color="primary"
key="3"
mini={true}
onClick={[Function]}
variant="fab"
>
W
</WithStyles(Button)>
<WithStyles(Button)
color="primary"
key="4"
mini={true}
onClick={[Function]}
variant="fab"
>
T
</WithStyles(Button)>
<WithStyles(Button)
color="primary"
key="5"
mini={true}
onClick={[Function]}
variant="fab"
>
F
</WithStyles(Button)>
<WithStyles(Button)
color="primary"
key="6"
mini={true}
onClick={[Function]}
variant="fab"
>
S
</WithStyles(Button)>
</div>
<div
className="flex"
>
<WithStyles(WithFormControlContext(FormControlLabel))
control={
<WithStyles(Checkbox)
checked={false}
color="primary"
onClick={[Function]}
/>
}
label={
<span>
Allow editing cron expression. ( format is specified
<a
href="https://godoc.org/github.com/robfig/cron#hdr-CRON_Expression_Format"
>
here
</a>
)
</span>
}
/>
</div>
<Input
disabled={true}
label="cron expression"
onChange={[Function]}
value="0 0 0 ? * *"
variant="outlined"
width={300}
/>
<div>
Note: Start and end dates/times are handled outside of cron.
</div>
</div>
</div>
`;
| 8,001 |
0 | kubeflow_public_repos/kfp-tekton-backend/frontend/src | kubeflow_public_repos/kfp-tekton-backend/frontend/src/__mocks__/typestyle.js | /*
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { classes } from 'typestyle';
// Mocks the typestyle module to emit a fixed string for class names to avoid
// test churn on style changes
module.exports = {
classes,
cssRaw: () => null,
cssRule: () => null,
style: (obj) => '',
stylesheet: (obj) => {
const mock = {};
Object.keys(obj).forEach(key => mock[key] = key);
return mock;
},
};
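// Illustrative usage (not part of the original mock): with the `stylesheet` mock
// above, each generated "class name" is simply its rule key, so components render
// deterministic class names and snapshots stay stable across purely visual changes.
//
//   const css = stylesheet({ page: { display: 'flex' } });
//   css.page === 'page'; // true under this mock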
| 8,002 |
0 | kubeflow_public_repos/kfp-tekton-backend/frontend/third_party | kubeflow_public_repos/kfp-tekton-backend/frontend/third_party/argo-ui/argo_template.ts | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import * as kubernetes from './kubernetes';
/**
* Arguments to a template
*/
export interface Arguments {
/**
* Artifacts is the list of artifacts to pass to the template or workflow
*/
artifacts?: Artifact[];
/**
* Parameters is the list of parameters to pass to the template or workflow
*/
parameters?: Parameter[];
}
/**
* Artifact indicates an artifact to place at a specified path
*/
export interface Artifact {
/**
* Artifactory contains artifactory artifact location details
*/
artifactory?: ArtifactoryArtifact;
/**
* From allows an artifact to reference an artifact from a previous step
*/
from?: string;
/**
* Git contains git artifact location details
*/
git?: GitArtifact;
/**
* HTTP contains HTTP artifact location details
*/
http?: HTTPArtifact;
/**
* Mode bits to use on this file; must be a value between 0 and 0777, set
* when loading input artifacts.
*/
mode?: number;
/**
* Name of the artifact. Must be unique within a template's inputs/outputs.
*/
name: string;
/**
* Path is the container path to the artifact
*/
path?: string;
/**
* Raw contains raw artifact location details
*/
raw?: RawArtifact;
/**
* S3 contains S3 artifact location details
*/
s3?: S3Artifact;
}
/**
* ArtifactLocation describes a location for a single or multiple artifacts. It
* is used as a single artifact in the context of inputs/outputs (e.g.
* outputs.artifacts.artname). It is also used to describe the location of
* multiple artifacts such as the archive location of a single workflow step,
* which the executor will use as a default location to store its files.
*/
export interface ArtifactLocation {
/**
* Artifactory contains artifactory artifact location details
*/
artifactory?: ArtifactoryArtifact;
/**
* Git contains git artifact location details
*/
git?: GitArtifact;
/**
* HTTP contains HTTP artifact location details
*/
http?: HTTPArtifact;
/**
* Raw contains raw artifact location details
*/
raw?: RawArtifact;
/**
* S3 contains S3 artifact location details
*/
s3?: S3Artifact;
}
/**
* ArtifactoryArtifact is the location of an artifactory artifact
*/
export interface ArtifactoryArtifact {
/**
* PasswordSecret is the secret selector to the repository password
*/
passwordSecret?: kubernetes.SecretKeySelector;
/**
* URL of the artifact
*/
url: string;
/**
* UsernameSecret is the secret selector to the repository username
*/
usernameSecret?: kubernetes.SecretKeySelector;
}
/**
* ArtifactoryAuth describes the secret selectors required for authenticating to artifactory
*/
export interface ArtifactoryAuth {
/**
* PasswordSecret is the secret selector to the repository password
*/
passwordSecret?: kubernetes.SecretKeySelector;
/**
* UsernameSecret is the secret selector to the repository username
*/
usernameSecret?: kubernetes.SecretKeySelector;
}
/**
* GitArtifact is the location of a git artifact
*/
export interface GitArtifact {
/**
* PasswordSecret is the secret selector to the repository password
*/
passwordSecret?: kubernetes.SecretKeySelector;
/**
* Repo is the git repository
*/
repo: string;
/**
* Revision is the git commit, tag, branch to checkout
*/
revision?: string;
/**
* UsernameSecret is the secret selector to the repository username
*/
usernameSecret?: kubernetes.SecretKeySelector;
}
/**
* HTTPArtifact allows a file served over HTTP to be placed as an input artifact in a container
*/
export interface HTTPArtifact {
/**
* URL of the artifact
*/
url: string;
}
/**
* Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another
*/
export interface Inputs {
/**
* Artifacts are a list of artifacts passed as inputs
*/
artifacts?: Artifact[];
/**
* Parameters are a list of parameters passed as inputs
*/
parameters?: Parameter[];
}
/**
* Pod metadata
*/
export interface Metadata {
annotations?: { [key: string]: string };
labels?: { [key: string]: string };
}
/**
* Outputs hold parameters, artifacts, and results from a step
*/
export interface Outputs {
/**
* Artifacts holds the list of output artifacts produced by a step
*/
artifacts?: Artifact[];
/**
* Parameters holds the list of output parameters produced by a step
*/
parameters?: Parameter[];
/**
* Result holds the result (stdout) of a script template
*/
result?: string;
}
/**
* Parameter indicates a passed string parameter to a service template with an optional default value
*/
export interface Parameter {
/**
* Default is the default value to use for an input parameter if a value was not supplied
*/
_default?: string;
/**
* Name is the parameter name
*/
name: string;
/**
* Value is the literal value to use for the parameter. If specified in the
* context of an input parameter, the value takes precedence over any
* passed values
*/
value?: string;
/**
* ValueFrom is the source for the output parameter's value
*/
valueFrom?: ValueFrom;
}
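// Illustrative example (not part of the original definitions): an input parameter
// carrying a literal value, and an output parameter whose value is read from a
// file inside the container via ValueFrom (declared further below). The names and
// path are hypothetical.
export const exampleParameters: Parameter[] = [
  { name: 'learning-rate', value: '0.01' },
  { name: 'model-path', valueFrom: { path: '/tmp/outputs/model-path.txt' } },
];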
/**
* RawArtifact allows raw string content to be placed as an artifact in a container
*/
export interface RawArtifact {
/**
* Data is the string contents of the artifact
*/
data: string;
}
/**
* ResourceTemplate is a template subtype to manipulate kubernetes resources
*/
export interface ResourceTemplate {
/**
* Action is the action to perform to the resource. Must be one of: get,
* create, apply, delete, replace
*/
action: string;
/**
* FailureCondition is a label selector expression which describes the
* conditions of the k8s resource in which the step was considered failed
*/
failureCondition?: string;
/**
* Manifest contains the kubernetes manifest
*/
manifest: string;
/**
* SuccessCondition is a label selector expression which describes the
* conditions of the k8s resource in which it is acceptable to proceed to
* the following step
*/
successCondition?: string;
}
/**
* RetryStrategy provides controls on how to retry a workflow step
*/
export interface RetryStrategy {
/**
* Limit is the maximum number of attempts when retrying a container
*/
limit?: number;
}
/**
* S3Artifact is the location of an S3 artifact
*/
export interface S3Artifact {
/**
* AccessKeySecret is the secret selector to the bucket's access key
*/
accessKeySecret: kubernetes.SecretKeySelector;
/**
* Bucket is the name of the bucket
*/
bucket: string;
/**
* Endpoint is the hostname of the bucket endpoint
*/
endpoint: string;
/**
* Insecure, if true, will connect to the service without TLS
*/
insecure?: boolean;
/**
* Key is the key in the bucket where the artifact resides
*/
key: string;
/**
* Region contains the optional bucket region
*/
region?: string;
/**
* SecretKeySecret is the secret selector to the bucket's secret key
*/
secretKeySecret: kubernetes.SecretKeySelector;
}
/**
* S3Bucket contains the access information required for interfacing with an S3 bucket
*/
export interface S3Bucket {
/**
* AccessKeySecret is the secret selector to the bucket's access key
*/
accessKeySecret: kubernetes.SecretKeySelector;
/**
* Bucket is the name of the bucket
*/
bucket: string;
/**
* Endpoint is the hostname of the bucket endpoint
*/
endpoint: string;
/**
* Insecure, if true, will connect to the service without TLS
*/
insecure?: boolean;
/**
* Region contains the optional bucket region
*/
region?: string;
/**
* SecretKeySecret is the secret selector to the bucket's secret key
*/
secretKeySecret: kubernetes.SecretKeySelector;
}
/**
* Script is a template subtype to enable scripting through code steps
*/
export interface Script {
/**
* Command is the interpreter command to run (e.g. [python])
*/
command: string[];
/**
* Image is the container image to run
*/
image: string;
/**
* Source contains the source code of the script to execute
*/
source: string;
}
/**
* Sidecar is a container which runs alongside the main container
*/
export interface Sidecar {
/**
* Arguments to the entrypoint. The docker image's CMD is used if this is not provided.
* Variable references $(VAR_NAME) are expanded using the container's environment.
* If a variable cannot be resolved, the reference in the input string will be unchanged.
* The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME).
* Escaped references will never be expanded, regardless of whether the variable exists or not.
* Cannot be updated.
*/
args?: string[];
/**
* Entrypoint array. Not executed within a shell. The docker image's
* ENTRYPOINT is used if this is not provided. Variable references
* $(VAR_NAME) are expanded using the container's environment. If a
* variable cannot be resolved, the reference in the input string will be
* unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie:
* $$(VAR_NAME). Escaped references will never be expanded, regardless of
* whether the variable exists or not. Cannot be updated.
*
*/
command?: string[];
/**
* List of environment variables to set in the container. Cannot be updated.
*/
env?: kubernetes.EnvVar[];
/**
* List of sources to populate environment variables in the container. The
* keys defined within a source must be a C_IDENTIFIER. All invalid keys
* will be reported as an event when the container is starting. When a key
* exists in multiple sources, the value associated with the last source
* will take precedence. Values defined by an Env with a duplicate key will
* take precedence. Cannot be updated.
*/
envFrom?: kubernetes.EnvFromSource[];
/**
* Docker image name.
*/
image?: string;
/**
* Image pull policy. One of Always, Never, IfNotPresent. Defaults to
* Always if :latest tag is specified, or IfNotPresent otherwise.
*/
imagePullPolicy?: string;
/**
* Actions that the management system should take in response to container
* lifecycle events. Cannot be updated.
*/
lifecycle?: kubernetes.Lifecycle;
/**
* Periodic probe of container liveness. Container will be restarted if the probe fails.
* Cannot be updated.
*/
livenessProbe?: kubernetes.Probe;
/**
* MirrorVolumeMounts will mount the same volumes specified in the main
* container to the sidecar (including artifacts), at the same mountPaths.
* This enables dind daemon to partially see the same filesystem as the
* main container in order to use features such as docker volume binding
*/
mirrorVolumeMounts?: boolean;
/**
* Name of the container specified as a DNS_LABEL. Each container in a pod
* must have a unique name (DNS_LABEL). Cannot be updated.
*/
name: string;
/**
* List of ports to expose from the container. Exposing a port here gives
* the system additional information about the network connections a
* container uses, but is primarily informational. Not specifying a port
* here DOES NOT prevent that port from being exposed. Any port which is
* listening on the default \"0.0.0.0\" address inside a container will be
* accessible from the network. Cannot be updated.
*/
ports?: kubernetes.ContainerPort[];
/**
* Periodic probe of container service readiness. Container will be removed
* from service endpoints if the probe fails.
*/
readinessProbe?: kubernetes.Probe;
/**
* Compute Resources required by this container. Cannot be updated.
*/
resources?: kubernetes.ResourceRequirements;
/**
* Security options the pod should run with.
*/
securityContext?: kubernetes.SecurityContext;
/**
* Whether this container should allocate a buffer for stdin in the
* container runtime. If this is not set, reads from stdin in the container
* will always result in EOF. Default is false.
*/
stdin?: boolean;
/**
* Whether the container runtime should close the stdin channel after it
* has been opened by a single attach. When stdin is true the stdin stream
* will remain open across multiple attach sessions. If stdinOnce is set to
* true, stdin is opened on container start, is empty until the first
* client attaches to stdin, and then remains open and accepts data until
* the client disconnects, at which time stdin is closed and remains closed
* until the container is restarted. If this flag is false, a container
* process that reads from stdin will never receive an EOF. Default is
* false
*/
stdinOnce?: boolean;
/**
* Optional: Path at which the file to which the container's termination
* message will be written is mounted into the container's filesystem.
* Message written is intended to be brief final status, such as an
* assertion failure message. Will be truncated by the node if greater than
* 4096 bytes. The total message length across all containers will be
* limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
*/
terminationMessagePath?: string;
/**
* Indicate how the termination message should be populated. File will use
* the contents of terminationMessagePath to populate the container status
* message on both success and failure. FallbackToLogsOnError will use the
* last chunk of container log output if the termination message file is
* empty and the container exited with an error. The log output is limited
* to 2048 bytes or 80 lines, whichever is smaller. Defaults to File.
* Cannot be updated.
*/
terminationMessagePolicy?: string;
/**
* Whether this container should allocate a TTY for itself, also requires
* 'stdin' to be true. Default is false.
*/
tty?: boolean;
/**
* volumeDevices is the list of block devices to be used by the container.
* This is an alpha feature and may change in the future.
*/
volumeDevices?: kubernetes.VolumeDevice[];
/**
* Pod volumes to mount into the container's filesystem. Cannot be updated.
*/
volumeMounts?: kubernetes.VolumeMount[];
/**
* Container's working directory. If not specified, the container runtime's
* default will be used, which might be configured in the container image.
* Cannot be updated.
*/
workingDir?: string;
}
/**
* SidecarOptions provide a way to customize the behavior of a sidecar and how
* it affects the main container.
*/
export interface SidecarOptions {
/**
* MirrorVolumeMounts will mount the same volumes specified in the main
* container to the sidecar (including artifacts), at the same mountPaths.
* This enables dind daemon to partially see the same filesystem as the
* main container in order to use features such as docker volume binding
*/
mirrorVolumeMounts?: boolean;
}
/**
* Template is a reusable and composable unit of execution in a workflow
*/
export interface Template {
/**
* Optional duration in seconds relative to the StartTime that the pod may
* be active on a node before the system actively tries to terminate the
* pod; value must be a positive integer. This field is only applicable to
* container and script templates.
*/
activeDeadlineSeconds?: number;
/**
* Affinity sets the pod's scheduling constraints Overrides the affinity
* set at the workflow level (if any)
*/
affinity?: kubernetes.Affinity;
/**
* Location in which all files related to the step will be stored (logs,
* artifacts, etc...). Can be overridden by individual items in Outputs. If
* omitted, will use the default artifact repository location configured in
* the controller, appended with the <workflowname>/<nodename> in the key.
*/
archiveLocation?: ArtifactLocation;
/**
* Container is the main container image to run in the pod
*/
container?: kubernetes.Container;
/**
* Daemon will allow a workflow to proceed to the next step so long as the
* container reaches readiness
*/
daemon?: boolean;
/**
* Inputs describe what inputs parameters and artifacts are supplied to this template
*/
inputs?: Inputs;
/**
* Metadata sets the pod's metadata, i.e. annotations and labels
*/
metadata?: Metadata;
/**
* Name is the name of the template
*/
name: string;
/**
* NodeSelector is a selector to schedule this step of the workflow to be
* run on the selected node(s). Overrides the selector set at the workflow
* level.
*/
nodeSelector?: { [key: string]: string };
/**
* Outputs describe the parameters and artifacts that this template produces
*/
outputs?: Outputs;
/**
* Resource template subtype which can run k8s resources
*/
resource?: ResourceTemplate;
/**
* RetryStrategy describes how to retry a template when it fails
*/
retryStrategy?: RetryStrategy;
/**
* Script runs a portion of code against an interpreter
*/
script?: Script;
/**
* Sidecars is a list of containers which run alongside the main container
* Sidecars are automatically killed when the main container completes
*/
sidecars?: Sidecar[];
/**
* Steps define a series of sequential/parallel workflow steps
*/
steps?: WorkflowStep[][];
/**
* DAG template
*/
dag: DAGTemplate;
}
/**
* ValueFrom describes a location in which to obtain the value to a parameter
*/
export interface ValueFrom {
/**
* JQFilter expression against the resource object in resource templates
*/
jqFilter?: string;
/**
* JSONPath of a resource to retrieve an output parameter value from in resource templates
*/
jsonPath?: string;
/**
* Parameter reference to a step or dag task in which to retrieve an output
* parameter value from (e.g. '{{steps.mystep.outputs.myparam}}')
*/
parameter?: string;
/**
* Path in the container to retrieve an output parameter value from in container templates
*/
path?: string;
}
/**
* Workflow is the definition of a workflow resource
*/
export interface Workflow {
/**
* APIVersion defines the versioned schema of this representation of an object.
* Servers should convert recognized schemas to the latest internal value,
* and may reject unrecognized values.
*/
apiVersion?: string;
/**
* Kind is a string value representing the REST resource this object
* represents. Servers may infer this from the endpoint the client submits
* requests to.
* Cannot be updated. In CamelCase.
*/
kind?: string;
metadata: kubernetes.ObjectMeta;
spec: WorkflowSpec;
status: WorkflowStatus;
}
export type NodeType = 'Pod' | 'Steps' | 'StepGroup' | 'TaskGroup' | 'DAG' | 'Retry' | 'Skipped';
export interface NodeStatus {
/**
* ID is a unique identifier of a node within the workflow
* It is implemented as a hash of the node name, which makes the ID deterministic
*/
id: string;
/**
* Display name is a human readable representation of the node. Unique
* within a template boundary
*/
displayName: string;
/**
* Name is unique name in the node tree used to generate the node ID
*/
name: string;
/**
* Type indicates type of node
*/
type: NodeType;
/**
* Phase is a simple, high-level summary of where the node is in its lifecycle.
* Can be used as a state machine.
*/
phase: NodePhase;
/**
* BoundaryID indicates the node ID of the associated template root node to
* which this node belongs
*/
boundaryID: string;
/**
* A human readable message indicating details about why the node is in this condition.
*/
message: string;
/**
* Time at which this node started.
*/
startedAt: kubernetes.Time;
/**
* Time at which this node completed.
*/
finishedAt: kubernetes.Time;
/**
* PodIP captures the IP of the pod for daemoned steps
*/
podIP: string;
/**
* Daemoned tracks whether or not this node was daemoned and needs to be terminated
*/
daemoned: boolean;
retryStrategy: RetryStrategy;
/**
* Outputs captures output parameter values and artifact locations
*/
outputs: Outputs;
/**
* Children is a list of child node IDs
*/
children: string[];
/**
* OutboundNodes tracks the node IDs which are considered "outbound" nodes
* to a template invocation. For every invocation of a template, there are
* nodes which we considered as "outbound". Essentially, these are last
* nodes in the execution sequence to run, before the template is
* considered completed. These nodes are then connected as parents to a
* following step.
*
* In the case of single pod steps (i.e. container, script, resource
* templates), this list will be nil since the pod itself is already
* considered the "outbound" node. In the case of DAGs, outbound nodes are
* the "target" tasks (tasks with no children). In the case of steps,
* outbound nodes are all the containers involved in the last step group.
* NOTE: since templates are composable, the list of outbound nodes are
* carried upwards when a DAG/steps template invokes another DAG/steps
* template. In other words, the outbound nodes of a template, will be a
* superset of the outbound nodes of its last children.
*/
outboundNodes: string[];
/**
* TemplateName is the template name which this node corresponds to. Not
* applicable to virtual nodes (e.g. Retry, StepGroup)
*/
templateName: string;
/**
* Inputs captures input parameter values and artifact locations supplied
* to this template invocation
*/
inputs: Inputs;
}
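// Illustrative sketch (not part of the original definitions): for a steps template
// with two sequential step groups,
//
//   steps-node -> [step group 1: pod-A] -> [step group 2: pod-B, pod-C]
//
// the outboundNodes of the steps-node are the IDs of pod-B and pod-C, the last
// nodes to run before the template invocation is considered complete; a following
// step is connected to those nodes as its parents.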
export interface WorkflowStatus {
/**
* Phase is a simple, high-level summary of where the workflow is in its lifecycle.
*/
phase: NodePhase;
startedAt: kubernetes.Time;
finishedAt: kubernetes.Time;
/**
* A human readable message indicating details about why the workflow is in this condition.
*/
message: string;
/**
* Nodes is a mapping between a node ID and the node's status.
*/
nodes: { [nodeId: string]: NodeStatus };
/**
* PersistentVolumeClaims tracks all PVCs that were created as part of the workflow.
* The contents of this list are drained at the end of the workflow.
*/
persistentVolumeClaims: kubernetes.Volume[];
}
/**
* WorkflowList is list of Workflow resources
*/
export interface WorkflowList {
/**
* APIVersion defines the versioned schema of this representation of an object.
* Servers should convert recognized schemas to the latest internal value,
* and may reject unrecognized values.
*/
apiVersion?: string;
items: Workflow[];
/**
* Kind is a string value representing the REST resource this object represents.
* Servers may infer this from the endpoint the client submits requests to.
*/
kind?: string;
metadata: kubernetes.ListMeta;
}
/**
* WorkflowSpec is the specification of a Workflow.
*/
export interface WorkflowSpec {
/**
* Affinity sets the scheduling constraints for all pods in the workflow.
* Can be overridden by an affinity specified in the template
*/
affinity?: kubernetes.Affinity;
/**
* Arguments contain the parameters and artifacts sent to the workflow
* entrypoint.
* Parameters are referencable globally using the 'workflow' variable
* prefix. e.g. {{workflow.parameters.myparam}}
*/
arguments?: Arguments;
/**
* Entrypoint is a template reference to the starting point of the workflow
*/
entrypoint: string;
/**
* ImagePullSecrets is a list of references to secrets in the same
* namespace to use for pulling any images in pods that reference this
* ServiceAccount.
* ImagePullSecrets are distinct from Secrets because Secrets can be
* mounted in the pod, but ImagePullSecrets are only accessed by the
* kubelet.
*/
imagePullSecrets?: kubernetes.LocalObjectReference[];
/**
* NodeSelector is a selector which will result in all pods of the workflow
* to be scheduled on the selected node(s).
* This is able to be overridden by a nodeSelector specified in the template.
*/
nodeSelector?: { [key: string]: string };
/**
* OnExit is a template reference which is invoked at the end of the
* workflow, irrespective of the success, failure, or error of the primary
* workflow.
*/
onExit?: string;
/**
* ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.
*/
serviceAccountName?: string;
/**
* Templates is a list of workflow templates used in a workflow
*/
templates: Template[];
/**
* VolumeClaimTemplates is a list of claims that containers are allowed to reference.
* The Workflow controller will create the claims at the beginning of the
* workflow and delete the claims upon completion of the workflow
*/
volumeClaimTemplates?: kubernetes.PersistentVolumeClaim[];
/**
* Volumes is a list of volumes that can be mounted by containers in a workflow.
*/
volumes?: kubernetes.Volume[];
}
export interface DAGTemplate {
/**
* Target are one or more names of targets to execute in a DAG
*/
targets: string;
/**
* Tasks are a list of DAG tasks
*/
tasks: DAGTask[];
}
export interface DAGTask {
name: string;
/**
* Name of template to execute
*/
template: string;
/**
* Arguments are the parameter and artifact arguments to the template
*/
arguments: Arguments;
/**
* Dependencies are names of other targets which this task depends on
*/
dependencies: string[];
// TODO: This exists in https://github.com/argoproj/argo/blob/master/api/openapi-spec/swagger.json
// but not in https://github.com/argoproj/argo-ui/blob/master/src/models/workflows.ts
// Perhaps we should generate this definition file from the swagger?
/**
* When is an expression in which the task should conditionally execute
*/
when?: string;
}
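// Illustrative example (not part of the original definitions): a fan-in DAG in
// which task "c" waits for "a" and "b" and runs only when a workflow parameter
// enables it. The template and parameter names are hypothetical.
export const exampleDag: DAGTemplate = {
  targets: 'c',
  tasks: [
    { name: 'a', template: 'echo', arguments: {}, dependencies: [] },
    { name: 'b', template: 'echo', arguments: {}, dependencies: [] },
    {
      name: 'c',
      template: 'echo',
      arguments: { parameters: [{ name: 'message', value: 'a and b finished' }] },
      dependencies: ['a', 'b'],
      when: '{{workflow.parameters.run-c}} == true',
    },
  ],
};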
/**
* WorkflowStep is a reference to a template to execute in a series of steps
*/
export interface WorkflowStep {
/**
* Arguments hold arguments to the template
*/
arguments?: Arguments;
/**
* Name of the step
*/
name?: string;
/**
* Template is a reference to the template to execute as the step
*/
template?: string;
/**
* When is an expression in which the step should conditionally execute
*/
when?: string;
/**
* WithParam expands a step into multiple parallel steps from the value in the parameter, which is expected to be a JSON list
*/
withParam?: string;
}
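// Illustrative example (not part of the original definitions): two sequential step
// groups where the second fans out over the JSON list produced by the first step,
// using withParam as described above. The template names are hypothetical.
export const exampleSteps: WorkflowStep[][] = [
  [{ name: 'generate', template: 'gen-list' }],
  [
    {
      name: 'process',
      template: 'process-item',
      arguments: { parameters: [{ name: 'item', value: '{{item}}' }] },
      withParam: '{{steps.generate.outputs.result}}',
    },
  ],
];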
export type NodePhase = 'Pending' | 'Running' | 'Succeeded' | 'Skipped' | 'Failed' | 'Error';
export const NODE_PHASE = {
ERROR: 'Error',
FAILED: 'Failed',
PENDING: 'Pending',
RUNNING: 'Running',
SKIPPED: 'Skipped',
SUCCEEDED: 'Succeeded',
};
| 8,003 |
0 | kubeflow_public_repos/kfp-tekton-backend/frontend/third_party | kubeflow_public_repos/kfp-tekton-backend/frontend/third_party/argo-ui/kubernetes.ts | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
export type Time = string;
export type VolumeDevice = any;
export type Volume = any;
export type EnvFromSource = any;
export type EnvVarSource = any;
export type ResourceRequirements = any;
export type Probe = any;
export type Lifecycle = any;
export type TerminationMessagePolicy = any;
export type PullPolicy = any;
export type SecurityContext = any;
export type PersistentVolumeClaim = any;
export type Affinity = any;
export interface VolumeMount {
name: string;
mountPath?: string;
}
export interface ListMeta {
_continue?: string;
resourceVersion?: string;
selfLink?: string;
}
export interface ObjectMeta {
name?: string;
generateName?: string;
namespace?: string;
selfLink?: string;
uid?: string;
resourceVersion?: string;
generation?: number;
creationTimestamp?: Time;
deletionTimestamp?: Time;
deletionGracePeriodSeconds?: number;
labels?: { [name: string]: string };
annotations?: { [name: string]: string };
ownerReferences?: any[];
initializers?: any;
finalizers?: string[];
clusterName?: string;
}
export interface TypeMeta {
kind: string;
apiVersion: string;
}
export interface LocalObjectReference {
name: string;
}
export interface SecretKeySelector extends LocalObjectReference {
key: string;
optional: boolean;
}
export interface ContainerPort {
name: string;
hostPort: number;
containerPort: number;
protocol: string;
hostIP: string;
}
export interface EnvVar {
name: string;
value: string;
valueFrom: EnvVarSource;
}
export interface Container {
name: string;
image: string;
command: string[];
args: string[];
workingDir: string;
ports: ContainerPort[];
envFrom: EnvFromSource[];
env: EnvVar[];
resources: ResourceRequirements;
volumeMounts: VolumeMount[];
livenessProbe: Probe;
readinessProbe: Probe;
lifecycle: Lifecycle;
terminationMessagePath: string;
terminationMessagePolicy: TerminationMessagePolicy;
imagePullPolicy: PullPolicy;
securityContext: SecurityContext;
stdin: boolean;
stdinOnce: boolean;
tty: boolean;
}
export interface WatchEvent<T> {
object: T;
type: 'ADDED' | 'MODIFIED' | 'DELETED' | 'ERROR';
}
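// Illustrative example (not part of the original definitions): a secret selector
// and a volume mount as they would appear inside an Argo artifact or container
// spec. The secret, key, and volume names are hypothetical.
export const exampleSecretRef: SecretKeySelector = {
  name: 'artifact-store-credentials',
  key: 'accesskey',
  optional: false,
};
export const exampleMount: VolumeMount = { name: 'workdir', mountPath: '/workspace' };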
| 8,004 |
0 | kubeflow_public_repos/kfp-tekton-backend/frontend/third_party | kubeflow_public_repos/kfp-tekton-backend/frontend/third_party/argo-ui/LICENSE |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-2018 The Argo Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 8,005 |
0 | kubeflow_public_repos/kfp-tekton-backend/frontend/third_party | kubeflow_public_repos/kfp-tekton-backend/frontend/third_party/jest/LICENSE | MIT License
For Jest software
Copyright (c) 2014-present, Facebook, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| 8,006 |
0 | kubeflow_public_repos/kfp-tekton-backend/frontend/third_party/@kubernetes | kubeflow_public_repos/kfp-tekton-backend/frontend/third_party/@kubernetes/client-node/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 8,007 |
0 | kubeflow_public_repos/kfp-tekton-backend/frontend/third_party | kubeflow_public_repos/kfp-tekton-backend/frontend/third_party/mamacro/LICENSE | The MIT License
Copyright (c) 2019 Sven Sauleau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
| 8,008 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/docs/conf.py | # -*- coding: utf-8 -*-
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Initially all the files in docs were generated by the https://www.sphinx-doc.org/en/master/man/sphinx-quickstart.html tool. There is no need to run this tool again.
# The docs can now be generated using the `make html` command which calls the https://www.sphinx-doc.org/en/master/man/sphinx-build.html tool
# Afterwards I made many changes to the generated files
# Changes I made:
# conf.py: Added the package path to sys.path
# conf.py: Added extensions: sphinx.ext.autodoc, sphinx.ext.napoleon
# conf.py: Set the project information
# conf.py: Set the theme to sphinx_rtd_theme
# *.rst: Added ":imported-members:" to all automodule invocations so that the members imported in __init__.py are included
# *.rst: Reworked the files removing empty or unneeded sections
# *.rst: Manually split out some modules and classes into separate pages: kfp.Client, kfp.extensions
# *.rst: Fully reworked the kfp.rst and index.rst pages
# When SDK code changes are submitted to master, GitHub sends a signal to ReadTheDocs using a webhook.
# RTD automatically pulls the branch and generates the documentation at https://kf-pipelines.readthedocs.io
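# A minimal local-build sketch (assumptions: Sphinx and the sphinx_rtd_theme
# package are installed in the current Python environment; versions are not
# pinned here):
#   pip install sphinx sphinx_rtd_theme
#   cd docs && make html   # wraps `sphinx-build -M html . _build`; see the Makefile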
import os
import sys
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../sdk/python/'))
# -- Project information -----------------------------------------------------
project = 'Kubeflow Pipelines'
copyright = '2019, Google'
author = 'Google'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
except ImportError:
pass
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'KubeflowPipelinesdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'KubeflowPipelines.tex', 'Kubeflow Pipelines Documentation',
'Google', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'kubeflowpipelines', 'Kubeflow Pipelines Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'KubeflowPipelines', 'Kubeflow Pipelines Documentation',
author, 'KubeflowPipelines', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| 8,009 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/docs/Makefile | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) | 8,010 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/docs/make.bat | REM Copyright 2019 Google LLC
REM
REM Licensed under the Apache License, Version 2.0 (the "License");
REM you may not use this file except in compliance with the License.
REM You may obtain a copy of the License at
REM
REM http://www.apache.org/licenses/LICENSE-2.0
REM
REM Unless required by applicable law or agreed to in writing, software
REM distributed under the License is distributed on an "AS IS" BASIS,
REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
REM See the License for the specific language governing permissions and
REM limitations under the License.
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd
| 8,011 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/docs/_config.yml | theme: jekyll-theme-minimal | 8,012 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/docs/index.rst | .. Kubeflow Pipelines documentation master file, created by
sphinx-quickstart on Wed Mar 27 22:34:54 2019.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to Kubeflow Pipelines SDK API reference
================================================
Main documentation: https://www.kubeflow.org/docs/pipelines/
Source code: https://github.com/kubeflow/pipelines/
.. toctree::
:maxdepth: 3
:caption: Contents:
source/kfp
.. * :ref:`modindex`
.. * :ref:`kfp-ref`
.. * :ref:`search`
* :ref:`genindex`
| 8,013 |
0 | kubeflow_public_repos/kfp-tekton-backend/docs | kubeflow_public_repos/kfp-tekton-backend/docs/source/kfp.components.structures.rst | kfp.components.structures package
=================================
.. automodule:: kfp.components.structures
:members:
:undoc-members:
:show-inheritance:
:imported-members:
.. toctree::
kfp.components.structures.kubernetes
| 8,014 |
0 | kubeflow_public_repos/kfp-tekton-backend/docs | kubeflow_public_repos/kfp-tekton-backend/docs/source/kfp.containers.rst | kfp.containers package
======================
.. automodule:: kfp.containers
:members:
:undoc-members:
:show-inheritance:
:imported-members:
| 8,015 |
0 | kubeflow_public_repos/kfp-tekton-backend/docs | kubeflow_public_repos/kfp-tekton-backend/docs/source/kfp.notebook.rst | kfp.notebook package
====================
.. automodule:: kfp.notebook
:members:
:undoc-members:
:show-inheritance:
:imported-members:
| 8,016 |
0 | kubeflow_public_repos/kfp-tekton-backend/docs | kubeflow_public_repos/kfp-tekton-backend/docs/source/kfp.dsl.rst | kfp.dsl package
===============
.. automodule:: kfp.dsl
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: Pipeline, OpsGroup, match_serialized_pipelineparam
.. toctree::
kfp.dsl.types
| 8,017 |
0 | kubeflow_public_repos/kfp-tekton-backend/docs | kubeflow_public_repos/kfp-tekton-backend/docs/source/kfp.components.rst | kfp.components package
======================
.. automodule:: kfp.components
:members:
:undoc-members:
:show-inheritance:
:imported-members:
kfp.components.structures subpackage
-------------------------------------
.. toctree::
kfp.components.structures
| 8,018 |
0 | kubeflow_public_repos/kfp-tekton-backend/docs | kubeflow_public_repos/kfp-tekton-backend/docs/source/kfp.compiler.rst | kfp.compiler package
====================
.. automodule:: kfp.compiler
:members:
:undoc-members:
:show-inheritance:
:imported-members:
| 8,019 |
0 | kubeflow_public_repos/kfp-tekton-backend/docs | kubeflow_public_repos/kfp-tekton-backend/docs/source/kfp.extensions.rst | KFP extension modules
========================
kfp.onprem module
-----------------
.. automodule:: kfp.onprem
:members:
:undoc-members:
:show-inheritance:
kfp.gcp module
--------------
.. automodule:: kfp.gcp
:members:
:undoc-members:
:show-inheritance:
kfp.aws module
----------------
.. automodule:: kfp.aws
:members:
:undoc-members:
:show-inheritance:
kfp.azure module
----------------
.. automodule:: kfp.azure
:members:
:undoc-members:
:show-inheritance:
| 8,020 |
0 | kubeflow_public_repos/kfp-tekton-backend/docs | kubeflow_public_repos/kfp-tekton-backend/docs/source/modules.rst | kfp
===
.. toctree::
:maxdepth: 4
kfp
| 8,021 |
0 | kubeflow_public_repos/kfp-tekton-backend/docs | kubeflow_public_repos/kfp-tekton-backend/docs/source/kfp.dsl.types.rst | kfp.dsl.types module
--------------------
.. automodule:: kfp.dsl.types
:members:
:undoc-members:
:show-inheritance:
| 8,022 |
0 | kubeflow_public_repos/kfp-tekton-backend/docs | kubeflow_public_repos/kfp-tekton-backend/docs/source/kfp.components.structures.kubernetes.rst | kfp.components.structures.kubernetes package
============================================
kfp.components.structures.kubernetes.v1 module
----------------------------------------------
.. automodule:: kfp.components.structures.kubernetes.v1
:members:
:undoc-members:
:show-inheritance:
.. Empty
Module contents
---------------
.. automodule:: kfp.components.structures.kubernetes
:members:
:undoc-members:
:show-inheritance:
| 8,023 |
0 | kubeflow_public_repos/kfp-tekton-backend/docs | kubeflow_public_repos/kfp-tekton-backend/docs/source/kfp.rst | .. _kfp-ref:
kfp package
===========
.. toctree::
:maxdepth: 2
kfp.compiler
kfp.components
kfp.containers
kfp.dsl
kfp.client
kfp.notebook
kfp.extensions
.. automodule:: kfp
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: Client
| 8,024 |
0 | kubeflow_public_repos/kfp-tekton-backend/docs | kubeflow_public_repos/kfp-tekton-backend/docs/source/kfp.client.rst | kfp.Client class
------------------
.. autoclass:: kfp.Client
:members:
:undoc-members:
:show-inheritance:
| 8,025 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/proxy/attempt-register-vm-on-proxy.sh | #!/bin/bash
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null && pwd)"
function run-proxy-agent {
# Start the proxy process
# https://github.com/google/inverting-proxy/blob/master/agent/Dockerfile
# Connect proxy agent to Kubeflow Pipelines UI
/opt/bin/proxy-forwarding-agent \
--debug=${DEBUG} \
--proxy=${PROXY_URL} \
--proxy-timeout=${PROXY_TIMEOUT} \
--backend=${BACKEND_ID} \
--host=${ML_PIPELINE_UI_SERVICE_HOST}:${ML_PIPELINE_UI_SERVICE_PORT} \
--shim-websockets=true \
--shim-path=websocket-shim \
--health-check-path=${HEALTH_CHECK_PATH} \
--health-check-interval-seconds=${HEALTH_CHECK_INTERVAL_SECONDS} \
--health-check-unhealthy-threshold=${HEALTH_CHECK_UNHEALTHY_THRESHOLD}
}
# Don't reuse the existing hostname: if the proxy agent is restarted,
# it will get a new hostname.
# https://github.com/kubeflow/pipelines/issues/3143
# Another option would be to fix this on the inverting-proxy server side.
# Activate service account for gcloud SDK first
if [[ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" ]]; then
gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}"
fi
INSTANCE_ZONE="/"$(curl http://metadata.google.internal/computeMetadata/v1/instance/zone -H "Metadata-Flavor: Google")
INSTANCE_ZONE="${INSTANCE_ZONE##/*/}"
# Get latest Proxy server URL
wget https://storage.googleapis.com/ml-pipeline/proxy-agent-config.json
PROXY_URL=$(python ${DIR}/get_proxy_url.py --config-file-path "proxy-agent-config.json" --location "${INSTANCE_ZONE}" --version "latest")
if [[ -z "${PROXY_URL}" ]]; then
echo "Proxy URL for the zone ${INSTANCE_ZONE} no found, exiting."
exit 1
fi
echo "Proxy URL from the config: ${PROXY_URL}"
# Register the proxy agent
VM_ID=$(curl -H 'Metadata-Flavor: Google' "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/identity?format=full&audience=${PROXY_URL}/request-service-account-endpoint" 2>/dev/null)
RESULT_JSON=$(curl -H "Authorization: Bearer $(gcloud auth print-access-token)" -H "X-Inverting-Proxy-VM-ID: ${VM_ID}" -d "" "${PROXY_URL}/request-service-account-endpoint" 2>/dev/null)
echo "Response from the registration server: ${RESULT_JSON}"
HOSTNAME=$(echo "${RESULT_JSON}" | jq -r ".hostname")
BACKEND_ID=$(echo "${RESULT_JSON}" | jq -r ".backendID")
echo "Hostname: ${HOSTNAME}"
echo "Backend id: ${BACKEND_ID}"
# Store the registration information in a ConfigMap
PATCH_TEMP='{"data": {"Hostname":"'${HOSTNAME}'","ProxyUrl":"'${PROXY_URL}'","BackendId":"'${BACKEND_ID}'"}}'
PATCH_JSON=$(printf "${PATCH_TEMP}" "${HOSTNAME}" "${PROXY_URL}" "${BACKEND_ID}")
echo "PACTH_JSON: ${PATCH_JSON}"
kubectl patch configmap/inverse-proxy-config \
--type merge \
--patch "${PATCH_JSON}"
# Patch again: we have seen cases where the patch did not actually merge but reported no error.
# It looks like a bug in the K8s master, or the ConfigMap is not ready when this runs.
# Sleep for 20 seconds and patch again to mitigate the problem.
sleep 20
kubectl patch configmap/inverse-proxy-config \
--type merge \
--patch "${PATCH_JSON}"
echo "Patched configmap/inverse-proxy-config"
run-proxy-agent
| 8,026 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/proxy/Dockerfile | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Pin to a specific version of the inverting-proxy agent
FROM gcr.io/inverting-proxy/agent@sha256:d0a06a247bb443f9528356a1341cadfa4c4479a034097ef9ed8cf200c6383ec0
RUN apt-get update && apt-get install -y curl jq python-pip
RUN pip install requests
RUN curl https://raw.githubusercontent.com/requests/requests/master/LICENSE --output /opt/license.txt
# Install gcloud SDK
RUN curl https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.tar.gz > /tmp/google-cloud-sdk.tar.gz
RUN mkdir -p /usr/local/gcloud
RUN tar -C /usr/local/gcloud -xf /tmp/google-cloud-sdk.tar.gz
RUN /usr/local/gcloud/google-cloud-sdk/install.sh
ENV PATH $PATH:/usr/local/gcloud/google-cloud-sdk/bin
# Install kubectl
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
RUN chmod +x ./kubectl
RUN mv kubectl /usr/local/bin/
ADD ./ /opt/proxy
CMD ["/bin/sh", "-c", "/opt/proxy/attempt-register-vm-on-proxy.sh"]
| 8,027 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/proxy/get_proxy_url_test.py | #!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from get_proxy_url import urls_for_zone
url_map_json = """
{
"us": ["https://datalab-us-west1.cloud.google.com"],
"us-west1": ["https://datalab-us-west1.cloud.google.com"],
"us-east1": ["https://datalab-us-east1.cloud.google.com"]
}
"""
class TestUrlsForZone(unittest.TestCase):
def test_get_urls(self):
self.assertEqual(
set(["https://datalab-us-east1.cloud.google.com","https://datalab-us-west1.cloud.google.com"]),
urls_for_zone("us-east1-a",json.loads(url_map_json)))
def test_get_urls_no_match(self):
self.assertEqual(set([]), urls_for_zone("euro-west1-a",json.loads(url_map_json)))
def test_get_urls_incorrect_format(self):
with self.assertRaises(ValueError):
urls_for_zone("weird-format-a",json.loads(url_map_json))
if __name__ == '__main__':
unittest.main() | 8,028 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/proxy/get_proxy_url.py | #!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CLI tool that returns URL of the proxy for particular zone and version."""
import argparse
import functools
import json
import logging
import re
import requests
try:
unicode
except NameError:
unicode = str
def urls_for_zone(zone, location_to_urls_map):
"""Returns list of potential proxy URLs for a given zone.
Returns:
List of possible URLs, in order of proximity.
Args:
zone: GCP zone
location_to_urls_map: Maps region/country/continent to list of URLs, e.g.:
{
"us-west1" : [ us-west1-url ],
"us-east1" : [ us-east1-url ],
"us" : [ us-west1-url ],
...
}
"""
zone_match = re.match("((([a-z]+)-[a-z]+)\d+)-[a-z]", zone)
if not zone_match:
raise ValueError("Incorrect zone specified: {}".format(zone))
# e.g. zone = us-west1-b
region = zone_match.group(1) # us-west1
approx_region = zone_match.group(2) # us-west
country = zone_match.group(3) # us
urls = []
if region in location_to_urls_map:
urls.extend(location_to_urls_map[region])
region_regex = re.compile("([a-z]+-[a-z]+)\d+")
for location in location_to_urls_map:
region_match = region_regex.match(location)
if region_match and region_match.group(1) == approx_region:
urls.extend(location_to_urls_map[location])
if country in location_to_urls_map:
urls.extend(location_to_urls_map[country])
return set(urls)
def main():
unicode_type = functools.partial(unicode, encoding="utf8")
parser = argparse.ArgumentParser(
description="Get proxy URL")
parser.add_argument("--config-file-path", required=True, type=unicode_type)
parser.add_argument("--location", required=True, type=unicode_type)
parser.add_argument("--version", required=True, type=unicode_type)
args = parser.parse_args()
with open(args.config_file_path, "r") as config_file:
data = json.loads(config_file.read())
agent_containers_config = data["agent-docker-containers"]
version = args.version
if version not in agent_containers_config:
version = "latest"
if version not in agent_containers_config:
raise ValueError("Version latest not found in the config file.")
container_config = agent_containers_config[version]
regional_urls = container_config["proxy-urls"]
location = args.location
urls = urls_for_zone(location, regional_urls)
if not urls:
raise ValueError("No valid URLs found for zone: {}".format(location))
for url in urls:
try:
status_code = requests.head(url).status_code
    except requests.ConnectionError:
      # Skip unreachable URLs; otherwise status_code may be referenced before assignment.
      continue
expected_codes = frozenset([307])
# 307 - Temporary Redirect, Proxy server sends this if VM has access rights.
if status_code in expected_codes:
logging.debug("Status code from the url %s", status_code)
print(url)
exit(0)
logging.debug("Incorrect status_code from the server: %s. Expected: %s",
status_code, expected_codes)
raise ValueError("No working URL found")
if __name__ == '__main__':
main() | 8,029 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/components/license.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A script to gather locally-installed python packages, and based on
# a specified license table (3 columns: name,license_link,license_type) csv file,
# download and save all license files into specified directory.
# Usage:
# license.sh third_party_licenses.csv /usr/licenses
# Get the list of python packages installed locally.
IFS=$'\n'
INSTALLED_PACKAGES=($(pip freeze | sed s/=.*//))
# Get the list of python packages tracked in the given CSV file.
REGISTERED_PACKAGES=()
while IFS=, read -r col1 col2 col3
do
REGISTERED_PACKAGES+=($col1)
done < $1
# Make sure all locally installed packages are covered.
DIFF=()
for i in "${INSTALLED_PACKAGES[@]}"; do
skip=
for j in "${REGISTERED_PACKAGES[@]}"; do
[[ $i == $j ]] && { skip=1; break; }
done
[[ -n $skip ]] || DIFF+=("$i")
done
if [ -n "$DIFF" ]; then
echo "The following packages are not found for licenses tracking."
echo "Please add an entry in $1 for each of them."
echo ${DIFF[@]}
exit 1
fi
# Gather license files for each package. For packages with GPL license we mirror the source code.
mkdir -p $2/source
while IFS=, read -r col1 col2 col3
do
if [[ " ${INSTALLED_PACKAGES[@]} " =~ " ${col1} " ]]; then
wget -O $2/$col1.LICENSE $col2
if [[ "${col3}" == *GPL* ]]; then
pip install -t "$2/source/${col1}" ${col1}
fi
fi
done < $1
| 8,030 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/components/README.md | # Kubeflow pipeline components
Kubeflow pipeline components are implementations of Kubeflow pipeline tasks. Each task takes
one or more [artifacts](https://www.kubeflow.org/docs/pipelines/overview/concepts/output-artifact/)
as input and may produce one or more
[artifacts](https://www.kubeflow.org/docs/pipelines/overview/concepts/output-artifact/) as output.
**Example: XGBoost DataProc components**
* [Set up cluster](deprecated/dataproc/create_cluster/src/create_cluster.py)
* [Analyze](deprecated/dataproc/analyze/src/analyze.py)
* [Transform](deprecated/dataproc/transform/src/transform.py)
* [Distributed train](deprecated/dataproc/train/src/train.py)
* [Delete cluster](deprecated/dataproc/delete_cluster/src/delete_cluster.py)
Each task usually consists of the following parts:
``Client code``
The code that talks to endpoints to submit jobs. For example, code to talk to Google
Dataproc API to submit a Spark job.
``Runtime code``
The code that does the actual job and usually runs in the cluster. For example, Spark code
that transforms raw data into preprocessed data.
``Container``
A container image that runs the client code.
Note the naming convention for client code and runtime code—for a task named "mytask":
* The `mytask.py` program contains the client code.
* The `mytask` directory contains all the runtime code.
See how to [use the Kubeflow Pipelines SDK](https://www.kubeflow.org/docs/pipelines/sdk/sdk-overview/)
and [build your own components](https://www.kubeflow.org/docs/pipelines/sdk/build-component/).
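As a quick illustration, the sketch below (hypothetical file path, component inputs, and pipeline name; it assumes the `kfp` SDK is installed) loads a task's `component.yaml` and wires it into a pipeline:
```python
import kfp.compiler
import kfp.components
from kfp import dsl
# Load a task implementation from its component.yaml (hypothetical path).
my_op = kfp.components.load_component_from_file('my_task/component.yaml')
@dsl.pipeline(name='My pipeline', description='Runs a single component.')
def my_pipeline(input_uri='gs://my-bucket/data.csv'): # placeholder default
    # Calling the loaded component factory adds a task to the pipeline graph;
    # the argument must match an input declared in component.yaml.
    my_op(input_uri)
# Compile into a package that can be uploaded to the Kubeflow Pipelines UI.
kfp.compiler.Compiler().compile(my_pipeline, 'my_pipeline.zip')
```
The same `load_component_from_file` call is what `test_load_all_components.sh` exercises for every `component.yaml` in this directory tree.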
| 8,031 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/components/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
while getopts ":hp:t:i:l:" opt; do
case "${opt}" in
h) echo "-p: project name"
echo "-t: tag name"
echo "-i: image name. If provided, project name and tag name are not necessary"
echo "-l: local image name."
exit
;;
p) PROJECT_ID=${OPTARG}
;;
t) TAG_NAME=${OPTARG}
;;
i) IMAGE_NAME=${OPTARG}
;;
l) LOCAL_IMAGE_NAME=${OPTARG}
;;
\? ) echo "Usage: cmd [-p] project [-t] tag [-i] image [-l] local image"
exit
;;
esac
done
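# Example invocation (a sketch; the project and image names below are placeholders):
#   ./build_image.sh -p my-gcp-project -l ml-pipeline-my-component
# This builds the image from the local Dockerfile and pushes it to
# gcr.io/my-gcp-project/ml-pipeline-my-component with a generated tag and `latest`.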
if [ -z "${PROJECT_ID}" ]; then
PROJECT_ID=$(gcloud config config-helper --format "value(configuration.properties.core.project)")
fi
if [ -z "${TAG_NAME}" ]; then
TAG_NAME=$(date +v%Y%m%d)-$(git describe --tags --always --dirty)-$(git diff | shasum -a256 | cut -c -6)
fi
if [ -z "${IMAGE_NAME}" ]; then
docker pull gcr.io/${PROJECT_ID}/${LOCAL_IMAGE_NAME}:latest || true
fi
docker build -t ${LOCAL_IMAGE_NAME} . --cache-from gcr.io/${PROJECT_ID}/${LOCAL_IMAGE_NAME}:latest
if [ -z "${IMAGE_NAME}" ]; then
docker tag ${LOCAL_IMAGE_NAME} gcr.io/${PROJECT_ID}/${LOCAL_IMAGE_NAME}:${TAG_NAME}
docker tag ${LOCAL_IMAGE_NAME} gcr.io/${PROJECT_ID}/${LOCAL_IMAGE_NAME}:latest
docker push gcr.io/${PROJECT_ID}/${LOCAL_IMAGE_NAME}:${TAG_NAME}
docker push gcr.io/${PROJECT_ID}/${LOCAL_IMAGE_NAME}:latest
else
docker tag ${LOCAL_IMAGE_NAME} "${IMAGE_NAME}"
docker push "${IMAGE_NAME}"
fi
| 8,032 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/components/release.sh | #!/bin/bash
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script automates the process of releasing the component images.
# To run it, find a good release candidate commit SHA from ml-pipeline-test project,
# and provide a full github COMMIT SHA to the script. E.g.
# ./release.sh 2118baf752d3d30a8e43141165e13573b20d85b8
# The script copies the images from test to prod and updates the local code.
# You can then send a PR using your local branch.
set -xe
images=(
"ml-pipeline-kubeflow-deployer"
"ml-pipeline-kubeflow-tf-trainer"
"ml-pipeline-kubeflow-tf-trainer-gpu"
"ml-pipeline-kubeflow-tfjob"
"ml-pipeline-dataproc-analyze"
"ml-pipeline-dataproc-create-cluster"
"ml-pipeline-dataproc-delete-cluster"
"ml-pipeline-dataproc-predict"
"ml-pipeline-dataproc-transform"
"ml-pipeline-dataproc-train"
"ml-pipeline-local-confusion-matrix"
"ml-pipeline-local-roc"
"ml-pipeline-gcp"
)
COMMIT_SHA=$1
FROM_GCR_PREFIX='gcr.io/ml-pipeline-test/'
TO_GCR_PREFIX='gcr.io/ml-pipeline/'
REPO=kubeflow/pipelines
if [ -z "$COMMIT_SHA" ]; then
echo "Usage: release.sh <commit-SHA>" >&2
exit 1
fi
# Checking out the repo
clone_dir=$(mktemp -d)
git clone "[email protected]:${REPO}.git" "$clone_dir"
cd "$clone_dir"
branch="release-$COMMIT_SHA"
# Creating the release branch from the specified commit
release_head=$COMMIT_SHA
git checkout "$release_head" -b "$branch"
# Releasing the container images to public. Updating components and samples.
for image in "${images[@]}"
do
TARGET_IMAGE_BASE=${TO_GCR_PREFIX}${image}
TARGET_IMAGE=${TARGET_IMAGE_BASE}:${COMMIT_SHA}
# Move image from test to prod GCR
gcloud container images add-tag --quiet \
${FROM_GCR_PREFIX}${image}:${COMMIT_SHA} ${TARGET_IMAGE}
# Update the code
find components samples -type f | while read file; do sed -i -e "s|${TARGET_IMAGE_BASE}:\([a-zA-Z0-9_.-]\)\+|${TARGET_IMAGE}|g" "$file"; done
done
# Checking-in the container image changes
git add --all
git commit --message "Updated component images to version $COMMIT_SHA"
image_update_commit_sha=$(git rev-parse HEAD)
# Updating the samples to use the updated components
git diff HEAD~1 HEAD --name-only | while read component_file; do
echo $component_file
find components samples -type f | while read file; do
sed -i -E "s|(https://raw.githubusercontent.com/kubeflow/pipelines/)[^/]+(/$component_file)|\1${image_update_commit_sha}\2|g" "$file";
done
done
# Checking-in the component changes
git add --all
git commit --message "Updated components to version $image_update_commit_sha"
component_update_commit_sha=$(git rev-parse HEAD)
# Pushing the changes upstream
read -p "Do you want to push the new branch to upstream to create a PR? [y|n]"
if [ "$REPLY" != "y" ]; then
exit
fi
git push --set-upstream origin "$branch"
sensible-browser "https://github.com/${REPO}/compare/master...$branch"
| 8,033 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/components/test_load_all_components.sh | #!/bin/bash -e
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script checks that every component.yaml file under this directory tree
# can be loaded by the Kubeflow Pipelines SDK.
cd "$(dirname "$0")"
PYTHONPATH="$PYTHONPATH:../sdk/python"
echo "Testing loading all components"
python3 -c '
import sys
import kfp
for component_file in sys.argv[1:]:
print(component_file)
kfp.components.load_component_from_file(component_file)
' $(find . -name component.yaml)
| 8,034 |
0 | kubeflow_public_repos/kfp-tekton-backend | kubeflow_public_repos/kfp-tekton-backend/components/third_party_licenses.csv | chainer,https://raw.githubusercontent.com/chainer/chainer/master/LICENSE,MIT
greenlet,https://raw.githubusercontent.com/python-greenlet/greenlet/master/LICENSE,MIT
gevent,https://raw.githubusercontent.com/gevent/gevent/master/LICENSE,MIT
msgpack,https://raw.githubusercontent.com/msgpack/msgpack/python-0.1.7/python/COPYING,Apache v2
Mako,https://raw.githubusercontent.com/zzzeek/mako/master/LICENSE,MIT
python-editor,https://raw.githubusercontent.com/fmoo/python-editor/master/LICENSE,Apache v2
tzlocal,https://raw.githubusercontent.com/regebro/tzlocal/master/LICENSE.txt,MIT
itsdangerous,https://raw.githubusercontent.com/pallets/itsdangerous/master/LICENSE.rst,3-Clause BSD
Werkzeug,https://raw.githubusercontent.com/pallets/werkzeug/master/LICENSE.rst,3-Clause BSD
click,https://raw.githubusercontent.com/pallets/click/master/LICENSE.rst,3-Clause BSD
alembic,https://raw.githubusercontent.com/zzzeek/alembic/master/LICENSE,MIT
sqlalchemy,https://raw.githubusercontent.com/zzzeek/sqlalchemy/master/LICENSE,MIT
APScheduler,https://raw.githubusercontent.com/agronholm/apscheduler/master/LICENSE.txt,MIT
Flask,https://raw.githubusercontent.com/pallets/flask/master/LICENSE,3-Clause BSD
chainerui,https://raw.githubusercontent.com/chainer/chainerui/master/LICENSE,MIT
chainercv,https://raw.githubusercontent.com/chainer/chainercv/master/LICENSE,MIT
pyglet,https://raw.githubusercontent.com/adamlwgriffiths/Pyglet/master/LICENSE,3-Clause BSD
gym,https://raw.githubusercontent.com/openai/gym/master/LICENSE.md,MIT
future,https://raw.githubusercontent.com/PythonCharmers/python-future/master/LICENSE.txt,MIT
cached-property,https://raw.githubusercontent.com/pydanny/cached-property/master/LICENSE,3-Clause BSD
chainerrl,https://raw.githubusercontent.com/chainer/chainerrl/master/LICENSE,MIT
docutils,https://raw.githubusercontent.com/docutils-mirror/docutils/master/COPYING.txt,Public Domain
statistics,https://raw.githubusercontent.com/digitalemagine/py-statistics/master/LICENSE,PSF v2
fastcache,https://raw.githubusercontent.com/pbrady/fastcache/master/LICENSE,MIT
mpi4py,https://raw.githubusercontent.com/mpi4py/mpi4py/master/LICENSE.rst,2-Clause BSD
chainermn,https://raw.githubusercontent.com/chainer/chainermn/master/LICENSE,MIT
pbr,https://raw.githubusercontent.com/openstack-dev/pbr/master/LICENSE,Apache v2
filelock,https://raw.githubusercontent.com/benediktschmitt/py-filelock/master/LICENSE.rst,Public Domain
fastrlock,https://raw.githubusercontent.com/scoder/fastrlock/master/LICENSE,MIT
mock,https://raw.githubusercontent.com/testing-cabal/mock/master/LICENSE.txt,2-Clause BSD
ideep4py,https://raw.githubusercontent.com/intel/ideep/master/LICENSE,MIT
cupy,https://raw.githubusercontent.com/cupy/cupy/master/LICENSE,MIT
ipykernel,https://raw.githubusercontent.com/ipython/ipykernel/master/COPYING.md,3-Clause BSD
ipython,https://raw.githubusercontent.com/ipython/ipython/master/LICENSE,3-Clause BSD
jedi,https://raw.githubusercontent.com/davidhalter/jedi/master/LICENSE.txt,MIT
prompt-toolkit,https://raw.githubusercontent.com/jonathanslenders/python-prompt-toolkit/master/LICENSE,3-Clause BSD
parso,https://raw.githubusercontent.com/davidhalter/parso/master/LICENSE.txt,MIT
python-dateutil,https://raw.githubusercontent.com/dateutil/dateutil/283a3a3a67245e1d5791e5478ce6ff688f2c348a/LICENSE,"Apache Software License, BSD License (Dual License)"
pexpect,https://raw.githubusercontent.com/pexpect/pexpect/ab7d99a670794fc2b0365440340b899a346422da/LICENSE,ISCL
setuptools,https://raw.githubusercontent.com/pypa/setuptools/master/LICENSE,MIT
tornado,https://raw.githubusercontent.com/tornadoweb/tornado/stable/LICENSE,Apache Software License 2.0
traitlets,https://raw.githubusercontent.com/ipython/traitlets/master/COPYING.md,BSD
subprocess32,https://raw.githubusercontent.com/google/python-subprocess32/master/LICENSE,Python Software Foundation License
jinja2,https://raw.githubusercontent.com/pallets/jinja/master/LICENSE.rst,BSD
html5lib,https://raw.githubusercontent.com/html5lib/html5lib-python/master/LICENSE,MIT
opencv-python,https://raw.githubusercontent.com/skvark/opencv-python/master/LICENSE.txt,MIT
httplib2,https://raw.githubusercontent.com/httplib2/httplib2/master/LICENSE,MIT
pygments,https://bitbucket.org/birkenfeld/pygments-main/raw/7941677dc77d4f2bf0bbd6140ade85a9454b8b80/LICENSE,BSD
pyasn1-modules,https://raw.githubusercontent.com/etingof/pyasn1-modules/master/LICENSE.txt,BSD
rsa,https://raw.githubusercontent.com/sybrenstuvel/python-rsa/master/LICENSE,Apache Software License (ASL 2)
pyasn1,https://raw.githubusercontent.com/etingof/pyasn1/master/LICENSE.rst,BSD
jupyter-http-over-ws,https://raw.githubusercontent.com/googlecolab/jupyter_http_over_ws/master/LICENSE,Apache Software License (Apache 2.0)
nbconvert,https://raw.githubusercontent.com/jupyter/nbconvert/master/LICENSE,BSD-4-Clause
nbformat,https://raw.githubusercontent.com/jupyter/nbformat/master/COPYING.md,BSD-4-Clause
testpath,https://raw.githubusercontent.com/jupyter/testpath/master/LICENSE,MIT
cffi,https://raw.githubusercontent.com/cffi/cffi/master/COPYRIGHT,MIT
pycparser,https://raw.githubusercontent.com/eliben/pycparser/master/LICENSE,BSD-3-Clause
numpy,https://raw.githubusercontent.com/numpy/numpy/master/LICENSE.txt,BSD-3-Clause
termcolor,https://raw.githubusercontent.com/hfeeki/termcolor/master/COPYING.txt,MIT
backports.weakref,https://raw.githubusercontent.com/pjdelport/backports.weakref/master/LICENSE,PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
gast,https://raw.githubusercontent.com/serge-sans-paille/gast/master/LICENSE,BSD-3-Clause
astor,https://raw.githubusercontent.com/berkerpeksag/astor/master/LICENSE,BSD-3-Clause
horovod,https://raw.githubusercontent.com/uber/horovod/master/LICENSE,Apache 2.0
google-api-python-client,https://raw.githubusercontent.com/google/google-api-python-client/master/LICENSE,Apache 2.0
google-auth,https://raw.githubusercontent.com/GoogleCloudPlatform/google-auth-library-python/master/LICENSE,Apache 2.0
jupyterlab,https://raw.githubusercontent.com/jupyterlab/jupyterlab/master/LICENSE,BSD-3-clause
grpcio,https://raw.githubusercontent.com/grpc/grpc/master/LICENSE,Apache 2.0
grpcio-gcp,https://raw.githubusercontent.com/GoogleCloudPlatform/grpc-gcp-python/master/LICENSE,Apache 2.0
absl-py,https://raw.githubusercontent.com/abseil/abseil-py/master/LICENSE,Apache 2.0
protobuf,https://raw.githubusercontent.com/protocolbuffers/protobuf/master/LICENSE,BSD-3-clause
tensorflow,https://raw.githubusercontent.com/tensorflow/tensorflow/master/LICENSE,Apache 2.0
tensorboard,https://raw.githubusercontent.com/tensorflow/tensorboard/master/LICENSE,Apache 2.0
keras,https://raw.githubusercontent.com/keras-team/keras/master/LICENSE,MIT
Keras-Applications,https://raw.githubusercontent.com/keras-team/keras-applications/master/LICENSE,MIT
jupyter-tensorboard,https://raw.githubusercontent.com/lspvic/jupyter_tensorboard/master/LICENSE,MIT
torch,https://raw.githubusercontent.com/pytorch/pytorch/master/LICENSE,BSD-3-clause
torchvision,https://raw.githubusercontent.com/pytorch/vision/master/LICENSE,BSD-3-clause
apache-beam,https://raw.githubusercontent.com/apache/beam/master/LICENSE,Apache 2.0
avro,https://raw.githubusercontent.com/apache/avro/master/LICENSE.txt,Apache 2.0
bleach,https://raw.githubusercontent.com/mozilla/bleach/master/LICENSE,Apache 2.0
cachetools,https://raw.githubusercontent.com/tkem/cachetools/master/LICENSE,MIT
certifi,https://raw.githubusercontent.com/certifi/python-certifi/master/LICENSE,MPL 2.0
chardet,https://raw.githubusercontent.com/chardet/chardet/master/LICENSE,LGPL 2.1
dill,https://raw.githubusercontent.com/uqfoundation/dill/master/LICENSE,BSD-3-clause
docopt,https://raw.githubusercontent.com/docopt/docopt/master/LICENSE-MIT,MIT
fasteners,https://raw.githubusercontent.com/harlowja/fasteners/master/LICENSE,Apache 2.0
funcsigs,https://raw.githubusercontent.com/aliles/funcsigs/master/LICENSE,Apache 2.0
gapic-google-cloud-pubsub-v1,https://raw.githubusercontent.com/googleapis/api-client-staging/master/generated/python/gapic-google-cloud-pubsub-v1/LICENSE,Apache 2.0
google-apitools,https://raw.githubusercontent.com/google/apitools/master/LICENSE,Apache 2.0
google-auth-library-python-httplib2,https://raw.githubusercontent.com/GoogleCloudPlatform/google-auth-library-python-httplib2/master/LICENSE,Apache 2.0
google-cloud-bigquery,https://raw.githubusercontent.com/googleapis/google-cloud-python/master/LICENSE,Apache 2.0
google-cloud-bigtable,https://raw.githubusercontent.com/googleapis/google-cloud-python/master/LICENSE,Apache 2.0
google-cloud-core,https://raw.githubusercontent.com/googleapis/google-cloud-python/master/LICENSE,Apache 2.0
google-cloud-dlp,https://raw.githubusercontent.com/googleapis/python-dlp/master/LICENSE,Apache 2.0
google-cloud-language,https://raw.githubusercontent.com/googleapis/google-cloud-python/master/LICENSE,Apache 2.0
google-cloud-pubsub,https://raw.githubusercontent.com/googleapis/google-cloud-python/master/LICENSE,Apache 2.0
google-cloud-spanner,https://raw.githubusercontent.com/googleapis/python-spanner/master/LICENSE,Apache 2.0
google-cloud-videointelligence,https://raw.githubusercontent.com/googleapis/python-videointelligence/master/LICENSE,Apache 2.0
google-cloud-vision,https://raw.githubusercontent.com/googleapis/python-vision/master/LICENSE,Apache 2.0
google-gax,https://raw.githubusercontent.com/googleapis/gax-python/master/LICENSE,Apache 2.0
googleapis-common-protos,https://raw.githubusercontent.com/googleapis/api-common-protos/master/LICENSE,Apache 2.0
googledatastore,https://raw.githubusercontent.com/GoogleCloudPlatform/google-cloud-datastore/master/LICENSE,Apache 2.0
hdfs,https://github.com/mtth/hdfs/blob/master/LICENSE,MIT
idna,https://raw.githubusercontent.com/kjd/idna/master/LICENSE.rst,BSD-like
Markdown,https://raw.githubusercontent.com/Python-Markdown/markdown/master/LICENSE.md,BSD
monotonic,https://raw.githubusercontent.com/atdt/monotonic/master/LICENSE,Apache 2.0
oauth2client,https://raw.githubusercontent.com/google/oauth2client/master/LICENSE,Apache 2.0
ply,https://raw.githubusercontent.com/dabeaz/ply/master/README.md,BSD
google-cloud-datastore-v1,https://raw.githubusercontent.com/GoogleCloudPlatform/google-cloud-datastore/master/LICENSE,Apache 2.0
proto-google-cloud-pubsub-v1,https://raw.githubusercontent.com/googleapis/google-cloud-python/master/LICENSE,Apache 2.0
pytz,https://raw.githubusercontent.com/newvem/pytz/master/LICENSE.txt,MIT
PyVCF,https://raw.githubusercontent.com/jamescasbon/PyVCF/master/LICENSE,BSD
requests,https://raw.githubusercontent.com/requests/requests/master/LICENSE,Apache 2.0
six,https://raw.githubusercontent.com/benjaminp/six/master/LICENSE,MIT
tensorflow-transform,https://raw.githubusercontent.com/tensorflow/transform/master/LICENSE,Apache 2.0
typing,https://raw.githubusercontent.com/python/typing/master/LICENSE,PSF
urllib3,https://raw.githubusercontent.com/urllib3/urllib3/master/LICENSE.txt,MIT
PyYAML,https://raw.githubusercontent.com/yaml/pyyaml/master/LICENSE,MIT
uritemplate,https://github.com/python-hyper/uritemplate/blob/master/LICENSE,BSD 3-Clause License or Apache 2.0
adal,https://raw.githubusercontent.com/AzureAD/azure-activedirectory-library-for-python/dev/LICENSE,MIT
asn1crypto,https://raw.githubusercontent.com/wbond/asn1crypto/master/LICENSE,MIT
cryptography,https://raw.githubusercontent.com/pyca/cryptography/master/LICENSE,Apache or BSD
google-auth-httplib2,https://raw.githubusercontent.com/GoogleCloudPlatform/google-auth-library-python-httplib2/master/LICENSE,Apache 2.0
ipaddress,https://raw.githubusercontent.com/phihag/ipaddress/master/LICENSE,PSF
kubernetes,https://raw.githubusercontent.com/kubernetes-client/python/master/LICENSE,Apache 2.0
oauthlib,https://raw.githubusercontent.com/oauthlib/oauthlib/master/LICENSE,BSD
PyJWT,https://raw.githubusercontent.com/jpadilla/pyjwt/master/LICENSE,MIT
requests-oauthlib,https://raw.githubusercontent.com/requests/requests-oauthlib/master/LICENSE,BSD
retrying,https://raw.githubusercontent.com/rholder/retrying/master/LICENSE,Apache 2.0
websocket-client,https://raw.githubusercontent.com/websocket-client/websocket-client/master/LICENSE,LGPL
enum34,https://bitbucket.org/stoneleaf/enum34/raw/f24487b45cd041fc9406d67441d2186ac70772b7/enum/LICENSE,BSD
futures,https://raw.githubusercontent.com/agronholm/pythonfutures/master/LICENSE,PSF
backports-abc,https://raw.githubusercontent.com/cython/backports_abc/master/LICENSE,PSF
backports.shutil-get-terminal-size,https://raw.githubusercontent.com/chrippa/backports.shutil_get_terminal_size/master/LICENSE,MIT
configparser,https://bitbucket.org/ambv/configparser/raw/78998f2ded2e840376adc712337545014fa9b622/README.rst,MIT
crcmod,https://raw.githubusercontent.com/gsutil-mirrors/crcmod/master/LICENSE,MIT
decorator,https://raw.githubusercontent.com/micheles/decorator/master/LICENSE.txt,2-Clause BSD
entrypoints,https://raw.githubusercontent.com/takluyver/entrypoints/master/LICENSE,MIT
fastavro,https://raw.githubusercontent.com/fastavro/fastavro/master/LICENSE,MIT
functools32,https://raw.githubusercontent.com/michilu/python-functools32/master/LICENSE,PSF
grpc-google-iam-v1,https://raw.githubusercontent.com/googleapis/googleapis/master/LICENSE,Apache 2.0
ipython-genutils,https://raw.githubusercontent.com/ipython/ipython_genutils/master/COPYING.md,BSD
ipywidgets,https://raw.githubusercontent.com/jupyter-widgets/ipywidgets/master/LICENSE,3-Clause BSD
Jinja2,https://raw.githubusercontent.com/pallets/jinja/master/LICENSE.rst,BSD
jsonschema,https://raw.githubusercontent.com/Julian/jsonschema/master/COPYING,MIT
jupyter,https://raw.githubusercontent.com/jupyter/notebook/master/LICENSE,BSD
notebook,https://raw.githubusercontent.com/jupyter/notebook/master/LICENSE,BSD
jupyter-client,https://raw.githubusercontent.com/jupyter/jupyter_client/master/COPYING.md,BSD
jupyter-console,https://raw.githubusercontent.com/jupyter/jupyter_console/master/COPYING.md,BSD
jupyter-core,https://raw.githubusercontent.com/jupyter/jupyter_core/master/COPYING.md,BSD
MarkupSafe,https://raw.githubusercontent.com/pallets/markupsafe/master/LICENSE.rst,BSD
mistune,https://raw.githubusercontent.com/lepture/mistune/master/LICENSE,3-Clause BSD
pandocfilters,https://raw.githubusercontent.com/jgm/pandocfilters/master/LICENSE,3-Clause BSD
pathlib2,https://raw.githubusercontent.com/mcmtroffaes/pathlib2/develop/LICENSE.rst,MIT
pickleshare,https://raw.githubusercontent.com/pickleshare/pickleshare/master/LICENSE,MIT
prometheus-client,https://raw.githubusercontent.com/prometheus/client_python/master/LICENSE,Apache 2.0
proto-google-cloud-datastore-v1,https://raw.githubusercontent.com/googleapis/googleapis/master/LICENSE,Apache 2.0
ptyprocess,https://raw.githubusercontent.com/pexpect/ptyprocess/master/LICENSE,ISC
pydot,https://raw.githubusercontent.com/erocarrera/pydot/master/LICENSE,MIT
Pygments,https://raw.githubusercontent.com/nex3/pygments/master/LICENSE,2-Clause BSD
pyparsing,https://raw.githubusercontent.com/pyparsing/pyparsing/master/LICENSE,MIT
pyzmq,https://raw.githubusercontent.com/zeromq/pyzmq/master/COPYING.BSD,LGPL+BSD
qtconsole,https://raw.githubusercontent.com/jupyter/qtconsole/master/LICENSE,3-Clause BSD
scandir,https://raw.githubusercontent.com/benhoyt/scandir/master/LICENSE.txt,3-Clause BSD
Send2Trash,https://raw.githubusercontent.com/hsoft/send2trash/master/LICENSE,3-Clause BSD
simplegeneric,https://opensource.org/licenses/ZPL-2.0,ZPL 2.1
singledispatch,https://opensource.org/licenses/MIT,MIT
tensorflow-model-analysis,https://github.com/tensorflow/model-analysis/blob/master/LICENSE,Apache 2.0
terminado,https://raw.githubusercontent.com/jupyter/terminado/master/LICENSE,BSD
wcwidth,https://raw.githubusercontent.com/jquast/wcwidth/master/LICENSE.txt,MIT
widgetsnbextension,https://raw.githubusercontent.com/jupyter-widgets/ipywidgets/master/widgetsnbextension/LICENSE,BSD
pandas,https://raw.githubusercontent.com/pandas-dev/pandas/master/LICENSE,3-Clause BSD
scikit-learn,https://raw.githubusercontent.com/scikit-learn/scikit-learn/master/COPYING,BSD
scipy,https://raw.githubusercontent.com/scipy/scipy/master/LICENSE.txt,BSD
tensorflow-tensorboard,https://raw.githubusercontent.com/tensorflow/tensorboard/master/LICENSE,Apache 2.0
tensorflow-data-validation,https://raw.githubusercontent.com/tensorflow/data-validation/master/LICENSE,Apache 2.0
tensorflow-metadata,https://raw.githubusercontent.com/tensorflow/metadata/master/LICENSE,Apache 2.0
defusedxml,https://raw.githubusercontent.com/tiran/defusedxml/master/LICENSE,PSF
backports.functools-lru-cache,https://raw.githubusercontent.com/jaraco/backports.functools_lru_cache/master/LICENSE,MIT
cycler,https://raw.githubusercontent.com/matplotlib/cycler/master/LICENSE,MIT
h5py,https://raw.githubusercontent.com/h5py/h5py/master/licenses/license.txt
matplotlib,https://raw.githubusercontent.com/matplotlib/matplotlib/master/LICENSE/LICENSE
Pillow,https://raw.githubusercontent.com/python-pillow/Pillow/master/LICENSE
sklearn,https://raw.githubusercontent.com/scikit-learn/scikit-learn/master/COPYING,BSD
tensorflow-gpu,https://raw.githubusercontent.com/tensorflow/tensorflow/master/LICENSE,Apache 2.0
webencodings,https://raw.githubusercontent.com/gsnedders/python-webencodings/master/LICENSE,BSD
google-api-core,https://raw.githubusercontent.com/googleapis/google-cloud-python/master/LICENSE,Apache 2.0
google-resumable-media,https://raw.githubusercontent.com/googleapis/google-resumable-media-python/master/LICENSE,Apache 2.0
trainer,https://raw.githubusercontent.com/kubeflow/pipelines/master/LICENSE,Apache 2.0
pyarrow,https://raw.githubusercontent.com/apache/arrow/master/LICENSE.txt,Apache 2.0
attrs,https://raw.githubusercontent.com/python-attrs/attrs/master/LICENSE,MIT
pyrsistent,https://raw.githubusercontent.com/tobgu/pyrsistent/master/LICENCE.mit,MIT
fire,https://raw.githubusercontent.com/google/python-fire/master/LICENSE,Apache 2.0
kfp-component,https://raw.githubusercontent.com/kubeflow/pipelines/master/LICENSE,Apache 2.0
google-cloud-storage,https://raw.githubusercontent.com/googleapis/google-cloud-python/master/LICENSE,Apache 2.0
gcloud,https://raw.githubusercontent.com/googleapis/google-cloud-python/master/LICENSE,Apache 2.0
gax-google-logging-v2,https://raw.githubusercontent.com/googleapis/googleapis/master/LICENSE,Apache 2.0
gax-google-pubsub-v1,https://raw.githubusercontent.com/googleapis/googleapis/master/LICENSE,Apache 2.0
grpc-google-logging-v2,https://raw.githubusercontent.com/googleapis/googleapis/master/LICENSE,Apache 2.0
grpc-google-pubsub-v1,https://raw.githubusercontent.com/googleapis/googleapis/master/LICENSE,Apache 2.0
google-cloud-datastore,https://raw.githubusercontent.com/GoogleCloudPlatform/google-cloud-datastore/master/LICENSE,Apache 2.0
pymongo,https://raw.githubusercontent.com/mongodb/mongo-python-driver/master/LICENSE,Apache 2.0
google-auth-oauthlib,https://raw.githubusercontent.com/googleapis/google-auth-library-python-oauthlib/master/LICENSE,Apache 2.0
google-pasta,https://raw.githubusercontent.com/google/pasta/master/LICENSE,Apache 2.0
Keras-Preprocessing,https://raw.githubusercontent.com/keras-team/keras-preprocessing/master/LICENSE,MIT
ml-metadata,https://raw.githubusercontent.com/google/ml-metadata/master/LICENSE,Apache 2.0
opt-einsum,https://raw.githubusercontent.com/dgasmith/opt_einsum/master/LICENSE,MIT
tensorflow-estimator,https://raw.githubusercontent.com/tensorflow/estimator/master/LICENSE,Apache 2.0
wrapt,https://github.com/GrahamDumpleton/wrapt/blob/develop/LICENSE,2-Clause BSD
tensorflow-serving-api,https://raw.githubusercontent.com/tensorflow/serving/master/LICENSE,Apache 2.0
tfx-bsl,https://raw.githubusercontent.com/tensorflow/tfx-bsl/master/LICENSE,Apache 2.0
keyring,https://raw.githubusercontent.com/jaraco/keyring/master/LICENSE,MIT
keyrings.alt,https://raw.githubusercontent.com/jaraco/keyrings.alt/master/LICENSE,MIT
pycrypto,https://raw.githubusercontent.com/dlitz/pycrypto/master/COPYRIGHT,Public Domain
pygobject,https://raw.githubusercontent.com/GNOME/pygobject/mainline/COPYING,LGPL
pyxdg,https://cgit.freedesktop.org/xdg/pyxdg/plain/COPYING,LGPL
SecretStorage,https://raw.githubusercontent.com/mitya57/secretstorage/master/LICENSE,BSD-3
typing-extensions,https://raw.githubusercontent.com/python/typing/master/typing_extensions/LICENSE,Python Software Foundation License
| 8,035 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/presto | kubeflow_public_repos/kfp-tekton-backend/components/presto/query/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Presto Query
description: |
A Kubeflow Pipeline component to submit a query to Presto.
inputs:
- name: host
type: String
description: 'Presto Host.'
- name: catalog
type: String
description: 'The name of the catalog.'
- name: schema
type: String
description: 'The name of the schema.'
- name: query
type: String
description: 'The SQL query statements to be executed in Presto'
- name: user
type: String
  description: 'The Presto user name.'
- name: pwd
type: String
  description: 'The Presto password.'
- name: output
description: 'The path or name of the emitted output.'
outputs:
- name: output
description: 'The path or name of the emitted output.'
implementation:
container:
image: docker.io/mkavi/kubeflow-pipeline-presto:latest
command: [
python3, /pipelines/component/src/program.py,
--host, {inputValue: host},
--catalog, {inputValue: catalog},
--schema, {inputValue: schema},
--query, {inputValue: query},
--user, {inputValue: user},
--pwd, {inputValue: pwd},
--output, {inputValue: output}
]
fileOutputs:
output: /output.txt
| 8,036 |
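The component definition above can be wired into a pipeline with the KFP SDK. The sketch below is illustrative only and is not part of the repository: the relative path to component.yaml, the Presto host, and the credentials are placeholder assumptions, and the lowercase keyword arguments simply reuse the declared input names.

# Illustrative only: a pipeline that runs a single query through the Presto component above.
import kfp.dsl as dsl
from kfp import components

# Assumption: the component.yaml shown above is checked out at this relative path.
presto_query_op = components.load_component_from_file(
    'components/presto/query/component.yaml')

@dsl.pipeline(
    name='presto-query-demo',
    description='Submits one SQL statement to Presto via the component above.')
def presto_pipeline(
        host='presto.example.com',   # placeholder host
        catalog='hive',              # placeholder catalog
        schema='default',
        query='SELECT 1',
        user='demo-user',            # placeholder credentials
        pwd='demo-password',
        output='presto-query-output'):
    presto_query_op(host=host, catalog=catalog, schema=schema,
                    query=query, user=user, pwd=pwd, output=output)

if __name__ == '__main__':
    import kfp.compiler as compiler
    compiler.Compiler().compile(presto_pipeline, __file__ + '.tar.gz')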
0 | kubeflow_public_repos/kfp-tekton-backend/components/presto | kubeflow_public_repos/kfp-tekton-backend/components/presto/query/Dockerfile | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM python:3.7
RUN python3 -m pip install pyhive[presto]
COPY ./src /pipelines/component/src
| 8,037 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/presto/query | kubeflow_public_repos/kfp-tekton-backend/components/presto/query/src/program.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pyhive import presto
def get_conn(host=None, catalog=None, schema=None, user=None, pwd=None):
conn = presto.connect(
host=host,
port=443,
protocol="https",
catalog=catalog,
schema=schema,
username=user,
password=pwd,
)
return conn
def query(conn, query):
cursor = conn.cursor()
cursor.execute(query)
cursor.fetchall()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, help="Presto Host.")
parser.add_argument(
"--catalog", type=str, required=True, help="The name of the catalog."
)
parser.add_argument(
"--schema", type=str, required=True, help="The name of the schema."
)
parser.add_argument(
"--query",
type=str,
required=True,
help="The SQL query statements to be executed in Presto.",
)
parser.add_argument(
"--user", type=str, required=True, help="The user of the Presto."
)
parser.add_argument(
"--pwd", type=str, required=True, help="The password of the Presto."
)
parser.add_argument(
"--output",
type=str,
required=True,
help="The path or name of the emitted output.",
)
args = parser.parse_args()
conn = get_conn(args.host, args.catalog, args.schema, args.user, args.pwd)
query(conn, args.query)
with open("/output.txt", "w+") as w:
w.write(args.output)
if __name__ == "__main__":
main()
| 8,038 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/git | kubeflow_public_repos/kfp-tekton-backend/components/git/clone/component.yaml | name: Git clone
description: Creates a shallow clone of the specified repo branch
inputs:
- {name: Repo URI, type: URI}
- {name: Branch, type: String, default: master}
outputs:
- {name: Repo dir, type: Directory}
implementation:
container:
image: alpine/git
command:
- git
- clone
- --depth=1
- --branch
- inputValue: Branch
- inputValue: Repo URI
- outputPath: Repo dir
| 8,039 |
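A minimal, illustrative sketch of using the Git clone component above from the KFP SDK; it is not part of the repository. The component.yaml path and the repository URL are assumptions, and the inputs are passed positionally (Repo URI, then Branch) to avoid guessing generated argument names.

# Illustrative only: shallow-clones a repo branch with the Git clone component above.
import kfp.dsl as dsl
from kfp import components

# Assumption: the component.yaml shown above is checked out at this relative path.
git_clone_op = components.load_component_from_file(
    'components/git/clone/component.yaml')

@dsl.pipeline(
    name='git-clone-demo',
    description='Creates a shallow clone using the component above.')
def clone_pipeline(repo_uri='https://github.com/kubeflow/pipelines.git',
                   branch='master'):
    clone_task = git_clone_op(repo_uri, branch)
    # The 'Repo dir' output of clone_task can be fed to a downstream component
    # that declares a Directory input.

if __name__ == '__main__':
    import kfp.compiler as compiler
    compiler.Compiler().compile(clone_pipeline, __file__ + '.tar.gz')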
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/deployer/component.yaml | name: Kubeflow - Serve TF model
description: Serve TensorFlow model using Kubeflow TF-serving
inputs:
- {name: Model dir, type: GCSPath, description: 'Path of GCS directory containing exported Tensorflow model.'} # type: {GCSPath: {path_type: Directory}}
- {name: Cluster name, type: String, default: '', description: 'Kubernetes cluster name where the TS-serving service should be deployed. Uses the current cluster by default.'}
- {name: Namespace, type: String, default: 'kubeflow', description: 'Kubernetes namespace where the TS-serving service should be deployed.'}
- {name: Server name, type: String, default: 'model-server', description: 'TF-serving server name to use when deploying.'}
- {name: PVC name, type: String, default: '' , description: 'Optional PersistentVolumeClaim to use.'}
- {name: Service type, type: String, default: 'ClusterIP' , description: 'Optional Service type to use, two options: "ClusterIP" (default if not set) and "NodePort".'}
#outputs:
#  - {name: Endpoint URI, type: Serving URI, description: 'URI of the deployed prediction service.'}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:ad9bd5648dd0453005225779f25d8cebebc7ca00
command: [/bin/deploy.sh]
args: [
--model-export-path, {inputValue: Model dir},
--cluster-name, {inputValue: Cluster name},
--namespace, {inputValue: Namespace},
--server-name, {inputValue: Server name},
--pvc-name, {inputValue: PVC name},
--service-type, {inputValue: Service type},
]
| 8,040 |
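An illustrative sketch (not part of the repository) of calling the Serve TF model component above from a pipeline. The component.yaml path and the GCS model directory are placeholders, and the snake_cased keyword names assume the SDK's usual conversion of the declared input names.

# Illustrative only: deploys an exported model with the TF-serving component above.
import kfp.dsl as dsl
from kfp import components

# Assumption: the component.yaml shown above is checked out at this relative path.
deploy_op = components.load_component_from_file(
    'components/kubeflow/deployer/component.yaml')

@dsl.pipeline(
    name='tf-serving-deploy-demo',
    description='Serves an exported model using the component above.')
def deploy_pipeline(model_dir='gs://your-bucket/model/export',  # placeholder GCS path
                    namespace='kubeflow',
                    server_name='model-server'):
    deploy_op(model_dir=model_dir,
              namespace=namespace,
              server_name=server_name,
              service_type='ClusterIP')

if __name__ == '__main__':
    import kfp.compiler as compiler
    compiler.Compiler().compile(deploy_pipeline, __file__ + '.tar.gz')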
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/deployer/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM debian
RUN apt-get update -q && apt-get upgrade -y && \
apt-get install -y -qq --no-install-recommends \
apt-transport-https \
ca-certificates \
git \
gnupg \
lsb-release \
unzip \
wget && \
wget --no-verbose -O /opt/ks_0.13.1_linux_amd64.tar.gz \
https://github.com/ksonnet/ksonnet/releases/download/v0.13.1/ks_0.13.1_linux_amd64.tar.gz && \
tar -C /opt -xzf /opt/ks_0.13.1_linux_amd64.tar.gz && \
cp /opt/ks_0.13.1_linux_amd64/ks /bin/. && \
rm -f /opt/ks_0.13.1_linux_amd64.tar.gz && \
wget --no-verbose -O /bin/kubectl \
https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/linux/amd64/kubectl && \
chmod u+x /bin/kubectl && \
wget --no-verbose -O /opt/kubernetes_v1.11.2 \
https://github.com/kubernetes/kubernetes/archive/v1.11.2.tar.gz && \
mkdir -p /src && \
tar -C /src -xzf /opt/kubernetes_v1.11.2 && \
rm -rf /opt/kubernetes_v1.11.2 && \
wget --no-verbose -O /opt/google-apt-key.gpg \
https://packages.cloud.google.com/apt/doc/apt-key.gpg && \
apt-key add /opt/google-apt-key.gpg && \
export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \
echo "deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" >> \
/etc/apt/sources.list.d/google-cloud-sdk.list && \
apt-get update -q && \
apt-get install -y -qq --no-install-recommends google-cloud-sdk && \
gcloud config set component_manager/disable_update_check true
ENV KUBEFLOW_VERSION v0.4.0
# Checkout the kubeflow packages at image build time so that we do not
# require calling in to the GitHub API at run time.
RUN cd /src && \
mkdir -p github.com/kubeflow && \
cd github.com/kubeflow && \
git clone https://github.com/kubeflow/kubeflow && \
cd kubeflow && \
git checkout ${KUBEFLOW_VERSION}
ADD ./src/deploy.sh /bin/.
ENTRYPOINT ["/bin/deploy.sh"]
| 8,041 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/deployer | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/deployer/src/deploy.sh | #!/bin/bash -e
# Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x
KUBERNETES_NAMESPACE="${KUBERNETES_NAMESPACE:-kubeflow}"
SERVER_NAME="${SERVER_NAME:-model-server}"
while (($#)); do
case $1 in
"--model-export-path")
shift
MODEL_EXPORT_PATH="$1"
shift
;;
"--cluster-name")
shift
CLUSTER_NAME="$1"
shift
;;
"--namespace")
shift
KUBERNETES_NAMESPACE="$1"
shift
;;
"--server-name")
shift
SERVER_NAME="$1"
shift
;;
"--pvc-name")
shift
PVC_NAME="$1"
shift
;;
"--service-type")
shift
SERVICE_TYPE="$1"
shift
;;
*)
echo "Unknown argument: '$1'"
exit 1
;;
esac
done
if [ -z "${MODEL_EXPORT_PATH}" ]; then
echo "You must specify a path to the saved model"
exit 1
fi
echo "Deploying the model '${MODEL_EXPORT_PATH}'"
if [ -z "${CLUSTER_NAME}" ]; then
CLUSTER_NAME=$(wget -q -O- --header="Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name)
fi
# Ensure the server name is not more than 63 characters.
SERVER_NAME="${SERVER_NAME:0:63}"
# Trim any trailing hyphens from the server name.
while [[ "${SERVER_NAME:(-1)}" == "-" ]]; do SERVER_NAME="${SERVER_NAME::-1}"; done
echo "Deploying ${SERVER_NAME} to the cluster ${CLUSTER_NAME}"
# Connect kubectl to the local cluster
kubectl config set-cluster "${CLUSTER_NAME}" --server=https://kubernetes.default --certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
kubectl config set-credentials pipeline --token "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
kubectl config set-context kubeflow --cluster "${CLUSTER_NAME}" --user pipeline
kubectl config use-context kubeflow
# Configure and deploy the TF serving app
cd /src/github.com/kubeflow/kubeflow
git checkout ${KUBEFLOW_VERSION}
cd /opt
echo "Initializing KSonnet app..."
ks init tf-serving-app
cd tf-serving-app/
if [ -n "${KUBERNETES_NAMESPACE}" ]; then
echo "Setting Kubernetes namespace: ${KUBERNETES_NAMESPACE} ..."
ks env set default --namespace "${KUBERNETES_NAMESPACE}"
fi
echo "Installing Kubeflow packages..."
ks registry add kubeflow /src/github.com/kubeflow/kubeflow/kubeflow
ks pkg install kubeflow/common@${KUBEFLOW_VERSION}
ks pkg install kubeflow/tf-serving@${KUBEFLOW_VERSION}
echo "Generating the TF Serving config..."
ks generate tf-serving server --name="${SERVER_NAME}"
ks param set server modelPath "${MODEL_EXPORT_PATH}"
# service type: ClusterIP or NodePort
if [ -n "${SERVICE_TYPE}" ];then
ks param set server serviceType "${SERVICE_TYPE}"
fi
# support local storage to deploy tf-serving.
if [ -n "${PVC_NAME}" ];then
# TODO: Remove modelStorageType setting after the hard code nfs was removed at
# https://github.com/kubeflow/kubeflow/blob/v0.4-branch/kubeflow/tf-serving/tf-serving.libsonnet#L148-L151
ks param set server modelStorageType nfs
ks param set server nfsPVC "${PVC_NAME}"
fi
echo "Deploying the TF Serving service..."
ks apply default -c server
# Wait for the deployment to have at least one available replica
echo "Waiting for the TF Serving deployment to show up..."
timeout="1000"
start_time=`date +%s`
while [[ $(kubectl get deploy --namespace "${KUBERNETES_NAMESPACE}" --selector=app="${SERVER_NAME}" 2>&1|wc -l) != "2" ]];do
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 2
done
echo "Waiting for the valid workflow json..."
start_time=`date +%s`
exit_code="1"
while [[ $exit_code != "0" ]];do
kubectl get deploy --namespace "${KUBERNETES_NAMESPACE}" --selector=app="${SERVER_NAME}" --output=jsonpath='{.items[0].status.availableReplicas}'
exit_code=$?
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 2
done
echo "Waiting for the TF Serving deployment to have at least one available replica..."
start_time=`date +%s`
while [[ $(kubectl get deploy --namespace "${KUBERNETES_NAMESPACE}" --selector=app="${SERVER_NAME}" --output=jsonpath='{.items[0].status.availableReplicas}') < "1" ]]; do
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 5
done
echo "Obtaining the pod name..."
start_time=`date +%s`
pod_name=""
while [[ $pod_name == "" ]];do
pod_name=$(kubectl get pods --namespace "${KUBERNETES_NAMESPACE}" --selector=app="${SERVER_NAME}" --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}')
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 2
done
echo "Pod name is: " $pod_name
# Wait for the pod container to start running
echo "Waiting for the TF Serving pod to start running..."
start_time=`date +%s`
exit_code="1"
while [[ $exit_code != "0" ]];do
kubectl get po ${pod_name} --namespace "${KUBERNETES_NAMESPACE}" -o jsonpath='{.status.containerStatuses[0].state.running}'
exit_code=$?
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 2
done
start_time=`date +%s`
while [ -z "$(kubectl get po ${pod_name} --namespace "${KUBERNETES_NAMESPACE}" -o jsonpath='{.status.containerStatuses[0].state.running}')" ]; do
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 5
done
# Wait a little while and then grab the logs of the running server
sleep 10
echo "Logs from the TF Serving pod:"
kubectl logs ${pod_name} --namespace "${KUBERNETES_NAMESPACE}"
| 8,042 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/common/launch_crd.py | # Copyright 2019 kubeflow.org.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import logging
import multiprocessing
import time
from kubernetes import client as k8s_client
from kubernetes.client import rest
class K8sCR(object):
def __init__(self, group, plural, version, client):
self.group = group
self.plural = plural
self.version = version
self.client = k8s_client.CustomObjectsApi(client)
def wait_for_condition(self,
namespace,
name,
expected_conditions=[],
timeout=datetime.timedelta(days=365),
polling_interval=datetime.timedelta(seconds=30),
status_callback=None):
"""Waits until any of the specified conditions occur.
Args:
namespace: namespace for the CR.
name: Name of the CR.
expected_conditions: A list of conditions. Function waits until any of the
supplied conditions is reached.
timeout: How long to wait for the CR.
polling_interval: How often to poll for the status of the CR.
status_callback: (Optional): Callable. If supplied this callable is
invoked after we poll the CR. Callable takes a single argument which
is the CR.
"""
end_time = datetime.datetime.now() + timeout
while True:
try:
results = self.client.get_namespaced_custom_object(
self.group, self.version, namespace, self.plural, name)
except Exception as e:
logging.error("There was a problem waiting for %s/%s %s in namespace %s; Exception: %s",
self.group, self.plural, name, namespace, e)
raise
if results:
if status_callback:
status_callback(results)
expected, condition = self.is_expected_conditions(results, expected_conditions)
if expected:
logging.info("%s/%s %s in namespace %s has reached the expected condition: %s.",
self.group, self.plural, name, namespace, condition)
return results
else:
if condition:
logging.info("Current condition of %s/%s %s in namespace %s is %s.",
self.group, self.plural, name, namespace, condition)
if datetime.datetime.now() + polling_interval > end_time:
raise Exception(
"Timeout waiting for {0}/{1} {2} in namespace {3} to enter one of the "
"conditions {4}.".format(self.group, self.plural, name, namespace, expected_conditions))
time.sleep(polling_interval.seconds)
def is_expected_conditions(self, cr_object, expected_conditions):
return False, ""
def create(self, spec):
"""Create a CR.
Args:
spec: The spec for the CR.
"""
try:
# Create a Resource
namespace = spec["metadata"].get("namespace", "default")
logging.info("Creating %s/%s %s in namespace %s.",
self.group, self.plural, spec["metadata"]["name"], namespace)
api_response = self.client.create_namespaced_custom_object(
self.group, self.version, namespace, self.plural, spec)
logging.info("Created %s/%s %s in namespace %s.",
self.group, self.plural, spec["metadata"]["name"], namespace)
return api_response
except rest.ApiException as e:
self._log_and_raise_exception(e, "create")
def delete(self, name, namespace):
try:
body = {
# Set garbage collection so that CR won't be deleted until all
# owned references are deleted.
"propagationPolicy": "Foreground",
}
logging.info("Deleteing %s/%s %s in namespace %s.",
self.group, self.plural, name, namespace)
api_response = self.client.delete_namespaced_custom_object(
self.group,
self.version,
namespace,
self.plural,
name,
body)
logging.info("Deleted %s/%s %s in namespace %s.",
self.group, self.plural, name, namespace)
return api_response
except rest.ApiException as e:
self._log_and_raise_exception(e, "delete")
def _log_and_raise_exception(self, ex, action):
message = ""
if ex.message:
message = ex.message
if ex.body:
try:
body = json.loads(ex.body)
message = body.get("message")
except ValueError:
logging.error("Exception when %s %s/%s: %s", action, self.group, self.plural, ex.body)
raise
logging.error("Exception when %s %s/%s: %s", action, self.group, self.plural, ex.body)
raise ex
| 8,043 |
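To show how the K8sCR base class above is intended to be used, here is a minimal, hypothetical subclass for another kubeflow.org custom resource; the TFJob launcher later in this section does the same thing for real. The group/plural values and the importability of launch_crd are assumptions.

# Hypothetical sketch: subclassing K8sCR for another kubeflow.org custom resource.
import launch_crd  # assumption: launch_crd.py above is on the Python path

class PyTorchJob(launch_crd.K8sCR):
  def __init__(self, version="v1", client=None):
    super(PyTorchJob, self).__init__("kubeflow.org", "pytorchjobs", version, client)

  def is_expected_conditions(self, inst, expected_conditions):
    # Report success when the most recent status condition matches an expected type.
    conditions = inst.get("status", {}).get("conditions")
    if not conditions:
      return False, ""
    last = conditions[-1]
    return last["type"] in expected_conditions and last["status"] == "True", last["type"]

# Typical wiring inside a launcher (mirrors launch_tfjob.py below):
#   config.load_incluster_config()
#   job = PyTorchJob(client=k8s_client.ApiClient())
#   job.create(spec)
#   job.wait_for_condition(namespace, name, ["Succeeded", "Failed"])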
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/launcher/build_image.sh | #!/bin/bash -e
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
while getopts ":hp:t:i:" opt; do
case "${opt}" in
h) echo "-p: project name"
echo "-t: tag name"
echo "-i: image name. If provided, project name and tag name are not necessary"
exit
;;
p) PROJECT_ID=${OPTARG}
;;
t) TAG_NAME=${OPTARG}
;;
i) LAUNCHER_IMAGE_NAME=${OPTARG}
;;
\? ) echo "Usage: cmd [-p] project [-t] tag [-i] image"
exit
;;
esac
done
mkdir -p ./build
rsync -arvp ./src/ ./build/
rsync -arvp ../common/ ./build/
cp ../../license.sh ./build
cp ../../third_party_licenses.csv ./build
LOCAL_LAUNCHER_IMAGE_NAME=ml-pipeline-kubeflow-tfjob
docker build -t ${LOCAL_LAUNCHER_IMAGE_NAME} .
if [ -z "${TAG_NAME}" ]; then
TAG_NAME=$(date +v%Y%m%d)-$(git describe --tags --always --dirty)-$(git diff | shasum -a256 | cut -c -6)
fi
if [ -z "${LAUNCHER_IMAGE_NAME}" ]; then
if [ -z "${PROJECT_ID}" ]; then
PROJECT_ID=$(gcloud config config-helper --format "value(configuration.properties.core.project)")
fi
docker tag ${LOCAL_LAUNCHER_IMAGE_NAME} gcr.io/${PROJECT_ID}/${LOCAL_LAUNCHER_IMAGE_NAME}:${TAG_NAME}
docker push gcr.io/${PROJECT_ID}/${LOCAL_LAUNCHER_IMAGE_NAME}:${TAG_NAME}
else
docker tag ${LOCAL_LAUNCHER_IMAGE_NAME} ${LAUNCHER_IMAGE_NAME}:${TAG_NAME}
docker push ${LAUNCHER_IMAGE_NAME}:${TAG_NAME}
fi
rm -rf ./build
| 8,044 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/launcher/sample.py | import json
from kfp import components
import kfp.dsl as dsl
@dsl.pipeline(
name="Launch kubeflow tfjob",
description="An example to launch tfjob."
)
def mnist_train(
name="mnist",
namespace="kubeflow",
workerNum=3,
ttlSecondsAfterFinished=-1,
tfjobTimeoutMinutes=60,
deleteAfterDone=False):
tfjob_launcher_op = components.load_component_from_file("./component.yaml")
# tfjob_launcher_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/launcher/component.yaml')
chief = {
"replicas": 1,
"restartPolicy": "OnFailure",
"template": {
"spec": {
"containers": [
{
"command": [
"python",
"/opt/model.py"
],
"args": [
"--tf-train-steps=6000"
],
"image": "liuhougangxa/tf-estimator-mnist",
"name": "tensorflow",
}
]
}
}
}
worker = {}
if workerNum > 0:
worker = {
"replicas": workerNum,
"restartPolicy": "OnFailure",
"template": {
"spec": {
"containers": [
{
"command": [
"python",
"/opt/model.py"
],
"args": [
"--tf-train-steps=6000"
],
"image": "liuhougangxa/tf-estimator-mnist",
"name": "tensorflow",
}
]
}
}
}
tfjob_launcher_op(
name=name,
namespace=namespace,
ttl_seconds_after_finished=ttlSecondsAfterFinished,
worker_spec=worker,
chief_spec=chief,
tfjob_timeout_minutes=tfjobTimeoutMinutes,
delete_finished_tfjob=deleteAfterDone
)
if __name__ == "__main__":
import kfp.compiler as compiler
compiler.Compiler().compile(mnist_train, __file__ + ".tar.gz")
| 8,045 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/launcher/component.yaml | name: Kubeflow - Launch TFJob
description: Kubeflow TFJob launcher
inputs:
- {name: Name, type: String, description: 'TFJob name.'}
- {name: Namespace, type: String, default: kubeflow, description: 'TFJob namespace.'}
- {name: Version, type: String, default: v1, description: 'TFJob version.'}
- {name: ActiveDeadlineSeconds, type: Integer, default: -1, description: 'Specifies the duration (in seconds) since startTime during which the job can remain active before it is terminated. Must be a positive integer. This setting applies only to pods where restartPolicy is OnFailure or Always.'}
- {name: BackoffLimit, type: Integer, default: -1, description: 'Number of retries before marking this job as failed.'}
- {name: ttl Seconds After Finished, type: Integer, default: -1, description: 'Defines the TTL for cleaning up finished TFJobs.'}
- {name: CleanPodPolicy, type: String, default: Running, description: 'Defines the policy for cleaning up pods after the TFJob completes.'}
- {name: PS Spec, type: JSON, default: '{}', description: 'TFJob ps replicaSpecs.'}
- {name: Worker Spec, type: JSON, default: '{}', description: 'TFJob worker replicaSpecs.'}
- {name: Chief Spec, type: JSON, default: '{}', description: 'TFJob chief replicaSpecs.'}
- {name: Evaluator Spec, type: JSON, default: '{}', description: 'TFJob evaluator replicaSpecs.'}
- {name: Tfjob Timeout Minutes, type: Integer, default: 1440, description: 'Time in minutes to wait for the TFJob to complete.'}
- {name: Delete Finished Tfjob, type: Bool, default: 'True' , description: 'Whether to delete the tfjob after it is finished.'}
implementation:
container:
image: liuhougangxa/kubeflow-tfjob-launcher:latest
command: [python, /ml/launch_tfjob.py]
args: [
--name, {inputValue: Name},
--namespace, {inputValue: Namespace},
--version, {inputValue: Version},
--activeDeadlineSeconds, {inputValue: ActiveDeadlineSeconds},
--backoffLimit, {inputValue: BackoffLimit},
--cleanPodPolicy, {inputValue: CleanPodPolicy},
--ttlSecondsAfterFinished, {inputValue: ttl Seconds After Finished},
--psSpec, {inputValue: PS Spec},
--workerSpec, {inputValue: Worker Spec},
--chiefSpec, {inputValue: Chief Spec},
--evaluatorSpec, {inputValue: Evaluator Spec},
--tfjobTimeoutMinutes, {inputValue: Tfjob Timeout Minutes},
--deleteAfterDone, {inputValue: Delete Finished Tfjob},
]
| 8,046 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/launcher/Dockerfile | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:16.04
RUN apt-get update -y && \
apt-get install --no-install-recommends -y -q ca-certificates python-dev python-setuptools wget && \
easy_install pip && \
pip install pyyaml==3.12 kubernetes
ADD build /ml
RUN mkdir /usr/licenses && \
/ml/license.sh /ml/third_party_licenses.csv /usr/licenses
ENTRYPOINT ["python", "/ml/launch_tfjob.py"]
| 8,047 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/launcher | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/launcher/src/__init__.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 8,048 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/launcher | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/launcher/src/launch_tfjob.py | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
from distutils.util import strtobool
import json
import os
import logging
import yaml
import launch_crd
from kubernetes import client as k8s_client
from kubernetes import config
def yamlOrJsonStr(value):
  if value == "" or value is None:
    return None
  return yaml.safe_load(value)
TFJobGroup = "kubeflow.org"
TFJobPlural = "tfjobs"
class TFJob(launch_crd.K8sCR):
def __init__(self, version="v1", client=None):
super(TFJob, self).__init__(TFJobGroup, TFJobPlural, version, client)
def is_expected_conditions(self, inst, expected_conditions):
conditions = inst.get('status', {}).get("conditions")
if not conditions:
return False, ""
if conditions[-1]["type"] in expected_conditions and conditions[-1]["status"] == "True":
return True, conditions[-1]["type"]
else:
return False, conditions[-1]["type"]
def main(argv=None):
parser = argparse.ArgumentParser(description='Kubeflow TFJob launcher')
parser.add_argument('--name', type=str,
help='TFJob name.')
parser.add_argument('--namespace', type=str,
default='kubeflow',
help='TFJob namespace.')
parser.add_argument('--version', type=str,
default='v1',
help='TFJob version.')
parser.add_argument('--activeDeadlineSeconds', type=int,
default=-1,
help='Specifies the duration (in seconds) since startTime during which the job can remain active before it is terminated. Must be a positive integer. This setting applies only to pods where restartPolicy is OnFailure or Always.')
parser.add_argument('--backoffLimit', type=int,
default=-1,
help='Number of retries before marking this job as failed.')
parser.add_argument('--cleanPodPolicy', type=str,
default="Running",
help='Defines the policy for cleaning up pods after the TFJob completes.')
parser.add_argument('--ttlSecondsAfterFinished', type=int,
default=-1,
help='Defines the TTL for cleaning up finished TFJobs.')
parser.add_argument('--psSpec', type=yamlOrJsonStr,
default={},
help='TFJob ps replicaSpecs.')
parser.add_argument('--workerSpec', type=yamlOrJsonStr,
default={},
help='TFJob worker replicaSpecs.')
parser.add_argument('--chiefSpec', type=yamlOrJsonStr,
default={},
help='TFJob chief replicaSpecs.')
parser.add_argument('--evaluatorSpec', type=yamlOrJsonStr,
default={},
help='TFJob evaluator replicaSpecs.')
parser.add_argument('--deleteAfterDone', type=strtobool,
default=True,
help='When tfjob done, delete the tfjob automatically if it is True.')
parser.add_argument('--tfjobTimeoutMinutes', type=int,
default=60*24,
help='Time in minutes to wait for the TFJob to reach end')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
logging.info('Generating tfjob template.')
config.load_incluster_config()
api_client = k8s_client.ApiClient()
tfjob = TFJob(version=args.version, client=api_client)
inst = {
"apiVersion": "%s/%s" % (TFJobGroup, args.version),
"kind": "TFJob",
"metadata": {
"name": args.name,
"namespace": args.namespace,
},
"spec": {
"cleanPodPolicy": args.cleanPodPolicy,
"tfReplicaSpecs": {
},
},
}
if args.ttlSecondsAfterFinished >=0:
inst["spec"]["ttlSecondsAfterFinished"] = args.ttlSecondsAfterFinished
if args.backoffLimit >= 0:
inst["spec"]["backoffLimit"] = args.backoffLimit
if args.activeDeadlineSeconds >=0:
inst["spec"]["activeDeadlineSecond"] = args.activeDeadlineSeconds
if args.psSpec:
inst["spec"]["tfReplicaSpecs"]["PS"] = args.psSpec
if args.chiefSpec:
inst["spec"]["tfReplicaSpecs"]["Chief"] = args.chiefSpec
if args.workerSpec:
inst["spec"]["tfReplicaSpecs"]["Worker"] = args.workerSpec
if args.evaluatorSpec:
inst["spec"]["tfReplicaSpecs"]["Evaluator"] = args.evaluatorSpec
create_response = tfjob.create(inst)
expected_conditions = ["Succeeded", "Failed"]
tfjob.wait_for_condition(
args.namespace, args.name, expected_conditions,
timeout=datetime.timedelta(minutes=args.tfjobTimeoutMinutes))
if args.deleteAfterDone:
tfjob.delete(args.name, args.namespace)
if __name__== "__main__":
main()
| 8,049 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/kfserving/component.yaml | name: Kubeflow - Serve Model using KFServing
description: Serve Models using Kubeflow KFServing
inputs:
- {name: Action, type: String, default: 'create', description: 'Action to execute on KFServing'}
- {name: Model Name, type: String, default: '', description: 'Name to give to the deployed model'}
- {name: Default Model URI, type: String, default: '', description: 'Path of the S3 or GCS compatible directory containing default model.'}
- {name: Canary Model URI, type: String, default: '', description: 'Optional Path of the S3 or GCS compatible directory containing canary model.'}
- {name: Canary Model Traffic Percentage, type: String, default: '0', description: 'Optional. Percentage of traffic to be routed to the canary model.'}
- {name: Namespace, type: String, default: 'kubeflow', description: 'Kubernetes namespace where the KFServing service is deployed.'}
- {name: Framework, type: String, default: 'tensorflow', description: 'Machine Learning Framework for Model Serving.'}
- {name: Default Custom Model Spec, type: String, default: '{}', description: 'Custom runtime default custom model container spec.'}
- {name: Canary Custom Model Spec, type: String, default: '{}', description: 'Custom runtime canary custom model container spec.'}
- {name: Autoscaling Target, type: String, default: '0', description: 'Autoscaling Target Number'}
- {name: KFServing Endpoint, type: String, default: '', description: 'KFServing remote deployer API endpoint'}
outputs:
- {name: Service Endpoint URI, type: String, description: 'URI of the deployed prediction service.'}
implementation:
container:
image: aipipeline/kfserving-component:v0.2.1
command: ['python']
args: [
-u, kfservingdeployer.py,
--action, {inputValue: Action},
--model-name, {inputValue: Model Name},
--default-model-uri, {inputValue: Default Model URI},
--canary-model-uri, {inputValue: Canary Model URI},
--canary-model-traffic, {inputValue: Canary Model Traffic Percentage},
--namespace, {inputValue: Namespace},
--framework, {inputValue: Framework},
--default-custom-model-spec,{inputValue: Default Custom Model Spec},
--canary-custom-model-spec, {inputValue: Canary Custom Model Spec},
--kfserving-endpoint, {inputValue: KFServing Endpoint},
--autoscaling-target, {inputValue: Autoscaling Target},
--output_path, {outputPath: Service Endpoint URI}
]
| 8,050 |
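An illustrative sketch (not part of the repository) of using the KFServing component above inside a pipeline. The component.yaml path and the model storage URI are placeholders, and the snake_cased keyword names assume the SDK's usual conversion of the declared input names.

# Illustrative only: creates an InferenceService with the KFServing component above.
import kfp.dsl as dsl
from kfp import components

# Assumption: the component.yaml shown above is checked out at this relative path.
kfserving_op = components.load_component_from_file(
    'components/kubeflow/kfserving/component.yaml')

@dsl.pipeline(
    name='kfserving-deploy-demo',
    description='Deploys a model through the KFServing component above.')
def kfserving_pipeline(
        model_name='mnist-sample',                          # placeholder name
        default_model_uri='gs://your-bucket/mnist/export',  # placeholder URI
        namespace='kubeflow',
        framework='tensorflow'):
    kfserving_op(action='create',
                 model_name=model_name,
                 default_model_uri=default_model_uri,
                 namespace=namespace,
                 framework=framework)

if __name__ == '__main__':
    import kfp.compiler as compiler
    compiler.Compiler().compile(kfserving_pipeline, __file__ + '.tar.gz')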
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/kfserving/Dockerfile | FROM python:3.6-slim
RUN pip3 install kubernetes==10.0.1 kfserving==0.2.1 requests==2.22.0 Flask==1.1.1 flask-cors==3.0.8
ENV APP_HOME /app
COPY src $APP_HOME
WORKDIR $APP_HOME
ENTRYPOINT ["python"]
CMD ["kfservingdeployer.py"]
| 8,051 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/kfserving | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/kfserving/src/app.py | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, request, abort
from flask_cors import CORS
import json
import os
from kfservingdeployer import deploy_model
app = Flask(__name__)
CORS(app)
@app.route('/deploy-model', methods=['POST'])
def deploy_model_post():
if not request.json:
abort(400)
return json.dumps(deploy_model(
action=request.json['action'],
model_name=request.json['model_name'],
default_model_uri=request.json['default_model_uri'],
canary_model_uri=request.json['canary_model_uri'],
canary_model_traffic=request.json['canary_model_traffic'],
namespace=request.json['namespace'],
framework=request.json['framework'],
default_custom_model_spec=request.json['default_custom_model_spec'],
canary_custom_model_spec=request.json['canary_custom_model_spec'],
autoscaling_target=request.json['autoscaling_target']
))
@app.route('/', methods=['GET'])
def root_get():
    return "200"  # Flask views must return a str/dict/tuple/Response, not a bare int
@app.route('/', methods=['OPTIONS'])
def root_options():
return "200"
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
| 8,052 |
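For reference, a hypothetical client call against the /deploy-model route defined in app.py above; the service URL is a placeholder and the JSON keys mirror exactly the fields that deploy_model_post() reads from the request body.

# Illustrative only: posting a deployment request to the Flask service above.
import requests

payload = {
    "action": "create",
    "model_name": "mnist-sample",                          # placeholder values
    "default_model_uri": "gs://your-bucket/mnist/export",
    "canary_model_uri": "",
    "canary_model_traffic": 0,
    "namespace": "kubeflow",
    "framework": "tensorflow",
    "default_custom_model_spec": {},
    "canary_custom_model_spec": {},
    "autoscaling_target": 0,
}

# Assumption: the service is reachable on localhost:8080 (the default port in app.py).
response = requests.post("http://localhost:8080/deploy-model", json=payload)
print(response.status_code, response.text)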
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/kfserving | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/kfserving/src/kfservingdeployer.py | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import argparse
import os
import requests
import re
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TensorflowSpec
from kfserving import V1alpha2PyTorchSpec
from kfserving import V1alpha2SKLearnSpec
from kfserving import V1alpha2XGBoostSpec
from kfserving.models.v1alpha2_onnx_spec import V1alpha2ONNXSpec
from kfserving import V1alpha2TensorRTSpec
from kfserving import V1alpha2CustomSpec
from kfserving import V1alpha2InferenceServiceSpec
from kfserving import V1alpha2InferenceService
def EndpointSpec(framework, storage_uri):
if framework == 'tensorflow':
return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(tensorflow=V1alpha2TensorflowSpec(storage_uri=storage_uri)))
elif framework == 'pytorch':
return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(pytorch=V1alpha2PyTorchSpec(storage_uri=storage_uri)))
elif framework == 'sklearn':
return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(sklearn=V1alpha2SKLearnSpec(storage_uri=storage_uri)))
elif framework == 'xgboost':
return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(xgboost=V1alpha2XGBoostSpec(storage_uri=storage_uri)))
elif framework == 'onnx':
return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(onnx=V1alpha2ONNXSpec(storage_uri=storage_uri)))
elif framework == 'tensorrt':
return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(tensorrt=V1alpha2TensorRTSpec(storage_uri=storage_uri)))
else:
raise("Error: No matching framework: " + framework)
def customEndpointSpec(custom_model_spec):
env = [client.V1EnvVar(name=i['name'], value=i['value']) for i in custom_model_spec['env']] if custom_model_spec.get('env', '') else None
ports = [client.V1ContainerPort(container_port=int(custom_model_spec.get('port', '')))] if custom_model_spec.get('port', '') else None
containerSpec = client.V1Container(
name=custom_model_spec.get('name', 'custom-container'),
image=custom_model_spec['image'],
env=env,
ports=ports,
command=custom_model_spec.get('command', None),
args=custom_model_spec.get('args', None),
image_pull_policy=custom_model_spec.get('image_pull_policy', None),
working_dir=custom_model_spec.get('working_dir', None)
)
return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(custom=V1alpha2CustomSpec(container=containerSpec)))
def InferenceService(metadata, default_model_spec, canary_model_spec=None, canary_model_traffic=None):
return V1alpha2InferenceService(api_version=constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION,
kind=constants.KFSERVING_KIND,
metadata=metadata,
spec=V1alpha2InferenceServiceSpec(default=default_model_spec,
canary=canary_model_spec,
canary_traffic_percent=canary_model_traffic))
def deploy_model(action, model_name, default_model_uri, canary_model_uri, canary_model_traffic, namespace, framework, default_custom_model_spec, canary_custom_model_spec, autoscaling_target=0):
if int(autoscaling_target) != 0:
annotations = {"autoscaling.knative.dev/target": str(autoscaling_target)}
else:
annotations = None
metadata = client.V1ObjectMeta(name=model_name, namespace=namespace, annotations=annotations)
# Create Default deployment if default model uri is provided.
if framework != 'custom' and default_model_uri:
default_model_spec = EndpointSpec(framework, default_model_uri)
elif framework == 'custom' and default_custom_model_spec:
default_model_spec = customEndpointSpec(default_custom_model_spec)
# Create Canary deployment if canary model uri is provided.
if framework != 'custom' and canary_model_uri:
canary_model_spec = EndpointSpec(framework, canary_model_uri)
kfsvc = InferenceService(metadata, default_model_spec, canary_model_spec, canary_model_traffic)
elif framework == 'custom' and canary_custom_model_spec:
canary_model_spec = customEndpointSpec(canary_custom_model_spec)
kfsvc = InferenceService(metadata, default_model_spec, canary_model_spec, canary_model_traffic)
else:
kfsvc = InferenceService(metadata, default_model_spec)
KFServing = KFServingClient()
if action == 'create':
KFServing.create(kfsvc, watch=True, timeout_seconds=120)
elif action == 'update':
KFServing.patch(model_name, kfsvc)
elif action == 'rollout':
KFServing.rollout_canary(model_name, canary=canary_model_spec, percent=canary_model_traffic, namespace=namespace, watch=True, timeout_seconds=120)
elif action == 'promote':
KFServing.promote(model_name, namespace=namespace, watch=True, timeout_seconds=120)
elif action == 'delete':
KFServing.delete(model_name, namespace=namespace)
else:
raise("Error: No matching action: " + action)
model_status = KFServing.get(model_name, namespace=namespace)
return model_status
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--action', type=str, help='Action to execute on KFServing', default='create')
parser.add_argument('--model-name', type=str, help='Name to give to the deployed model', default="")
parser.add_argument('--default-model-uri', type=str, help='Path of the S3, GCS or PVC directory containing default model.')
parser.add_argument('--canary-model-uri', type=str, help='Optional path of the S3, GCS or PVC directory containing canary model.', default="")
    parser.add_argument('--canary-model-traffic', type=str, help='Optional traffic percentage to be sent to the canary model', default='0')
parser.add_argument('--namespace', type=str, help='Kubernetes namespace where the KFServing service is deployed.', default='kubeflow')
parser.add_argument('--framework', type=str, help='Model Serving Framework', default='tensorflow')
parser.add_argument('--default-custom-model-spec', type=json.loads, help='Custom runtime default custom model container spec', default={})
parser.add_argument('--canary-custom-model-spec', type=json.loads, help='Custom runtime canary custom model container spec', default={})
parser.add_argument('--kfserving-endpoint', type=str, help='kfserving remote deployer api endpoint', default='')
parser.add_argument('--autoscaling-target', type=str, help='Autoscaling target number', default='0')
parser.add_argument('--output_path', type=str, help='Path to store URI output')
args = parser.parse_args()
url = re.compile(r"https?://")
action = args.action.lower()
model_name = args.model_name
default_model_uri = args.default_model_uri
canary_model_uri = args.canary_model_uri
canary_model_traffic = int(args.canary_model_traffic)
namespace = args.namespace
framework = args.framework.lower()
output_path = args.output_path
default_custom_model_spec = args.default_custom_model_spec
canary_custom_model_spec = args.canary_custom_model_spec
kfserving_endpoint = url.sub('', args.kfserving_endpoint)
autoscaling_target = int(args.autoscaling_target)
if kfserving_endpoint:
formData = {
"action": action,
"model_name": model_name,
"default_model_uri": default_model_uri,
"canary_model_uri": canary_model_uri,
"canary_model_traffic": canary_model_traffic,
"namespace": namespace,
"framework": framework,
"default_custom_model_spec": default_custom_model_spec,
"canary_custom_model_spec": canary_custom_model_spec,
"autoscaling_target": autoscaling_target
}
response = requests.post("http://" + kfserving_endpoint + "/deploy-model", json=formData)
model_status = response.json()
else:
model_status = deploy_model(
action=action,
model_name=model_name,
default_model_uri=default_model_uri,
canary_model_uri=canary_model_uri,
canary_model_traffic=canary_model_traffic,
namespace=namespace,
framework=framework,
default_custom_model_spec=default_custom_model_spec,
canary_custom_model_spec=canary_custom_model_spec,
autoscaling_target=autoscaling_target
)
print(model_status)
try:
        print(model_status['status']['url'] + ' is the Knative domain header. $ISTIO_INGRESS_ENDPOINT is defined in the commands below')
print('Sample test commands: ')
print('# Note: If Istio Ingress gateway is not served with LoadBalancer, use $CLUSTER_NODE_IP:31380 as the ISTIO_INGRESS_ENDPOINT')
print('ISTIO_INGRESS_ENDPOINT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath=\'{.status.loadBalancer.ingress[0].ip}\')')
# model_status['status']['url'] is like http://flowers-sample.kubeflow.example.com/v1/models/flowers-sample
host, path = url.sub('', model_status['status']['url']).split("/", 1)
print('curl -X GET -H "Host: ' + host + '" http://$ISTIO_INGRESS_ENDPOINT/' + path)
except:
print('Model is not ready, check the logs for the Knative URL status.')
if not os.path.exists(os.path.dirname(output_path)):
os.makedirs(os.path.dirname(output_path))
with open(output_path, "w") as report:
report.write(json.dumps(model_status))
| 8,053 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/katib-launcher/build_image.sh | #!/bin/bash -e
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
while getopts ":hp:t:i:" opt; do
case "${opt}" in
h) echo "-p: project name"
echo "-t: tag name"
echo "-i: image name. If provided, project name and tag name are not necessary"
exit
;;
p) PROJECT_ID=${OPTARG}
;;
t) TAG_NAME=${OPTARG}
;;
i) LAUNCHER_IMAGE_NAME=${OPTARG}
;;
\? ) echo "Usage: cmd [-p] project [-t] tag [-i] image"
exit
;;
esac
done
mkdir -p ./build
rsync -arvp ./src/ ./build/
rsync -arvp ../common/ ./build/
cp ../../license.sh ./build
cp ../../third_party_licenses.csv ./build
LOCAL_LAUNCHER_IMAGE_NAME=ml-pipeline-kubeflow-experiment
docker build -t ${LOCAL_LAUNCHER_IMAGE_NAME} .
if [ -z "${TAG_NAME}" ]; then
TAG_NAME=$(date +v%Y%m%d)-$(git describe --tags --always --dirty)-$(git diff | shasum -a256 | cut -c -6)
fi
if [ -z "${LAUNCHER_IMAGE_NAME}" ]; then
if [ -z "${PROJECT_ID}" ]; then
PROJECT_ID=$(gcloud config config-helper --format "value(configuration.properties.core.project)")
fi
docker tag ${LOCAL_LAUNCHER_IMAGE_NAME} gcr.io/${PROJECT_ID}/${LOCAL_LAUNCHER_IMAGE_NAME}:${TAG_NAME}
docker push gcr.io/${PROJECT_ID}/${LOCAL_LAUNCHER_IMAGE_NAME}:${TAG_NAME}
else
docker tag ${LOCAL_LAUNCHER_IMAGE_NAME} ${LAUNCHER_IMAGE_NAME}:${TAG_NAME}
docker push ${LAUNCHER_IMAGE_NAME}:${TAG_NAME}
fi
rm -rf ./build
| 8,054 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/katib-launcher/sample.py | import json
import kfp.dsl as dsl
@dsl.pipeline(
name="Launch katib experiment",
description="An example to launch katib experiment."
)
def mnist_hpo(
name="mnist",
namespace="kubeflow",
goal=0.99,
parallelTrialCount=3,
maxTrialCount=12,
experimentTimeoutMinutes=60,
deleteAfterDone=True):
objectiveConfig = {
"type": "maximize",
"goal": goal,
"objectiveMetricName": "Validation-accuracy",
"additionalMetricNames": ["accuracy"]
}
algorithmConfig = {"algorithmName" : "random"}
parameters = [
{"name": "--lr", "parameterType": "double", "feasibleSpace": {"min": "0.01","max": "0.03"}},
{"name": "--num-layers", "parameterType": "int", "feasibleSpace": {"min": "2", "max": "5"}},
{"name": "--optimizer", "parameterType": "categorical", "feasibleSpace": {"list": ["sgd", "adam", "ftrl"]}}
]
rawTemplate = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"name": "{{.Trial}}",
"namespace": "{{.NameSpace}}"
},
"spec": {
"template": {
"spec": {
"restartPolicy": "Never",
"containers": [
{"name": "{{.Trial}}",
"image": "docker.io/katib/mxnet-mnist-example",
"command": [
"python /mxnet/example/image-classification/train_mnist.py --batch-size=64 {{- with .HyperParameters}} {{- range .}} {{.Name}}={{.Value}} {{- end}} {{- end}}"
]
}
]
}
}
}
}
trialTemplate = {
"goTemplate": {
"rawTemplate": json.dumps(rawTemplate)
}
}
op1 = katib_experiment_launcher_op(
name,
namespace,
parallelTrialCount=parallelTrialCount,
maxTrialCount=maxTrialCount,
objectiveConfig=str(objectiveConfig),
algorithmConfig=str(algorithmConfig),
trialTemplate=str(trialTemplate),
parameters=str(parameters),
experimentTimeoutMinutes=experimentTimeoutMinutes,
deleteAfterDone=deleteAfterDone
)
op_out = dsl.ContainerOp(
name="my-out-cop",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["echo hyperparameter: %s" % op1.output],
)
def katib_experiment_launcher_op(
name,
namespace,
maxTrialCount=100,
parallelTrialCount=3,
maxFailedTrialCount=3,
objectiveConfig='{}',
algorithmConfig='{}',
metricsCollector='{}',
trialTemplate='{}',
parameters='[]',
experimentTimeoutMinutes=60,
deleteAfterDone=True,
outputFile='/output.txt'):
return dsl.ContainerOp(
name = "mnist-hpo",
image = 'liuhougangxa/katib-experiment-launcher:latest',
arguments = [
'--name', name,
'--namespace', namespace,
'--maxTrialCount', maxTrialCount,
'--maxFailedTrialCount', maxFailedTrialCount,
'--parallelTrialCount', parallelTrialCount,
'--objectiveConfig', objectiveConfig,
'--algorithmConfig', algorithmConfig,
'--metricsCollector', metricsCollector,
'--trialTemplate', trialTemplate,
'--parameters', parameters,
'--outputFile', outputFile,
'--deleteAfterDone', deleteAfterDone,
'--experimentTimeoutMinutes', experimentTimeoutMinutes,
],
file_outputs = {'bestHyperParameter': outputFile}
)
if __name__ == "__main__":
import kfp.compiler as compiler
compiler.Compiler().compile(mnist_hpo, __file__ + ".tar.gz")
| 8,055 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/katib-launcher/component.yaml | name: Kubeflow - Launch Experiment
description: Kubeflow Experiment launcher
inputs:
- {name: Experiment Name, type: String, description: 'Experiment name.'}
- {name: Experiment Namespace, type: String, default: kubeflow, description: 'Experiment namespace.'}
- {name: Experiment Version, type: String, default: v1alpha3, description: 'Experiment version.'}
- {name: Max Trial Count, type: Integer, description: 'How many trials can be executed at most.'}
- {name: Max Failed Trial Count, type: Integer, default: 3, description: 'How many trials can fail at most.'}
- {name: Parallel Trial Count, type: Integer, default: 3, description: 'How many trials can be running in parallel at most.'}
- {name: Objective, type: JSON, description: 'Experiment objective.'}
- {name: Algorithm, type: JSON, description: 'Experiment algorithm.'}
- {name: Trial Template, type: JSON, description: 'Experiment trialTemplate.'}
- {name: Parameters, type: JSON, description: 'Experiment Parameter configuration.'}
- {name: Metrics Collector, type: JSON, default: '{}', description: 'Experiment metricsCollector.'}
- {name: Experiment Timeout Minutes, type: Integer, default: 1440, description: 'Time in minutes to wait for the Experiment to complete.'}
- {name: Delete Finished Experiment, type: Bool, default: 'True', description: 'Whether to delete the experiment after it is finished.'}
outputs:
- {name: Best Parameter Set, type: JSON, description: 'The parameter set of the best Experiment trial.'}
implementation:
container:
image: liuhougangxa/katib-experiment-launcher:latest
command: [python, /ml/launch_experiment.py]
args: [
--name, {inputValue: Experiment Name},
--namespace, {inputValue: Experiment Namespace},
--version, {inputValue: Experiment Version},
--maxTrialCount, {inputValue: Max Trial Count},
--maxFailedTrialCount, {inputValue: Max Failed Trial Count},
--parallelTrialCount, {inputValue: Parallel Trial Count},
--objectiveConfig, {inputValue: Objective},
--algorithmConfig, {inputValue: Algorithm},
--trialTemplate, {inputValue: Trial Template},
--parameters, {inputValue: Parameters},
--metricsCollector, {inputValue: Metrics Collector},
--experimentTimeoutMinutes, {inputValue: Experiment Timeout Minutes},
--deleteAfterDone, {inputValue: Delete Finished Experiment},
--outputFile, {outputPath: Best Parameter Set},
]
| 8,056 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/katib-launcher/Dockerfile | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:16.04
RUN apt-get update -y && \
apt-get install --no-install-recommends -y -q ca-certificates python-dev python-setuptools wget && \
easy_install pip && \
pip install pyyaml==3.12 kubernetes
ADD build /ml
RUN mkdir /usr/licenses && \
/ml/license.sh /ml/third_party_licenses.csv /usr/licenses
ENTRYPOINT ["python", "/ml/launch_experiment.py"]
| 8,057 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/katib-launcher/sample2.py | import json
from kfp import components
import kfp.dsl as dsl
@dsl.pipeline(
name="Launch katib experiment",
description="An example to launch katib experiment."
)
def mnist_hpo(
name="mnist",
namespace="kubeflow",
goal=0.99,
parallelTrialCount=3,
maxTrialCount=12,
experimentTimeoutMinutes=60,
deleteAfterDone=True):
objectiveConfig = {
"type": "maximize",
"goal": goal,
"objectiveMetricName": "Validation-accuracy",
"additionalMetricNames": ["accuracy"]
}
algorithmConfig = {"algorithmName" : "random"}
parameters = [
{"name": "--lr", "parameterType": "double", "feasibleSpace": {"min": "0.01","max": "0.03"}},
{"name": "--num-layers", "parameterType": "int", "feasibleSpace": {"min": "2", "max": "5"}},
{"name": "--optimizer", "parameterType": "categorical", "feasibleSpace": {"list": ["sgd", "adam", "ftrl"]}}
]
rawTemplate = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"name": "{{.Trial}}",
"namespace": "{{.NameSpace}}"
},
"spec": {
"template": {
"spec": {
"restartPolicy": "Never",
"containers": [
{"name": "{{.Trial}}",
"image": "docker.io/katib/mxnet-mnist-example",
"command": [
"python /mxnet/example/image-classification/train_mnist.py --batch-size=64 {{- with .HyperParameters}} {{- range .}} {{.Name}}={{.Value}} {{- end}} {{- end}}"
]
}
]
}
}
}
}
trialTemplate = {
"goTemplate": {
"rawTemplate": json.dumps(rawTemplate)
}
}
katib_experiment_launcher_op = components.load_component_from_file("./component.yaml")
# katib_experiment_launcher_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/katib-launcher/component.yaml')
op1 = katib_experiment_launcher_op(
experiment_name=name,
experiment_namespace=namespace,
parallel_trial_count=parallelTrialCount,
max_trial_count=maxTrialCount,
objective=str(objectiveConfig),
algorithm=str(algorithmConfig),
trial_template=str(trialTemplate),
parameters=str(parameters),
experiment_timeout_minutes=experimentTimeoutMinutes,
delete_finished_experiment=deleteAfterDone)
op_out = dsl.ContainerOp(
name="my-out-cop",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["echo hyperparameter: %s" % op1.output],
)
if __name__ == "__main__":
import kfp.compiler as compiler
compiler.Compiler().compile(mnist_hpo, __file__ + ".tar.gz")
| 8,058 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/katib-launcher | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/katib-launcher/src/__init__.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 8,059 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/katib-launcher | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/katib-launcher/src/launch_experiment.py | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
from distutils.util import strtobool
import json
import os
import logging
import yaml
import uuid
import launch_crd
from kubernetes import client as k8s_client
from kubernetes import config
def yamlOrJsonStr(str):
if str == "" or str == None:
return None
try:
return json.loads(str)
except:
return yaml.safe_load(str)
ExperimentGroup = "kubeflow.org"
ExperimentPlural = "experiments"
class Experiment(launch_crd.K8sCR):
def __init__(self, version="v1alpha3", client=None):
super(Experiment, self).__init__(ExperimentGroup, ExperimentPlural, version, client)
def is_expected_conditions(self, inst, expected_conditions):
conditions = inst.get('status', {}).get("conditions")
if not conditions:
return False, ""
if conditions[-1]["type"] in expected_conditions:
return True, conditions[-1]["type"]
else:
return False, conditions[-1]["type"]
def main(argv=None):
parser = argparse.ArgumentParser(description='Kubeflow Experiment launcher')
parser.add_argument('--name', type=str,
help='Experiment name.')
parser.add_argument('--namespace', type=str,
default='kubeflow',
help='Experiment namespace.')
parser.add_argument('--version', type=str,
default='v1alpha3',
help='Experiment version.')
parser.add_argument('--maxTrialCount', type=int,
                      help='How many trials will be created for the experiment at most.')
parser.add_argument('--maxFailedTrialCount', type=int,
help='Stop the experiment when $maxFailedTrialCount trials failed.')
parser.add_argument('--parallelTrialCount', type=int,
default=3,
help='How many trials can be running at most.')
parser.add_argument('--objectiveConfig', type=yamlOrJsonStr,
default={},
help='Experiment objective.')
parser.add_argument('--algorithmConfig', type=yamlOrJsonStr,
default={},
help='Experiment algorithm.')
parser.add_argument('--trialTemplate', type=yamlOrJsonStr,
default={},
help='Experiment trialTemplate.')
parser.add_argument('--parameters', type=yamlOrJsonStr,
default=[],
help='Experiment parameters.')
parser.add_argument('--metricsCollector', type=yamlOrJsonStr,
default={},
help='Experiment metricsCollectorSpec.')
parser.add_argument('--outputFile', type=str,
default='/output.txt',
help='The file which stores the best trial of the experiment.')
parser.add_argument('--deleteAfterDone', type=strtobool,
default=True,
help='When experiment done, delete the experiment automatically if it is True.')
parser.add_argument('--experimentTimeoutMinutes', type=int,
default=60*24,
                      help='Time in minutes to wait for the Experiment to complete.')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
logging.info('Generating experiment template.')
config.load_incluster_config()
api_client = k8s_client.ApiClient()
experiment = Experiment(version=args.version, client=api_client)
exp_name = (args.name+'-'+uuid.uuid4().hex)[0:63]
inst = {
"apiVersion": "%s/%s" % (ExperimentGroup, args.version),
"kind": "Experiment",
"metadata": {
"name": exp_name,
"namespace": args.namespace,
},
"spec": {
"algorithm": args.algorithmConfig,
"maxFailedTrialCount": args.maxFailedTrialCount,
"maxTrialCount": args.maxTrialCount,
"metricsCollectorSpec": args.metricsCollector,
"objective": args.objectiveConfig,
"parallelTrialCount": args.parallelTrialCount,
"parameters": args.parameters,
"trialTemplate": args.trialTemplate,
},
}
create_response = experiment.create(inst)
expected_conditions = ["Succeeded", "Failed"]
current_inst = experiment.wait_for_condition(
args.namespace, exp_name, expected_conditions,
timeout=datetime.timedelta(minutes=args.experimentTimeoutMinutes))
  expected, condition = experiment.is_expected_conditions(current_inst, ["Succeeded"])
if expected:
paramAssignments = current_inst["status"]["currentOptimalTrial"]["parameterAssignments"]
if not os.path.exists(os.path.dirname(args.outputFile)):
os.makedirs(os.path.dirname(args.outputFile))
with open(args.outputFile, 'w') as f:
f.write(json.dumps(paramAssignments))
if args.deleteAfterDone:
experiment.delete(exp_name, args.namespace)
if __name__== "__main__":
main()
| 8,060 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/dnntrainer/build_image.sh | #!/bin/bash -e
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
while getopts ":hp:t:i:b:l:" opt; do
case "${opt}" in
h) echo "-p: project name"
echo "-t: tag name"
echo "-i: image name. If provided, project name and tag name are not necessary"
echo "-b: tensorflow base image tag. Optional. The value can be tags listed under \
https://hub.docker.com/r/tensorflow/tensorflow/tags. Defaults to '1.6.0'."
echo "-l: local image name. Optional. Defaults to 'ml-pipeline-kubeflow-tf-trainer'"
exit
;;
p) PROJECT_ID=${OPTARG}
;;
t) TAG_NAME=${OPTARG}
;;
i) IMAGE_NAME=${OPTARG}
;;
b) TF_BASE_TAG=${OPTARG}
;;
l) LOCAL_IMAGE_NAME=${OPTARG}
;;
\? ) echo "Usage: cmd [-p] project [-t] tag [-i] image [-b] base image tag [l] local image"
exit
;;
esac
done
set -x
if [ -z "${LOCAL_IMAGE_NAME}" ]; then
LOCAL_IMAGE_NAME=ml-pipeline-kubeflow-tf-trainer
fi
if [ -z "${PROJECT_ID}" ]; then
PROJECT_ID=$(gcloud config config-helper --format "value(configuration.properties.core.project)")
fi
if [ -z "${TAG_NAME}" ]; then
TAG_NAME=$(date +v%Y%m%d)-$(git describe --tags --always --dirty)-$(git diff | shasum -a256 | cut -c -6)
fi
if [ -z "${TF_BASE_TAG}" ]; then
TF_BASE_TAG=1.6.0
fi
mkdir -p ./build
rsync -arvp ./src/ ./build/
cp ../../license.sh ./build
cp ../../third_party_licenses.csv ./build
docker build --build-arg TF_TAG=${TF_BASE_TAG} -t ${LOCAL_IMAGE_NAME} .
if [ -z "${IMAGE_NAME}" ]; then
docker tag ${LOCAL_IMAGE_NAME} gcr.io/${PROJECT_ID}/${LOCAL_IMAGE_NAME}:${TAG_NAME}
docker push gcr.io/${PROJECT_ID}/${LOCAL_IMAGE_NAME}:${TAG_NAME}
else
docker tag ${LOCAL_IMAGE_NAME} "${IMAGE_NAME}"
docker push "${IMAGE_NAME}"
fi
rm -rf ./build | 8,061 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/dnntrainer/component.yaml | name: Train FC DNN using TF
description: Trains fully-connected neural network using Tensorflow
inputs:
- {name: Transformed data dir, type: GCSPath, description: 'GCS path containing tf-transformed training and eval data.'} # type: {GCSPath: {path_type: Directory}}
- {name: Schema, type: GCSPath, description: 'GCS json schema file path.'} # type: {GCSPath: {data_type: JSON}}
- {name: Learning rate, type: Float, default: '0.1', description: 'Learning rate for training.'}
- {name: Optimizer, type: String, default: 'Adagrad', description: 'Optimizer for training. Valid values are: Adam, SGD, Adagrad. If not provided, tf.estimator default will be used.'}
- {name: Hidden layer size, type: String, default: '100', description: 'Comma-separated hidden layer sizes. For example "200,100,50".'}
- {name: Steps, type: Integer, description: 'Maximum number of training steps to perform. If unspecified, will honor epochs.'}
#- {name: Epochs, type: Integer, default: '', description: 'Maximum number of training data epochs on which to train. If both "steps" and "epochs" are specified, the training job will run for "steps" or "epochs", whichever occurs first.'}
- {name: Target, type: String, description: 'Name of the column for prediction target.'}
- {name: Preprocessing module, type: GCSPath, default: '', description: 'GCS path to a python file defining "preprocess" and "get_feature_columns" functions.'} # type: {GCSPath: {data_type: Python}}
- {name: Training output dir, type: GCSPath, description: 'GCS or local directory.'} # type: {GCSPath: {path_type: Directory}}
outputs:
- {name: Training output dir, type: GCSPath, description: 'GCS or local directory.'} # type: {GCSPath: {path_type: Directory}}
- {name: MLPipeline UI metadata, type: UI metadata}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:ad9bd5648dd0453005225779f25d8cebebc7ca00
command: [python2, -m, trainer.task]
args: [
--transformed-data-dir, {inputValue: Transformed data dir},
--schema, {inputValue: Schema},
--learning-rate, {inputValue: Learning rate},
--optimizer, {inputValue: Optimizer},
--hidden-layer-size, {inputValue: Hidden layer size},
--steps, {inputValue: Steps},
# --epochs, {inputValue: Epochs},
--target, {inputValue: Target},
--preprocessing-module, {inputValue: Preprocessing module},
--job-dir, {inputValue: Training output dir},
]
fileOutputs:
Training output dir: /output.txt
MLPipeline UI metadata: /mlpipeline-ui-metadata.json
| 8,062 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/dnntrainer/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ARG TF_TAG
FROM tensorflow/tensorflow:$TF_TAG
RUN apt-get update -y
RUN apt-get install --no-install-recommends -y -q ca-certificates python-dev python-setuptools \
wget unzip git
RUN apt-get install --no-install-recommends -y -q build-essential && \
pip install pyyaml==3.12 six==1.11.0 \
tensorflow-transform==0.6.0 \
tensorflow-model-analysis==0.6.0 && \
apt-get --purge autoremove -y build-essential
ADD build /ml
RUN mkdir /usr/licenses && \
/ml/license.sh /ml/third_party_licenses.csv /usr/licenses
WORKDIR /ml
ENTRYPOINT ["python", "-m", "trainer.task"]
| 8,063 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/dnntrainer | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/dnntrainer/src/__init__.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 8,064 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/dnntrainer | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/dnntrainer/src/setup.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
setup(
name='trainer',
version='1.0.0',
packages=find_packages(),
description='DNN Trainer',
author='Google',
keywords=[
],
license="Apache Software License",
long_description="""
""",
install_requires=[
'tensorflow==1.15.2',
],
package_data={
},
data_files=[],
)
| 8,065 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/dnntrainer/src | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/dnntrainer/src/trainer/__init__.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 8,066 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/dnntrainer/src | kubeflow_public_repos/kfp-tekton-backend/components/kubeflow/dnntrainer/src/trainer/task.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_model_analysis as tfma
from tensorflow.python.lib.io import file_io
from tensorflow_transform.beam.tft_beam_io import transform_fn_io
from tensorflow_transform.saved import input_fn_maker
from tensorflow_transform.saved import saved_transform_io
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import metadata_io
IMAGE_EMBEDDING_SIZE = 2048
CLASSIFICATION_TARGET_TYPES = [tf.bool, tf.int32, tf.int64]
REGRESSION_TARGET_TYPES = [tf.float32, tf.float64]
TARGET_TYPES = CLASSIFICATION_TARGET_TYPES + REGRESSION_TARGET_TYPES
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--job-dir',
type=str,
required=True,
help='GCS or local directory.')
parser.add_argument('--transformed-data-dir',
type=str,
required=True,
help='GCS path containing tf-transformed training and eval data.')
parser.add_argument('--schema',
type=str,
required=True,
help='GCS json schema file path.')
parser.add_argument('--target',
type=str,
required=True,
help='The name of the column to predict in training data.')
parser.add_argument('--learning-rate',
type=float,
default=0.1,
help='Learning rate for training.')
parser.add_argument('--optimizer',
choices=['Adam', 'SGD', 'Adagrad'],
default='Adagrad',
help='Optimizer for training. If not provided, '
'tf.estimator default will be used.')
parser.add_argument('--hidden-layer-size',
type=str,
default='100',
help='comma separated hidden layer sizes. For example "200,100,50".')
parser.add_argument('--steps',
type=int,
help='Maximum number of training steps to perform. If unspecified, will '
'honor epochs.')
parser.add_argument('--epochs',
type=int,
help='Maximum number of training data epochs on which to train. If '
'both "steps" and "epochs" are specified, the training '
'job will run for "steps" or "epochs", whichever occurs first.')
parser.add_argument('--preprocessing-module',
type=str,
required=False,
help=('GCS path to a python file defining '
'"preprocess" and "get_feature_columns" functions.'))
args = parser.parse_args()
args.hidden_layer_size = [int(x.strip()) for x in args.hidden_layer_size.split(',')]
return args
def is_classification(transformed_data_dir, target):
"""Whether the scenario is classification (vs regression).
Returns:
The number of classes if the target represents a classification
problem, or None if it does not.
"""
transformed_metadata = metadata_io.read_metadata(
os.path.join(transformed_data_dir, transform_fn_io.TRANSFORMED_METADATA_DIR))
transformed_feature_spec = transformed_metadata.schema.as_feature_spec()
if target not in transformed_feature_spec:
raise ValueError('Cannot find target "%s" in transformed data.' % target)
feature = transformed_feature_spec[target]
if (not isinstance(feature, tf.FixedLenFeature) or feature.shape != [] or
feature.dtype not in TARGET_TYPES):
raise ValueError('target "%s" is of invalid type.' % target)
if feature.dtype in CLASSIFICATION_TARGET_TYPES:
if feature.dtype == tf.bool:
return 2
return get_vocab_size(transformed_data_dir, target)
return None
def make_tft_input_metadata(schema):
"""Create tf-transform metadata from given schema."""
tft_schema = {}
for col_schema in schema:
col_type = col_schema['type']
col_name = col_schema['name']
if col_type == 'NUMBER':
tft_schema[col_name] = dataset_schema.ColumnSchema(
tf.float32, [], dataset_schema.FixedColumnRepresentation(default_value=0.0))
elif col_type in ['CATEGORY', 'TEXT', 'IMAGE_URL', 'KEY']:
tft_schema[col_name] = dataset_schema.ColumnSchema(
tf.string, [], dataset_schema.FixedColumnRepresentation(default_value=''))
return dataset_metadata.DatasetMetadata(dataset_schema.Schema(tft_schema))
def make_training_input_fn(transformed_data_dir, mode, batch_size, target_name, num_epochs=None):
"""Creates an input function reading from transformed data.
Args:
transformed_data_dir: Directory to read transformed data and metadata from.
mode: 'train' or 'eval'.
batch_size: Batch size.
target_name: name of the target column.
num_epochs: number of training data epochs.
Returns:
The input function for training or eval.
"""
transformed_metadata = metadata_io.read_metadata(
os.path.join(transformed_data_dir, transform_fn_io.TRANSFORMED_METADATA_DIR))
transformed_feature_spec = transformed_metadata.schema.as_feature_spec()
def _input_fn():
"""Input function for training and eval."""
epochs = 1 if mode == 'eval' else num_epochs
transformed_features = tf.contrib.learn.io.read_batch_features(
os.path.join(transformed_data_dir, mode + '-*'),
batch_size, transformed_feature_spec, tf.TFRecordReader, num_epochs=epochs)
# Extract features and label from the transformed tensors.
transformed_labels = transformed_features.pop(target_name)
return transformed_features, transformed_labels
return _input_fn
def make_serving_input_fn(transformed_data_dir, schema, target_name):
"""Creates an input function reading from transformed data.
Args:
transformed_data_dir: Directory to read transformed data and metadata from.
schema: the raw data schema.
target_name: name of the target column.
Returns:
The input function for serving.
"""
raw_metadata = make_tft_input_metadata(schema)
raw_feature_spec = raw_metadata.schema.as_feature_spec()
raw_keys = [x['name'] for x in schema]
raw_keys.remove(target_name)
serving_input_fn = input_fn_maker.build_csv_transforming_serving_input_receiver_fn(
raw_metadata=raw_metadata,
transform_savedmodel_dir=transformed_data_dir + '/transform_fn',
raw_keys=raw_keys)
return serving_input_fn
def get_vocab_size(transformed_data_dir, feature_name):
"""Get vocab size of a given text or category column."""
vocab_file = os.path.join(transformed_data_dir,
transform_fn_io.TRANSFORM_FN_DIR,
'assets',
'vocab_' + feature_name)
with file_io.FileIO(vocab_file, 'r') as f:
return sum(1 for _ in f)
def build_feature_columns(schema, transformed_data_dir, target):
"""Build feature columns that tf.estimator expects."""
feature_columns = []
for entry in schema:
name = entry['name']
datatype = entry['type']
if name == target:
continue
if datatype == 'NUMBER':
feature_columns.append(tf.feature_column.numeric_column(name, shape=()))
elif datatype == 'IMAGE_URL':
feature_columns.append(tf.feature_column.numeric_column(name, shape=(2048)))
elif datatype == 'CATEGORY':
vocab_size = get_vocab_size(transformed_data_dir, name)
category_column = tf.feature_column.categorical_column_with_identity(name, num_buckets=vocab_size)
indicator_column = tf.feature_column.indicator_column(category_column)
feature_columns.append(indicator_column)
elif datatype == 'TEXT':
vocab_size = get_vocab_size(transformed_data_dir, name)
indices_column = tf.feature_column.categorical_column_with_identity(name + '_indices', num_buckets=vocab_size + 1)
weighted_column = tf.feature_column.weighted_categorical_column(indices_column, name + '_weights')
indicator_column = tf.feature_column.indicator_column(weighted_column)
feature_columns.append(indicator_column)
return feature_columns
def get_estimator(schema, transformed_data_dir, target_name, output_dir, hidden_units,
optimizer, learning_rate, feature_columns):
"""Get proper tf.estimator (DNNClassifier or DNNRegressor)."""
  # Map the optimizer name to a tf.train optimizer before the name is overwritten.
  if optimizer == 'Adam':
    optimizer = tf.train.AdamOptimizer(learning_rate)
  elif optimizer == 'SGD':
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
  else:
    optimizer = tf.train.AdagradOptimizer(learning_rate)
# Set how often to run checkpointing in terms of steps.
config = tf.contrib.learn.RunConfig(save_checkpoints_steps=1000)
n_classes = is_classification(transformed_data_dir, target_name)
if n_classes:
estimator = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
hidden_units=hidden_units,
n_classes=n_classes,
config=config,
model_dir=output_dir)
else:
estimator = tf.estimator.DNNRegressor(
feature_columns=feature_columns,
hidden_units=hidden_units,
config=config,
model_dir=output_dir,
optimizer=optimizer)
return estimator
def eval_input_receiver_fn(tf_transform_dir, schema, target):
"""Build everything needed for the tf-model-analysis to run the model.
Args:
tf_transform_dir: directory in which the tf-transform model was written
during the preprocessing step.
schema: the raw data schema.
target: name of the target column.
Returns:
EvalInputReceiver function, which contains:
- Tensorflow graph which parses raw untranformed features, applies the
tf-transform preprocessing operators.
- Set of raw, untransformed features.
- Label against which predictions will be compared.
"""
raw_metadata = make_tft_input_metadata(schema)
raw_feature_spec = raw_metadata.schema.as_feature_spec()
serialized_tf_example = tf.placeholder(
dtype=tf.string, shape=[None], name='input_example_tensor')
features = tf.parse_example(serialized_tf_example, raw_feature_spec)
_, transformed_features = (
saved_transform_io.partially_apply_saved_transform(
os.path.join(tf_transform_dir, transform_fn_io.TRANSFORM_FN_DIR),
features))
receiver_tensors = {'examples': serialized_tf_example}
return tfma.export.EvalInputReceiver(
features=transformed_features,
receiver_tensors=receiver_tensors,
labels=transformed_features[target])
def main():
  # Configure TF_CONFIG so that TensorFlow recognizes the MASTER in the yaml file as the chief.
# TODO: kubeflow is working on fixing the problem and this TF_CONFIG can be
# removed then.
args = parse_arguments()
tf.logging.set_verbosity(tf.logging.INFO)
schema = json.loads(file_io.read_file_to_string(args.schema))
feature_columns = None
if args.preprocessing_module:
module_dir = os.path.abspath(os.path.dirname(__file__))
preprocessing_module_path = os.path.join(module_dir, 'preprocessing.py')
with open(preprocessing_module_path, 'w+') as preprocessing_file:
preprocessing_file.write(
file_io.read_file_to_string(args.preprocessing_module))
import preprocessing
feature_columns = preprocessing.get_feature_columns(args.transformed_data_dir)
else:
feature_columns = build_feature_columns(schema, args.transformed_data_dir, args.target)
estimator = get_estimator(schema, args.transformed_data_dir, args.target, args.job_dir,
args.hidden_layer_size, args.optimizer, args.learning_rate,
feature_columns)
# TODO: Expose batch size.
train_input_fn = make_training_input_fn(
args.transformed_data_dir,
'train',
32,
args.target,
num_epochs=args.epochs)
eval_input_fn = make_training_input_fn(
args.transformed_data_dir,
'eval',
32,
args.target)
serving_input_fn = make_serving_input_fn(
args.transformed_data_dir,
schema,
args.target)
exporter = tf.estimator.FinalExporter('export', serving_input_fn)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=args.steps)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, exporters=[exporter])
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
eval_model_dir = os.path.join(args.job_dir, 'tfma_eval_model_dir')
tfma.export.export_eval_savedmodel(
estimator=estimator,
export_dir_base=eval_model_dir,
eval_input_receiver_fn=(
lambda: eval_input_receiver_fn(
args.transformed_data_dir, schema, args.target)))
metadata = {
'outputs' : [{
'type': 'tensorboard',
'source': args.job_dir,
}]
}
with open('/mlpipeline-ui-metadata.json', 'w') as f:
json.dump(metadata, f)
with open('/output.txt', 'w') as f:
f.write(args.job_dir)
if __name__ == '__main__':
main()
| 8,067 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/filesystem | kubeflow_public_repos/kfp-tekton-backend/components/filesystem/list_items/component.yaml | name: List items
description: Recursively list directory contents.
inputs:
- {name: Directory, type: Directory}
outputs:
- {name: Items}
implementation:
container:
image: alpine
command:
- sh
- -ex
- -c
- |
mkdir -p "$(dirname "$1")"
#ls --almost-all --recursive "$0" > "$1"
ls -A -R "$0" > "$1"
- inputPath: Directory
- outputPath: Items
| 8,068 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/filesystem | kubeflow_public_repos/kfp-tekton-backend/components/filesystem/get_file/component.yaml | name: Get file
description: Get file from directory.
inputs:
- {name: Directory, type: Directory}
- {name: Subpath, type: String}
outputs:
- {name: File}
implementation:
container:
image: alpine
command:
- sh
- -ex
- -c
- |
mkdir -p "$(dirname "$2")"
cp -r "$0/$1" "$2"
- inputPath: Directory
- inputValue: Subpath
- outputPath: File
| 8,069 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/filesystem | kubeflow_public_repos/kfp-tekton-backend/components/filesystem/get_subdirectory/component.yaml | name: Get subdirectory
description: Get subdirectory from directory.
inputs:
- {name: Directory, type: Directory}
- {name: Subpath, type: String}
outputs:
- {name: Subdir, type: Directory}
implementation:
container:
image: alpine
command:
- sh
- -ex
- -c
- |
mkdir -p "$(dirname "$2")"
cp -r "$0/$1" "$2"
- inputPath: Directory
- inputValue: Subpath
- outputPath: Subdir
| 8,070 |
0 | kubeflow_public_repos/kfp-tekton-backend/components | kubeflow_public_repos/kfp-tekton-backend/components/tfx/README.md | ## Versions of TFX components that can be used with KFP SDK
Disclaimer: The components in this directory are unofficial and are maintained by the KFP team, not the TFX team.
If you experience any issues with these components, please create a new issue in the Kubeflow Pipelines repo and assign it to Ark-kun.
These components were created so that users can use TFX components in their KFP pipelines and mix KFP and TFX components.
If your pipeline uses only TFX components, please use the official [TFX SDK](https://www.tensorflow.org/tfx/tutorials/tfx/cloud-ai-platform-pipelines).
See the [sample pipeline](_samples/TFX_pipeline.ipynb), which showcases most of the components.
The components come in two flavors: file-based I/O and URI-based I/O. With file-based I/O, the system takes care of storing output data and making it available to downstream components.
With URI-based I/O, only the URIs pointing to the data are passed between components, and the pipeline author is responsible for providing unique URIs for all output artifacts of the components in the pipeline.
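For reference, a file-based component from this directory can be loaded with the regular KFP SDK loader. The sketch below is only illustrative: it loads the ExampleValidator component and prints its interface; the raw-file URL is an assumption, and in a real pipeline the `statistics` and `schema` inputs would be fed from the outputs of upstream tasks, as the sample notebook shows.

```python
from kfp import components

# Assumed raw-file URL; any component.yaml in this directory can be loaded the same way.
example_validator_op = components.load_component_from_url(
    'https://raw.githubusercontent.com/kubeflow/pipelines/master/'
    'components/tfx/ExampleValidator/component.yaml'
)

# Inspect the loaded component's interface before wiring it into a pipeline.
spec = example_validator_op.component_spec
print('inputs: ', [i.name for i in spec.inputs])
print('outputs:', [o.name for o in spec.outputs])
```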
### Google Cloud Dataflow
The TFX components that use URI-based I/O accept a `beam_pipeline_args` parameter; setting the runner to `DataflowRunner` there executes the components on Google Cloud Dataflow.
See the [sample TFX on Dataflow pipeline](_samples/TFX_Dataflow_pipeline.ipynb).
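As a rough sketch (not a definitive recipe), the Beam options for Dataflow look like the list below. The GCP project, bucket, and region values are placeholders, and the exact input name and serialization expected by each URI-based component are defined in its `component.yaml`, so check the component (or the sample notebook above) before use.

```python
import json

# Standard Apache Beam / Dataflow pipeline options; replace the placeholder values.
beam_pipeline_args = [
    '--runner=DataflowRunner',
    '--project=my-gcp-project',
    '--temp_location=gs://my-bucket/tmp',
    '--region=us-central1',
]

# Some component inputs expect the list as a JSON-serialized string.
print(json.dumps(beam_pipeline_args))
```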
### Aspects and limitations
* These components use the official TFX container image.
* These components run the executors and component classes of the official TFX components.
* These components do not execute TFX [drivers](https://www.tensorflow.org/tfx/api_docs/python/tfx/components/base/base_driver), so they do not log metadata themselves (the metadata is logged by the Metadata Writer service instead). The properties of artifacts are currently not logged.
| 8,071 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleValidator/component.py | from kfp.components import InputPath, OutputPath
def ExampleValidator(
statistics_path: InputPath('ExampleStatistics'),
schema_path: InputPath('Schema'),
anomalies_path: OutputPath('ExampleAnomalies'),
):
"""
A TFX component to validate input examples.
The ExampleValidator component uses [Tensorflow Data
Validation](https://www.tensorflow.org/tfx/data_validation) to
validate the statistics of some splits on input examples against a schema.
The ExampleValidator component identifies anomalies in training and serving
data. The component can be configured to detect different classes of anomalies
in the data. It can:
- perform validity checks by comparing data statistics against a schema that
codifies expectations of the user.
- detect data drift by looking at a series of data.
- detect changes in dataset-wide data (i.e., num_examples) across spans or
versions.
Schema Based Example Validation
The ExampleValidator component identifies any anomalies in the example data by
comparing data statistics computed by the StatisticsGen component against a
schema. The schema codifies properties which the input data is expected to
satisfy, and is provided and maintained by the user.
Please see https://www.tensorflow.org/tfx/data_validation for more details.
Args:
statistics: A Channel of 'ExampleStatistics` type. This should contain at
least 'eval' split. Other splits are ignored currently.
schema: A Channel of "Schema' type. _required_
Returns:
anomalies: Output channel of 'ExampleAnomalies' type.
Either `stats` or `statistics` must be present in the arguments.
"""
from tfx.components.example_validator.component import ExampleValidator as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
if __name__ == '__main__':
import kfp
kfp.components.func_to_container_op(
ExampleValidator,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
| 8,072 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleValidator/component.yaml | name: ExampleValidator
description: |-
A TFX component to validate input examples.
The ExampleValidator component uses [Tensorflow Data
Validation](https://www.tensorflow.org/tfx/data_validation) to
validate the statistics of some splits on input examples against a schema.
The ExampleValidator component identifies anomalies in training and serving
data. The component can be configured to detect different classes of anomalies
in the data. It can:
- perform validity checks by comparing data statistics against a schema that
codifies expectations of the user.
- detect data drift by looking at a series of data.
- detect changes in dataset-wide data (i.e., num_examples) across spans or
versions.
Schema Based Example Validation
The ExampleValidator component identifies any anomalies in the example data by
comparing data statistics computed by the StatisticsGen component against a
schema. The schema codifies properties which the input data is expected to
satisfy, and is provided and maintained by the user.
Please see https://www.tensorflow.org/tfx/data_validation for more details.
Args:
statistics: A Channel of 'ExampleStatistics` type. This should contain at
least 'eval' split. Other splits are ignored currently.
schema: A Channel of "Schema' type. _required_
Returns:
anomalies: Output channel of 'ExampleAnomalies' type.
Either `stats` or `statistics` must be present in the arguments.
inputs:
- {name: statistics, type: ExampleStatistics}
- {name: schema, type: Schema}
outputs:
- {name: anomalies, type: ExampleAnomalies}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def ExampleValidator(
statistics_path ,
schema_path ,
anomalies_path ,
):
"""
A TFX component to validate input examples.
The ExampleValidator component uses [Tensorflow Data
Validation](https://www.tensorflow.org/tfx/data_validation) to
validate the statistics of some splits on input examples against a schema.
The ExampleValidator component identifies anomalies in training and serving
data. The component can be configured to detect different classes of anomalies
in the data. It can:
- perform validity checks by comparing data statistics against a schema that
codifies expectations of the user.
- detect data drift by looking at a series of data.
- detect changes in dataset-wide data (i.e., num_examples) across spans or
versions.
Schema Based Example Validation
The ExampleValidator component identifies any anomalies in the example data by
comparing data statistics computed by the StatisticsGen component against a
schema. The schema codifies properties which the input data is expected to
satisfy, and is provided and maintained by the user.
Please see https://www.tensorflow.org/tfx/data_validation for more details.
Args:
statistics: A Channel of 'ExampleStatistics` type. This should contain at
least 'eval' split. Other splits are ignored currently.
schema: A Channel of "Schema' type. _required_
Returns:
anomalies: Output channel of 'ExampleAnomalies' type.
Either `stats` or `statistics` must be present in the arguments.
"""
from tfx.components.example_validator.component import ExampleValidator as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
import argparse
_parser = argparse.ArgumentParser(prog='ExampleValidator', description='A TFX component to validate input examples.\n\n The ExampleValidator component uses [Tensorflow Data\n Validation](https://www.tensorflow.org/tfx/data_validation) to\n validate the statistics of some splits on input examples against a schema.\n\n The ExampleValidator component identifies anomalies in training and serving\n data. The component can be configured to detect different classes of anomalies\n in the data. It can:\n - perform validity checks by comparing data statistics against a schema that\n codifies expectations of the user.\n - detect data drift by looking at a series of data.\n - detect changes in dataset-wide data (i.e., num_examples) across spans or\n versions.\n\n Schema Based Example Validation\n The ExampleValidator component identifies any anomalies in the example data by\n comparing data statistics computed by the StatisticsGen component against a\n schema. The schema codifies properties which the input data is expected to\n satisfy, and is provided and maintained by the user.\n\n Please see https://www.tensorflow.org/tfx/data_validation for more details.\n\n Args:\n statistics: A Channel of \'ExampleStatistics` type. This should contain at\n least \'eval\' split. Other splits are ignored currently.\n schema: A Channel of "Schema\' type. _required_\n Returns:\n anomalies: Output channel of \'ExampleAnomalies\' type.\n\n Either `stats` or `statistics` must be present in the arguments.')
_parser.add_argument("--statistics", dest="statistics_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--schema", dest="schema_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--anomalies", dest="anomalies_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = ExampleValidator(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --statistics
- {inputPath: statistics}
- --schema
- {inputPath: schema}
- --anomalies
- {outputPath: anomalies}
| 8,073 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleValidator | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleValidator/with_URI_IO/component.py | # flake8: noqa
from typing import NamedTuple
def ExampleValidator(
statistics_uri: 'ExampleStatisticsUri',
schema_uri: 'SchemaUri',
output_anomalies_uri: 'ExampleAnomaliesUri',
beam_pipeline_args: list = None,
) -> NamedTuple('Outputs', [
('anomalies_uri', 'ExampleAnomaliesUri'),
]):
from tfx.components import ExampleValidator as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_anomalies_uri, )
if __name__ == '__main__':
import kfp
kfp.components.create_component_from_func(
ExampleValidator,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
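
# --- Usage sketch (not part of the generated component) ---
# A hypothetical pipeline around the URI-based component above. Because all
# inputs and outputs are plain URI strings, artifact locations are chosen by
# the pipeline author; the component file path and the gs:// URIs below are
# assumptions for illustration only.
def _example_validator_uri_pipeline():
    from kfp import components, dsl

    example_validator_op = components.load_component_from_file(
        'ExampleValidator/with_URI_IO/component.yaml')  # assumed path

    @dsl.pipeline(name='example-validator-uri-sketch')
    def pipeline(
        statistics_uri: str = 'gs://my-bucket/tfx/statistics_gen/statistics',  # assumed
        schema_uri: str = 'gs://my-bucket/tfx/schema_gen/schema',              # assumed
        anomalies_uri: str = 'gs://my-bucket/tfx/example_validator/anomalies', # assumed
    ):
        example_validator_op(
            statistics_uri=statistics_uri,
            schema_uri=schema_uri,
            output_anomalies_uri=anomalies_uri,
        )

    return pipeline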
| 8,074 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleValidator | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleValidator/with_URI_IO/component.yaml | name: ExampleValidator
inputs:
- {name: statistics_uri, type: ExampleStatisticsUri}
- {name: schema_uri, type: SchemaUri}
- {name: output_anomalies_uri, type: ExampleAnomaliesUri}
- {name: beam_pipeline_args, type: JsonArray, optional: true}
outputs:
- {name: anomalies_uri, type: ExampleAnomaliesUri}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def ExampleValidator(
statistics_uri,
schema_uri,
output_anomalies_uri,
beam_pipeline_args = None,
):
from tfx.components import ExampleValidator as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_anomalies_uri, )
import json
import argparse
_parser = argparse.ArgumentParser(prog='ExampleValidator', description='')
_parser.add_argument("--statistics-uri", dest="statistics_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--schema-uri", dest="schema_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-anomalies-uri", dest="output_anomalies_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--beam-pipeline-args", dest="beam_pipeline_args", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = ExampleValidator(**_parsed_args)
_output_serializers = [
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --statistics-uri
- {inputValue: statistics_uri}
- --schema-uri
- {inputValue: schema_uri}
- --output-anomalies-uri
- {inputValue: output_anomalies_uri}
- if:
cond: {isPresent: beam_pipeline_args}
then:
- --beam-pipeline-args
- {inputValue: beam_pipeline_args}
- '----output-paths'
- {outputPath: anomalies_uri}
| 8,075 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/CsvExampleGen/component.py | # flake8: noqa TODO
from kfp.components import InputPath, OutputPath
def CsvExampleGen(
# Inputs
input_path: InputPath('ExternalArtifact'),
# Outputs
examples_path: OutputPath('Examples'),
# Execution properties
input_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Input'}} = None, # = '{"splits": []}', # JSON-serialized example_gen_pb2.Input instance, providing input configuration. If unset, the files under input_base will be treated as a single split.
output_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Output'}} = None, # = '{"splitConfig": {"splits": []}}', # JSON-serialized example_gen_pb2.Output instance, providing output configuration. If unset, default splits will be 'train' and 'eval' with size 2:1.
custom_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.CustomConfig'}} = None,
):
"""Executes the CsvExampleGen component.
Args:
input: A Channel of 'ExternalPath' type, which includes one artifact
whose uri is an external directory with csv files inside (required).
input_config: An example_gen_pb2.Input instance, providing input
configuration. If unset, the files under input will be treated as a
single split.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1.
Returns:
examples: Artifact of type 'Examples' for output train and
eval examples.
"""
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
if __name__ == '__main__':
import kfp
kfp.components.func_to_container_op(
CsvExampleGen,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
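
# --- Usage sketch (not part of the generated component) ---
# One hypothetical way to invoke the component.yaml emitted above from a KFP
# pipeline. The component file path, the upstream output name 'directory' and
# the split configuration are assumptions; input_config / output_config are
# JSON-serialized example_gen_pb2.Input / Output messages, matching the
# 'JsonObject' types declared by the component.
def _add_csv_example_gen_step(csv_dir_task):
    import json
    import kfp.components as comp

    csv_example_gen_op = comp.load_component_from_file(
        'CsvExampleGen/component.yaml')  # assumed path
    return csv_example_gen_op(
        input=csv_dir_task.outputs['directory'],  # upstream output name is assumed
        input_config=json.dumps(
            {'splits': [{'name': 'single_split', 'pattern': '*'}]}),
        output_config=json.dumps(
            {'splitConfig': {'splits': [
                {'name': 'train', 'hashBuckets': 2},
                {'name': 'eval', 'hashBuckets': 1},
            ]}}),
    )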
| 8,076 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/CsvExampleGen/component.yaml | name: CsvExampleGen
description: |-
Executes the CsvExampleGen component.
Args:
input: A Channel of 'ExternalPath' type, which includes one artifact
whose uri is an external directory with csv files inside (required).
input_config: An example_gen_pb2.Input instance, providing input
configuration. If unset, the files under input will be treated as a
single split.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1.
Returns:
examples: Artifact of type 'Examples' for output train and
eval examples.
inputs:
- {name: input, type: ExternalArtifact}
- name: input_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.Input'}
optional: true
- name: output_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.Output'}
optional: true
- name: custom_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.CustomConfig'}
optional: true
outputs:
- {name: examples, type: Examples}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def CsvExampleGen(
# Inputs
input_path ,
# Outputs
examples_path ,
# Execution properties
input_config = None, # = '{"splits": []}', # JSON-serialized example_gen_pb2.Input instance, providing input configuration. If unset, the files under input_base will be treated as a single split.
output_config = None, # = '{"splitConfig": {"splits": []}}', # JSON-serialized example_gen_pb2.Output instance, providing output configuration. If unset, default splits will be 'train' and 'eval' with size 2:1.
custom_config = None,
):
"""Executes the CsvExampleGen component.
Args:
input: A Channel of 'ExternalPath' type, which includes one artifact
whose uri is an external directory with csv files inside (required).
input_config: An example_gen_pb2.Input instance, providing input
configuration. If unset, the files under input will be treated as a
single split.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1.
Returns:
examples: Artifact of type 'Examples' for output train and
eval examples.
"""
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
import argparse
_parser = argparse.ArgumentParser(prog='CsvExampleGen', description="Executes the CsvExampleGen component.\n\n Args:\n input: A Channel of 'ExternalPath' type, which includes one artifact\n whose uri is an external directory with csv files inside (required).\n input_config: An example_gen_pb2.Input instance, providing input\n configuration. If unset, the files under input will be treated as a\n single split.\n output_config: An example_gen_pb2.Output instance, providing output\n configuration. If unset, default splits will be 'train' and 'eval' with\n size 2:1.\n Returns:\n examples: Artifact of type 'Examples' for output train and\n eval examples.")
_parser.add_argument("--input", dest="input_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--input-config", dest="input_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--output-config", dest="output_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--custom-config", dest="custom_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--examples", dest="examples_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = CsvExampleGen(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --input
- {inputPath: input}
- if:
cond: {isPresent: input_config}
then:
- --input-config
- {inputValue: input_config}
- if:
cond: {isPresent: output_config}
then:
- --output-config
- {inputValue: output_config}
- if:
cond: {isPresent: custom_config}
then:
- --custom-config
- {inputValue: custom_config}
- --examples
- {outputPath: examples}
| 8,077 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/CsvExampleGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/CsvExampleGen/with_URI_IO/component.py | # flake8: noqa
from typing import NamedTuple
def CsvExampleGen(
input_uri: 'ExternalArtifactUri',
output_examples_uri: 'ExamplesUri',
input_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Input'}},
output_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Output'}},
custom_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.CustomConfig'}} = None,
beam_pipeline_args: list = None,
) -> NamedTuple('Outputs', [
('examples_uri', 'ExamplesUri'),
]):
from tfx.components import CsvExampleGen as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_examples_uri, )
if __name__ == '__main__':
import kfp
kfp.components.create_component_from_func(
CsvExampleGen,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
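
# --- Usage sketch (not part of the generated component) ---
# A hypothetical one-step pipeline around the URI-based CsvExampleGen above,
# compiled with the KFP SDK. The component file path, the gs:// URIs and the
# output package name are assumptions for illustration only.
def _compile_csv_example_gen_pipeline():
    import json
    import kfp
    from kfp import components, dsl

    example_gen_op = components.load_component_from_file(
        'CsvExampleGen/with_URI_IO/component.yaml')  # assumed path

    @dsl.pipeline(name='csv-example-gen-uri-sketch')
    def pipeline():
        example_gen_op(
            input_uri='gs://my-bucket/data/csv',                            # assumed
            output_examples_uri='gs://my-bucket/tfx/example_gen/examples',  # assumed
            input_config=json.dumps(
                {'splits': [{'name': 'single_split', 'pattern': '*'}]}),
            output_config=json.dumps(
                {'splitConfig': {'splits': [
                    {'name': 'train', 'hashBuckets': 2},
                    {'name': 'eval', 'hashBuckets': 1},
                ]}}),
        )

    kfp.compiler.Compiler().compile(pipeline, 'csv_example_gen_pipeline.yaml')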
| 8,078 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/CsvExampleGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/CsvExampleGen/with_URI_IO/component.yaml | name: CsvExampleGen
inputs:
- {name: input_uri, type: ExternalArtifactUri}
- {name: output_examples_uri, type: ExamplesUri}
- name: input_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.Input'}
- name: output_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.Output'}
- name: custom_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.CustomConfig'}
optional: true
- {name: beam_pipeline_args, type: JsonArray, optional: true}
outputs:
- {name: examples_uri, type: ExamplesUri}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def CsvExampleGen(
input_uri,
output_examples_uri,
input_config,
output_config,
custom_config = None,
beam_pipeline_args = None,
):
from tfx.components import CsvExampleGen as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_examples_uri, )
import json
import argparse
_parser = argparse.ArgumentParser(prog='CsvExampleGen', description='')
_parser.add_argument("--input-uri", dest="input_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-examples-uri", dest="output_examples_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--input-config", dest="input_config", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-config", dest="output_config", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--custom-config", dest="custom_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--beam-pipeline-args", dest="beam_pipeline_args", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = CsvExampleGen(**_parsed_args)
_output_serializers = [
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --input-uri
- {inputValue: input_uri}
- --output-examples-uri
- {inputValue: output_examples_uri}
- --input-config
- {inputValue: input_config}
- --output-config
- {inputValue: output_config}
- if:
cond: {isPresent: custom_config}
then:
- --custom-config
- {inputValue: custom_config}
- if:
cond: {isPresent: beam_pipeline_args}
then:
- --beam-pipeline-args
- {inputValue: beam_pipeline_args}
- '----output-paths'
- {outputPath: examples_uri}
| 8,079 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/BigQueryExampleGen/component.py | # flake8: noqa TODO
from kfp.components import InputPath, OutputPath
def BigQueryExampleGen(
examples_path: OutputPath('Examples'),
query: str = None,
input_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Input'}} = None,
output_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Output'}} = None,
custom_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.CustomConfig'}} = None,
):
"""
Official TFX BigQueryExampleGen component.
The BigQuery examplegen component takes a query, and generates train
    and eval examples for downstream components.
Args:
query: BigQuery sql string, query result will be treated as a single
split, can be overwritten by input_config.
input_config: An example_gen_pb2.Input instance with Split.pattern as
BigQuery sql string. If set, it overwrites the 'query' arg, and allows
different queries per split.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1.
Returns:
examples: Optional channel of 'ExamplesPath' for output train and
eval examples.
Raises:
RuntimeError: Only one of query and input_config should be set.
"""
    from tfx.components.example_gen.big_query_example_gen.component import BigQueryExampleGen as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
if __name__ == '__main__':
import kfp
kfp.components.func_to_container_op(
BigQueryExampleGen,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
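
# --- Usage sketch (not part of the generated component) ---
# A hypothetical invocation of the component.yaml emitted above with a plain
# SQL query. The component file path and the query are assumptions; per the
# docstring, set either `query` or `input_config`, not both.
def _add_big_query_example_gen_step():
    import kfp.components as comp

    bq_example_gen_op = comp.load_component_from_file(
        'BigQueryExampleGen/component.yaml')  # assumed path
    return bq_example_gen_op(
        query='SELECT * FROM `my_project.my_dataset.my_table`',  # assumed query
    )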
| 8,080 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/BigQueryExampleGen/component.yaml | name: BigQueryExampleGen
description: |-
Official TFX BigQueryExampleGen component.
The BigQuery examplegen component takes a query, and generates train
  and eval examples for downstream components.
Args:
query: BigQuery sql string, query result will be treated as a single
split, can be overwritten by input_config.
input_config: An example_gen_pb2.Input instance with Split.pattern as
BigQuery sql string. If set, it overwrites the 'query' arg, and allows
different queries per split.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1.
Returns:
examples: Optional channel of 'ExamplesPath' for output train and
eval examples.
Raises:
RuntimeError: Only one of query and input_config should be set.
inputs:
- {name: query, type: String, optional: true}
- name: input_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.Input'}
optional: true
- name: output_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.Output'}
optional: true
- name: custom_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.CustomConfig'}
optional: true
outputs:
- {name: examples, type: Examples}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def BigQueryExampleGen(
examples_path ,
query = None,
input_config = None,
output_config = None,
custom_config = None,
):
"""
Official TFX BigQueryExampleGen component.
The BigQuery examplegen component takes a query, and generates train
          and eval examples for downstream components.
Args:
query: BigQuery sql string, query result will be treated as a single
split, can be overwritten by input_config.
input_config: An example_gen_pb2.Input instance with Split.pattern as
BigQuery sql string. If set, it overwrites the 'query' arg, and allows
different queries per split.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1.
Returns:
examples: Optional channel of 'ExamplesPath' for output train and
eval examples.
Raises:
RuntimeError: Only one of query and input_config should be set.
"""
          from tfx.components.example_gen.big_query_example_gen.component import BigQueryExampleGen as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
import argparse
_parser = argparse.ArgumentParser(prog='BigQueryExampleGen', description="Official TFX BigQueryExampleGen component.\n\n The BigQuery examplegen component takes a query, and generates train\n and eval examples for downsteam components.\n\n\n Args:\n query: BigQuery sql string, query result will be treated as a single\n split, can be overwritten by input_config.\n input_config: An example_gen_pb2.Input instance with Split.pattern as\n BigQuery sql string. If set, it overwrites the 'query' arg, and allows\n different queries per split.\n output_config: An example_gen_pb2.Output instance, providing output\n configuration. If unset, default splits will be 'train' and 'eval' with\n size 2:1.\n Returns:\n examples: Optional channel of 'ExamplesPath' for output train and\n eval examples.\n\n Raises:\n RuntimeError: Only one of query and input_config should be set.")
_parser.add_argument("--query", dest="query", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--input-config", dest="input_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--output-config", dest="output_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--custom-config", dest="custom_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--examples", dest="examples_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = BigQueryExampleGen(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- if:
cond: {isPresent: query}
then:
- --query
- {inputValue: query}
- if:
cond: {isPresent: input_config}
then:
- --input-config
- {inputValue: input_config}
- if:
cond: {isPresent: output_config}
then:
- --output-config
- {inputValue: output_config}
- if:
cond: {isPresent: custom_config}
then:
- --custom-config
- {inputValue: custom_config}
- --examples
- {outputPath: examples}
| 8,081 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/BigQueryExampleGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/BigQueryExampleGen/with_URI_IO/component.py | # flake8: noqa
from typing import NamedTuple
def BigQueryExampleGen(
output_examples_uri: 'ExamplesUri',
input_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Input'}},
output_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Output'}},
custom_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.CustomConfig'}} = None,
beam_pipeline_args: list = None,
) -> NamedTuple('Outputs', [
('examples_uri', 'ExamplesUri'),
]):
from tfx.components import BigQueryExampleGen as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_examples_uri, )
if __name__ == '__main__':
import kfp
kfp.components.create_component_from_func(
BigQueryExampleGen,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
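
# --- Usage sketch (not part of the generated component) ---
# A hypothetical call that forwards Beam options through the JsonArray-typed
# `beam_pipeline_args` input of the URI-based component above (these end up in
# the executor context built in the code). The component file path, GCP
# project, region, bucket and query are assumptions for illustration only.
def _add_big_query_example_gen_uri_step():
    import json
    import kfp.components as comp

    bq_example_gen_op = comp.load_component_from_file(
        'BigQueryExampleGen/with_URI_IO/component.yaml')  # assumed path
    return bq_example_gen_op(
        output_examples_uri='gs://my-bucket/tfx/bq_example_gen/examples',  # assumed
        input_config=json.dumps({'splits': [{
            'name': 'single_split',
            'pattern': 'SELECT * FROM `my_project.my_dataset.my_table`',  # assumed
        }]}),
        output_config=json.dumps({'splitConfig': {'splits': [
            {'name': 'train', 'hashBuckets': 2},
            {'name': 'eval', 'hashBuckets': 1},
        ]}}),
        beam_pipeline_args=json.dumps([
            '--runner=DataflowRunner',             # assumed runner choice
            '--project=my-gcp-project',            # assumed
            '--region=us-central1',                # assumed
            '--temp_location=gs://my-bucket/tmp',  # assumed
        ]),
    )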
| 8,082 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/BigQueryExampleGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/BigQueryExampleGen/with_URI_IO/component.yaml | name: BigQueryExampleGen
inputs:
- {name: output_examples_uri, type: ExamplesUri}
- name: input_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.Input'}
- name: output_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.Output'}
- name: custom_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.CustomConfig'}
optional: true
- {name: beam_pipeline_args, type: JsonArray, optional: true}
outputs:
- {name: examples_uri, type: ExamplesUri}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def BigQueryExampleGen(
output_examples_uri,
input_config,
output_config,
custom_config = None,
beam_pipeline_args = None,
):
from tfx.components import BigQueryExampleGen as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_examples_uri, )
import json
import argparse
_parser = argparse.ArgumentParser(prog='BigQueryExampleGen', description='')
_parser.add_argument("--output-examples-uri", dest="output_examples_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--input-config", dest="input_config", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-config", dest="output_config", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--custom-config", dest="custom_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--beam-pipeline-args", dest="beam_pipeline_args", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = BigQueryExampleGen(**_parsed_args)
_output_serializers = [
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --output-examples-uri
- {inputValue: output_examples_uri}
- --input-config
- {inputValue: input_config}
- --output-config
- {inputValue: output_config}
- if:
cond: {isPresent: custom_config}
then:
- --custom-config
- {inputValue: custom_config}
- if:
cond: {isPresent: beam_pipeline_args}
then:
- --beam-pipeline-args
- {inputValue: beam_pipeline_args}
- '----output-paths'
- {outputPath: examples_uri}
| 8,083 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/ImportExampleGen/component.py | # flake8: noqa TODO
from kfp.components import InputPath, OutputPath
def ImportExampleGen(
input_base_path: InputPath('ExternalPath'),
#input_path: InputPath('ExternalPath'),
examples_path: OutputPath('Examples'),
input_config: 'JsonObject: example_gen_pb2.Input' = None,
output_config: 'JsonObject: example_gen_pb2.Output' = None,
):
"""
TFX ImportExampleGen component.
The ImportExampleGen component takes TFRecord files with TF Example data
    format, and generates train and eval examples for downstream components.
    This component provides consistent and configurable partitioning, and it also
    shuffles the dataset for ML best practice.
Args:
input: A Channel of 'ExternalPath' type, which includes one artifact
whose uri is an external directory with TFRecord files inside
(required).
input_config: An example_gen_pb2.Input instance, providing input
configuration. If unset, the files under input_base will be treated as a
single split.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1.
Returns:
examples: Optional channel of 'ExamplesPath' for output train and
eval examples.
Raises:
RuntimeError: Only one of query and input_config should be set.
"""
from tfx.components.example_gen.import_example_gen.component import ImportExampleGen as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
if __name__ == '__main__':
import kfp
kfp.components.func_to_container_op(
ImportExampleGen,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
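
# --- Illustrative configuration values (sketch only, not used above) ---
# The input_config and output_config arguments are JSON-serialized
# example_gen_pb2.Input / example_gen_pb2.Output messages. The values below
# only illustrate the expected JSON shape; the split names and file pattern
# are hypothetical placeholders and are not referenced anywhere in this repo.
_EXAMPLE_INPUT_CONFIG = (
    '{"splits": [{"name": "single_split", "pattern": "*.tfrecord.gz"}]}'
)
_EXAMPLE_OUTPUT_CONFIG = (
    '{"split_config": {"splits": ['
    '{"name": "train", "hash_buckets": 2}, '
    '{"name": "eval", "hash_buckets": 1}]}}'
)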
| 8,084 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/ImportExampleGen/component.yaml | name: ImportExampleGen
description: |-
TFX ImportExampleGen component.
  The ImportExampleGen component takes TFRecord files with TF Example data
  format, and generates train and eval examples for downstream components.
  This component provides consistent and configurable partitioning, and it
  also shuffles the dataset for ML best practices.
Args:
input: A Channel of 'ExternalPath' type, which includes one artifact
whose uri is an external directory with TFRecord files inside
(required).
input_config: An example_gen_pb2.Input instance, providing input
configuration. If unset, the files under input_base will be treated as a
single split.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1.
Returns:
examples: Optional channel of 'ExamplesPath' for output train and
eval examples.
Raises:
RuntimeError: Only one of query and input_config should be set.
inputs:
- {name: input_base, type: ExternalPath}
- {name: input_config, type: 'JsonObject: example_gen_pb2.Input', optional: true}
- {name: output_config, type: 'JsonObject: example_gen_pb2.Output', optional: true}
outputs:
- {name: examples, type: Examples}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def ImportExampleGen(
input_base_path ,
#input_path: InputPath('ExternalPath'),
examples_path ,
input_config = None,
output_config = None,
):
"""
TFX ImportExampleGen component.
The ImportExampleGen component takes TFRecord files with TF Example data
format, and generates train and eval examples for downsteam components.
This component provides consistent and configurable partition, and it also
shuffle the dataset for ML best practice.
Args:
input: A Channel of 'ExternalPath' type, which includes one artifact
whose uri is an external directory with TFRecord files inside
(required).
input_config: An example_gen_pb2.Input instance, providing input
configuration. If unset, the files under input_base will be treated as a
single split.
output_config: An example_gen_pb2.Output instance, providing output
configuration. If unset, default splits will be 'train' and 'eval' with
size 2:1.
Returns:
examples: Optional channel of 'ExamplesPath' for output train and
eval examples.
Raises:
RuntimeError: Only one of query and input_config should be set.
"""
from tfx.components.example_gen.import_example_gen.component import ImportExampleGen as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
import argparse
_parser = argparse.ArgumentParser(prog='ImportExampleGen', description="TFX ImportExampleGen component.\n\n The ImportExampleGen component takes TFRecord files with TF Example data\n format, and generates train and eval examples for downsteam components.\n This component provides consistent and configurable partition, and it also\n shuffle the dataset for ML best practice.\n\n Args:\n input: A Channel of 'ExternalPath' type, which includes one artifact\n whose uri is an external directory with TFRecord files inside\n (required).\n input_config: An example_gen_pb2.Input instance, providing input\n configuration. If unset, the files under input_base will be treated as a\n single split.\n output_config: An example_gen_pb2.Output instance, providing output\n configuration. If unset, default splits will be 'train' and 'eval' with\n size 2:1.\n Returns:\n examples: Optional channel of 'ExamplesPath' for output train and\n eval examples.\n\n Raises:\n RuntimeError: Only one of query and input_config should be set.")
_parser.add_argument("--input-base", dest="input_base_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--input-config", dest="input_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--output-config", dest="output_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--examples", dest="examples_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = ImportExampleGen(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --input-base
- {inputPath: input_base}
- if:
cond: {isPresent: input_config}
then:
- --input-config
- {inputValue: input_config}
- if:
cond: {isPresent: output_config}
then:
- --output-config
- {inputValue: output_config}
- --examples
- {outputPath: examples}
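    # Note (added for clarity, standard KFP placeholder semantics): at run time
    # {inputValue: ...} is replaced with the argument's raw string value (e.g.
    # the JSON for input_config), {inputPath: ...} with a local path where the
    # input artifact has been materialized, and {outputPath: ...} with a local
    # path the program is expected to write its output to.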
| 8,085 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/ImportExampleGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/ImportExampleGen/with_URI_IO/component.py | # flake8: noqa
from typing import NamedTuple
def ImportExampleGen(
input_uri: 'ExternalArtifactUri',
output_examples_uri: 'ExamplesUri',
input_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Input'}},
output_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Output'}},
custom_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.CustomConfig'}} = None,
beam_pipeline_args: list = None,
) -> NamedTuple('Outputs', [
('examples_uri', 'ExamplesUri'),
]):
from tfx.components import ImportExampleGen as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_examples_uri, )
if __name__ == '__main__':
import kfp
kfp.components.create_component_from_func(
ImportExampleGen,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
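
# --- Usage sketch for the URI-based variant (illustrative only) ---
# A minimal, hedged example of how the component.yaml generated above might be
# wired into a Kubeflow Pipelines pipeline. Because this variant exchanges
# artifacts as plain URI strings, constant values can be passed directly.
# The bucket paths and pipeline name are hypothetical placeholders, and this
# function is never invoked in this module.
def _example_uri_pipeline_sketch():
    import kfp
    from kfp import dsl

    import_example_gen_op = kfp.components.load_component_from_file('component.yaml')

    @dsl.pipeline(name='import-example-gen-uri-demo')
    def pipeline(
        input_uri: str = 'gs://some-bucket/tfrecords',
        output_examples_uri: str = 'gs://some-bucket/pipeline-root/examples',
    ):
        import_example_gen_op(
            input_uri=input_uri,
            output_examples_uri=output_examples_uri,
            # JSON-serialized example_gen_pb2.Input / Output messages.
            input_config='{"splits": [{"name": "single_split", "pattern": "*"}]}',
            output_config=('{"split_config": {"splits": ['
                           '{"name": "train", "hash_buckets": 2}, '
                           '{"name": "eval", "hash_buckets": 1}]}}'),
        )

    return pipeline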
| 8,086 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/ImportExampleGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/ExampleGen/ImportExampleGen/with_URI_IO/component.yaml | name: ImportExampleGen
inputs:
- {name: input_uri, type: ExternalArtifactUri}
- {name: output_examples_uri, type: ExamplesUri}
- name: input_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.Input'}
- name: output_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.Output'}
- name: custom_config
type:
JsonObject: {data_type: 'proto:tfx.components.example_gen.CustomConfig'}
optional: true
- {name: beam_pipeline_args, type: JsonArray, optional: true}
outputs:
- {name: examples_uri, type: ExamplesUri}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def ImportExampleGen(
input_uri,
output_examples_uri,
input_config,
output_config,
custom_config = None,
beam_pipeline_args = None,
):
from tfx.components import ImportExampleGen as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_examples_uri, )
import json
import argparse
_parser = argparse.ArgumentParser(prog='ImportExampleGen', description='')
_parser.add_argument("--input-uri", dest="input_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-examples-uri", dest="output_examples_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--input-config", dest="input_config", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-config", dest="output_config", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--custom-config", dest="custom_config", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--beam-pipeline-args", dest="beam_pipeline_args", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = ImportExampleGen(**_parsed_args)
_output_serializers = [
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --input-uri
- {inputValue: input_uri}
- --output-examples-uri
- {inputValue: output_examples_uri}
- --input-config
- {inputValue: input_config}
- --output-config
- {inputValue: output_config}
- if:
cond: {isPresent: custom_config}
then:
- --custom-config
- {inputValue: custom_config}
- if:
cond: {isPresent: beam_pipeline_args}
then:
- --beam-pipeline-args
- {inputValue: beam_pipeline_args}
- '----output-paths'
- {outputPath: examples_uri}
| 8,087 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Trainer/component.py | # flake8: noqa TODO
from kfp.components import InputPath, OutputPath
def Trainer(
examples_path: InputPath('Examples'),
schema_path: InputPath('Schema'),
model_path: OutputPath('Model'),
train_args: {'JsonObject': {'data_type': 'proto:tfx.components.trainer.TrainArgs'}},
eval_args: {'JsonObject': {'data_type': 'proto:tfx.components.trainer.EvalArgs'}},
module_file: str = None,
trainer_fn: str = None,
custom_config: dict = None,
transform_graph_path: InputPath('TransformGraph') = None,
base_model_path: InputPath('Model') = None,
hyperparameters_path: InputPath('HyperParameters') = None,
):
"""
A TFX component to train a TensorFlow model.
    The Trainer component is used to train and evaluate a model using given
    inputs and a user-supplied estimator. This component includes a custom
    driver to optionally grab the previous model to warm-start from.
## Providing an estimator
The TFX executor will use the estimator provided in the `module_file` file
to train the model. The Trainer executor will look specifically for the
`trainer_fn()` function within that file. Before training, the executor will
call that function expecting the following returned as a dictionary:
- estimator: The
[estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)
to be used by TensorFlow to train the model.
- train_spec: The
[configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/TrainSpec)
to be used by the "train" part of the TensorFlow `train_and_evaluate()`
call.
- eval_spec: The
[configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/EvalSpec)
to be used by the "eval" part of the TensorFlow `train_and_evaluate()` call.
- eval_input_receiver_fn: The
[configuration](https://www.tensorflow.org/tfx/model_analysis/get_started#modify_an_existing_model)
to be used
by the [ModelValidator](https://www.tensorflow.org/tfx/guide/modelval)
component when validating the model.
An example of `trainer_fn()` can be found in the [user-supplied
    code](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)
of the TFX Chicago Taxi pipeline example.
Args:
examples: A Channel of 'Examples' type, serving as the source of
examples that are used in training (required). May be raw or
transformed.
transform_graph: An optional Channel of 'TransformGraph' type, serving as
the input transform graph if present.
schema: A Channel of 'SchemaPath' type, serving as the schema of training
and eval data.
module_file: A path to python module file containing UDF model definition.
The module_file must implement a function named `trainer_fn` at its
top level. The function must have the following signature.
def trainer_fn(tf.contrib.training.HParams,
tensorflow_metadata.proto.v0.schema_pb2) -> Dict:
...
where the returned Dict has the following key-values.
'estimator': an instance of tf.estimator.Estimator
'train_spec': an instance of tf.estimator.TrainSpec
'eval_spec': an instance of tf.estimator.EvalSpec
'eval_input_receiver_fn': an instance of tfma.export.EvalInputReceiver
Exactly one of 'module_file' or 'trainer_fn' must be supplied.
trainer_fn: A python path to UDF model definition function. See
'module_file' for the required signature of the UDF.
Exactly one of 'module_file' or 'trainer_fn' must be supplied.
train_args: A trainer_pb2.TrainArgs instance, containing args used for
        training. Currently only num_steps is available.
eval_args: A trainer_pb2.EvalArgs instance, containing args used for eval.
        Currently only num_steps is available.
custom_config: A dict which contains the training job parameters to be
passed to Google Cloud ML Engine. For the full set of parameters
supported by Google Cloud ML Engine, refer to
https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
Returns:
model: Optional 'Model' channel for result of exported models.
Raises:
ValueError:
- When both or neither of 'module_file' and 'trainer_fn' is supplied.
"""
from tfx.components.trainer.component import Trainer as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
            artifact.uri = artifact_path + '/'  # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
if __name__ == '__main__':
import kfp
kfp.components.func_to_container_op(
Trainer,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
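
# --- module_file sketch (illustrative only) ---
# The docstring above spells out the contract a user-supplied `trainer_fn`
# must follow. The skeleton below restates that contract as code; every body
# value is a placeholder (`...`) because the estimator, specs and receiver
# function are entirely model-specific. It is not used anywhere in this repo.
def _example_trainer_fn_skeleton(hparams, schema):
    """Placeholder trainer_fn matching the documented return contract."""
    estimator = ...               # a tf.estimator.Estimator
    train_spec = ...              # a tf.estimator.TrainSpec
    eval_spec = ...               # a tf.estimator.EvalSpec
    eval_input_receiver_fn = ...  # a tfma.export.EvalInputReceiver factory
    return {
        'estimator': estimator,
        'train_spec': train_spec,
        'eval_spec': eval_spec,
        'eval_input_receiver_fn': eval_input_receiver_fn,
    }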
| 8,088 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Trainer/component.yaml | name: Trainer
description: |-
A TFX component to train a TensorFlow model.
  The Trainer component is used to train and evaluate a model using given
  inputs and a user-supplied estimator. This component includes a custom
  driver to optionally grab the previous model to warm-start from.
## Providing an estimator
The TFX executor will use the estimator provided in the `module_file` file
to train the model. The Trainer executor will look specifically for the
`trainer_fn()` function within that file. Before training, the executor will
call that function expecting the following returned as a dictionary:
- estimator: The
[estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)
to be used by TensorFlow to train the model.
- train_spec: The
[configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/TrainSpec)
to be used by the "train" part of the TensorFlow `train_and_evaluate()`
call.
- eval_spec: The
[configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/EvalSpec)
to be used by the "eval" part of the TensorFlow `train_and_evaluate()` call.
- eval_input_receiver_fn: The
[configuration](https://www.tensorflow.org/tfx/model_analysis/get_started#modify_an_existing_model)
to be used
by the [ModelValidator](https://www.tensorflow.org/tfx/guide/modelval)
component when validating the model.
An example of `trainer_fn()` can be found in the [user-supplied
  code](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)
of the TFX Chicago Taxi pipeline example.
Args:
examples: A Channel of 'Examples' type, serving as the source of
examples that are used in training (required). May be raw or
transformed.
transform_graph: An optional Channel of 'TransformGraph' type, serving as
the input transform graph if present.
schema: A Channel of 'SchemaPath' type, serving as the schema of training
and eval data.
module_file: A path to python module file containing UDF model definition.
The module_file must implement a function named `trainer_fn` at its
top level. The function must have the following signature.
def trainer_fn(tf.contrib.training.HParams,
tensorflow_metadata.proto.v0.schema_pb2) -> Dict:
...
where the returned Dict has the following key-values.
'estimator': an instance of tf.estimator.Estimator
'train_spec': an instance of tf.estimator.TrainSpec
'eval_spec': an instance of tf.estimator.EvalSpec
'eval_input_receiver_fn': an instance of tfma.export.EvalInputReceiver
Exactly one of 'module_file' or 'trainer_fn' must be supplied.
trainer_fn: A python path to UDF model definition function. See
'module_file' for the required signature of the UDF.
Exactly one of 'module_file' or 'trainer_fn' must be supplied.
train_args: A trainer_pb2.TrainArgs instance, containing args used for
      training. Currently only num_steps is available.
eval_args: A trainer_pb2.EvalArgs instance, containing args used for eval.
      Currently only num_steps is available.
custom_config: A dict which contains the training job parameters to be
passed to Google Cloud ML Engine. For the full set of parameters
supported by Google Cloud ML Engine, refer to
https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
Returns:
model: Optional 'Model' channel for result of exported models.
Raises:
ValueError:
- When both or neither of 'module_file' and 'trainer_fn' is supplied.
inputs:
- {name: examples, type: Examples}
- {name: schema, type: Schema}
- name: train_args
type:
JsonObject: {data_type: 'proto:tfx.components.trainer.TrainArgs'}
- name: eval_args
type:
JsonObject: {data_type: 'proto:tfx.components.trainer.EvalArgs'}
- {name: module_file, type: String, optional: true}
- {name: trainer_fn, type: String, optional: true}
- {name: custom_config, type: JsonObject, optional: true}
- {name: transform_graph, type: TransformGraph, optional: true}
- {name: base_model, type: Model, optional: true}
- {name: hyperparameters, type: HyperParameters, optional: true}
outputs:
- {name: model, type: Model}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def Trainer(
examples_path ,
schema_path ,
model_path ,
train_args ,
eval_args ,
module_file = None,
trainer_fn = None,
custom_config = None,
transform_graph_path = None,
base_model_path = None,
hyperparameters_path = None,
):
"""
A TFX component to train a TensorFlow model.
The Trainer component is used to train and eval a model using given inputs and
a user-supplied estimator. This component includes a custom driver to
optionally grab previous model to warm start from.
## Providing an estimator
The TFX executor will use the estimator provided in the `module_file` file
to train the model. The Trainer executor will look specifically for the
`trainer_fn()` function within that file. Before training, the executor will
call that function expecting the following returned as a dictionary:
- estimator: The
[estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)
to be used by TensorFlow to train the model.
- train_spec: The
[configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/TrainSpec)
to be used by the "train" part of the TensorFlow `train_and_evaluate()`
call.
- eval_spec: The
[configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/EvalSpec)
to be used by the "eval" part of the TensorFlow `train_and_evaluate()` call.
- eval_input_receiver_fn: The
[configuration](https://www.tensorflow.org/tfx/model_analysis/get_started#modify_an_existing_model)
to be used
by the [ModelValidator](https://www.tensorflow.org/tfx/guide/modelval)
component when validating the model.
An example of `trainer_fn()` can be found in the [user-supplied
code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))
of the TFX Chicago Taxi pipeline example.
Args:
examples: A Channel of 'Examples' type, serving as the source of
examples that are used in training (required). May be raw or
transformed.
transform_graph: An optional Channel of 'TransformGraph' type, serving as
the input transform graph if present.
schema: A Channel of 'SchemaPath' type, serving as the schema of training
and eval data.
module_file: A path to python module file containing UDF model definition.
The module_file must implement a function named `trainer_fn` at its
top level. The function must have the following signature.
def trainer_fn(tf.contrib.training.HParams,
tensorflow_metadata.proto.v0.schema_pb2) -> Dict:
...
where the returned Dict has the following key-values.
'estimator': an instance of tf.estimator.Estimator
'train_spec': an instance of tf.estimator.TrainSpec
'eval_spec': an instance of tf.estimator.EvalSpec
'eval_input_receiver_fn': an instance of tfma.export.EvalInputReceiver
Exactly one of 'module_file' or 'trainer_fn' must be supplied.
trainer_fn: A python path to UDF model definition function. See
'module_file' for the required signature of the UDF.
Exactly one of 'module_file' or 'trainer_fn' must be supplied.
train_args: A trainer_pb2.TrainArgs instance, containing args used for
training. Current only num_steps is available.
eval_args: A trainer_pb2.EvalArgs instance, containing args used for eval.
Current only num_steps is available.
custom_config: A dict which contains the training job parameters to be
passed to Google Cloud ML Engine. For the full set of parameters
supported by Google Cloud ML Engine, refer to
https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
Returns:
model: Optional 'Model' channel for result of exported models.
Raises:
ValueError:
- When both or neither of 'module_file' and 'trainer_fn' is supplied.
"""
from tfx.components.trainer.component import Trainer as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
              artifact.uri = artifact_path + '/'  # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
import json
import argparse
_parser = argparse.ArgumentParser(prog='Trainer', description='A TFX component to train a TensorFlow model.\n\n The Trainer component is used to train and eval a model using given inputs and\n a user-supplied estimator. This component includes a custom driver to\n optionally grab previous model to warm start from.\n\n ## Providing an estimator\n The TFX executor will use the estimator provided in the `module_file` file\n to train the model. The Trainer executor will look specifically for the\n `trainer_fn()` function within that file. Before training, the executor will\n call that function expecting the following returned as a dictionary:\n\n - estimator: The\n [estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)\n to be used by TensorFlow to train the model.\n - train_spec: The\n [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/TrainSpec)\n to be used by the "train" part of the TensorFlow `train_and_evaluate()`\n call.\n - eval_spec: The\n [configuration](https://www.tensorflow.org/api_docs/python/tf/estimator/EvalSpec)\n to be used by the "eval" part of the TensorFlow `train_and_evaluate()` call.\n - eval_input_receiver_fn: The\n [configuration](https://www.tensorflow.org/tfx/model_analysis/get_started#modify_an_existing_model)\n to be used\n by the [ModelValidator](https://www.tensorflow.org/tfx/guide/modelval)\n component when validating the model.\n\n An example of `trainer_fn()` can be found in the [user-supplied\n code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py))\n of the TFX Chicago Taxi pipeline example.\n\n\n Args:\n examples: A Channel of \'Examples\' type, serving as the source of\n examples that are used in training (required). May be raw or\n transformed.\n transform_graph: An optional Channel of \'TransformGraph\' type, serving as\n the input transform graph if present.\n schema: A Channel of \'SchemaPath\' type, serving as the schema of training\n and eval data.\n module_file: A path to python module file containing UDF model definition.\n The module_file must implement a function named `trainer_fn` at its\n top level. The function must have the following signature.\n\n def trainer_fn(tf.contrib.training.HParams,\n tensorflow_metadata.proto.v0.schema_pb2) -> Dict:\n ...\n\n where the returned Dict has the following key-values.\n \'estimator\': an instance of tf.estimator.Estimator\n \'train_spec\': an instance of tf.estimator.TrainSpec\n \'eval_spec\': an instance of tf.estimator.EvalSpec\n \'eval_input_receiver_fn\': an instance of tfma.export.EvalInputReceiver\n\n Exactly one of \'module_file\' or \'trainer_fn\' must be supplied.\n trainer_fn: A python path to UDF model definition function. See\n \'module_file\' for the required signature of the UDF.\n Exactly one of \'module_file\' or \'trainer_fn\' must be supplied.\n train_args: A trainer_pb2.TrainArgs instance, containing args used for\n training. Current only num_steps is available.\n eval_args: A trainer_pb2.EvalArgs instance, containing args used for eval.\n Current only num_steps is available.\n custom_config: A dict which contains the training job parameters to be\n passed to Google Cloud ML Engine. 
For the full set of parameters\n supported by Google Cloud ML Engine, refer to\n https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job\n Returns:\n model: Optional \'Model\' channel for result of exported models.\n Raises:\n ValueError:\n - When both or neither of \'module_file\' and \'trainer_fn\' is supplied.')
_parser.add_argument("--examples", dest="examples_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--schema", dest="schema_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--train-args", dest="train_args", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--eval-args", dest="eval_args", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--module-file", dest="module_file", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--trainer-fn", dest="trainer_fn", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--custom-config", dest="custom_config", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--transform-graph", dest="transform_graph_path", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--base-model", dest="base_model_path", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--hyperparameters", dest="hyperparameters_path", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--model", dest="model_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = Trainer(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --examples
- {inputPath: examples}
- --schema
- {inputPath: schema}
- --train-args
- {inputValue: train_args}
- --eval-args
- {inputValue: eval_args}
- if:
cond: {isPresent: module_file}
then:
- --module-file
- {inputValue: module_file}
- if:
cond: {isPresent: trainer_fn}
then:
- --trainer-fn
- {inputValue: trainer_fn}
- if:
cond: {isPresent: custom_config}
then:
- --custom-config
- {inputValue: custom_config}
- if:
cond: {isPresent: transform_graph}
then:
- --transform-graph
- {inputPath: transform_graph}
- if:
cond: {isPresent: base_model}
then:
- --base-model
- {inputPath: base_model}
- if:
cond: {isPresent: hyperparameters}
then:
- --hyperparameters
- {inputPath: hyperparameters}
- --model
- {outputPath: model}
| 8,089 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Trainer | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Trainer/with_URI_IO/component.py | # flake8: noqa
from typing import NamedTuple
def Trainer(
examples_uri: 'ExamplesUri',
schema_uri: 'SchemaUri',
output_model_uri: 'ModelUri',
train_args: {'JsonObject': {'data_type': 'proto:tfx.components.trainer.TrainArgs'}},
eval_args: {'JsonObject': {'data_type': 'proto:tfx.components.trainer.EvalArgs'}},
transform_graph_uri: 'TransformGraphUri' = None,
base_model_uri: 'ModelUri' = None,
hyperparameters_uri: 'HyperParametersUri' = None,
module_file: str = None,
run_fn: str = None,
trainer_fn: str = None,
custom_config: dict = None,
beam_pipeline_args: list = None,
) -> NamedTuple('Outputs', [
('model_uri', 'ModelUri'),
]):
from tfx.components import Trainer as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_model_uri, )
if __name__ == '__main__':
import kfp
kfp.components.create_component_from_func(
Trainer,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
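
# --- Illustrative argument values (sketch only, not used above) ---
# train_args and eval_args are JSON-serialized trainer_pb2.TrainArgs /
# trainer_pb2.EvalArgs messages. Per the path-based Trainer component's
# docstring, only num_steps is currently honoured; the step counts below are
# arbitrary placeholders showing the expected JSON shape when this component
# is invoked (e.g. from a Kubeflow Pipelines pipeline).
_EXAMPLE_TRAIN_ARGS = '{"num_steps": 1000}'
_EXAMPLE_EVAL_ARGS = '{"num_steps": 100}'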
| 8,090 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Trainer | kubeflow_public_repos/kfp-tekton-backend/components/tfx/Trainer/with_URI_IO/component.yaml | name: Trainer
inputs:
- {name: examples_uri, type: ExamplesUri}
- {name: schema_uri, type: SchemaUri}
- {name: output_model_uri, type: ModelUri}
- name: train_args
type:
JsonObject: {data_type: 'proto:tfx.components.trainer.TrainArgs'}
- name: eval_args
type:
JsonObject: {data_type: 'proto:tfx.components.trainer.EvalArgs'}
- {name: transform_graph_uri, type: TransformGraphUri, optional: true}
- {name: base_model_uri, type: ModelUri, optional: true}
- {name: hyperparameters_uri, type: HyperParametersUri, optional: true}
- {name: module_file, type: String, optional: true}
- {name: run_fn, type: String, optional: true}
- {name: trainer_fn, type: String, optional: true}
- {name: custom_config, type: JsonObject, optional: true}
- {name: beam_pipeline_args, type: JsonArray, optional: true}
outputs:
- {name: model_uri, type: ModelUri}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def Trainer(
examples_uri,
schema_uri,
output_model_uri,
train_args,
eval_args,
transform_graph_uri = None,
base_model_uri = None,
hyperparameters_uri = None,
module_file = None,
run_fn = None,
trainer_fn = None,
custom_config = None,
beam_pipeline_args = None,
):
from tfx.components import Trainer as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_model_uri, )
import json
import argparse
_parser = argparse.ArgumentParser(prog='Trainer', description='')
_parser.add_argument("--examples-uri", dest="examples_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--schema-uri", dest="schema_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-model-uri", dest="output_model_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--train-args", dest="train_args", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--eval-args", dest="eval_args", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--transform-graph-uri", dest="transform_graph_uri", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--base-model-uri", dest="base_model_uri", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--hyperparameters-uri", dest="hyperparameters_uri", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--module-file", dest="module_file", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--run-fn", dest="run_fn", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--trainer-fn", dest="trainer_fn", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--custom-config", dest="custom_config", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--beam-pipeline-args", dest="beam_pipeline_args", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = Trainer(**_parsed_args)
_output_serializers = [
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --examples-uri
- {inputValue: examples_uri}
- --schema-uri
- {inputValue: schema_uri}
- --output-model-uri
- {inputValue: output_model_uri}
- --train-args
- {inputValue: train_args}
- --eval-args
- {inputValue: eval_args}
- if:
cond: {isPresent: transform_graph_uri}
then:
- --transform-graph-uri
- {inputValue: transform_graph_uri}
- if:
cond: {isPresent: base_model_uri}
then:
- --base-model-uri
- {inputValue: base_model_uri}
- if:
cond: {isPresent: hyperparameters_uri}
then:
- --hyperparameters-uri
- {inputValue: hyperparameters_uri}
- if:
cond: {isPresent: module_file}
then:
- --module-file
- {inputValue: module_file}
- if:
cond: {isPresent: run_fn}
then:
- --run-fn
- {inputValue: run_fn}
- if:
cond: {isPresent: trainer_fn}
then:
- --trainer-fn
- {inputValue: trainer_fn}
- if:
cond: {isPresent: custom_config}
then:
- --custom-config
- {inputValue: custom_config}
- if:
cond: {isPresent: beam_pipeline_args}
then:
- --beam-pipeline-args
- {inputValue: beam_pipeline_args}
- '----output-paths'
- {outputPath: model_uri}
| 8,091 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/SchemaGen/component.py | from kfp.components import InputPath, OutputPath
def SchemaGen(
statistics_path: InputPath('ExampleStatistics'),
schema_path: OutputPath('Schema'),
    infer_feature_shape: bool = None,  # ? should this default to False
):
"""Constructs a SchemaGen component.
Args:
statistics: A Channel of `ExampleStatistics` type (required if spec is not
passed). This should contain at least a `train` split. Other splits are
currently ignored. _required_
infer_feature_shape: Boolean value indicating
whether or not to infer the shape of features. If the feature shape is
        not inferred, the downstream TensorFlow Transform component using the schema
will parse input as tf.SparseTensor.
Returns:
output: Output `Schema` channel for schema result.
"""
from tfx.components.schema_gen.component import SchemaGen as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
            artifact.uri = artifact_path + '/'  # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
#return (output_path,)
if __name__ == '__main__':
import kfp
kfp.components.func_to_container_op(
SchemaGen,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
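
# --- Usage sketch (illustrative only) ---
# A minimal, hedged example of loading the generated component.yaml and wiring
# the `statistics` input from an upstream task. The upstream task and its
# output name ('statistics') are assumptions for illustration, not something
# defined in this module, and this function is never called here.
def _example_usage_sketch(statistics_gen_task):
    import kfp
    schema_gen_op = kfp.components.load_component_from_file('component.yaml')
    # `statistics` is a path-based input, normally fed from the corresponding
    # output of an upstream StatisticsGen step; infer_feature_shape is optional.
    return schema_gen_op(
        statistics=statistics_gen_task.outputs['statistics'],
        infer_feature_shape=False,
    )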
| 8,092 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/SchemaGen/component.yaml | name: SchemaGen
description: |-
Constructs a SchemaGen component.
Args:
statistics: A Channel of `ExampleStatistics` type (required if spec is not
passed). This should contain at least a `train` split. Other splits are
currently ignored. _required_
infer_feature_shape: Boolean value indicating
whether or not to infer the shape of features. If the feature shape is
    not inferred, the downstream TensorFlow Transform component using the schema
will parse input as tf.SparseTensor.
Returns:
output: Output `Schema` channel for schema result.
inputs:
- {name: statistics, type: ExampleStatistics}
- {name: infer_feature_shape, type: Boolean, optional: true}
outputs:
- {name: schema, type: Schema}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def SchemaGen(
statistics_path ,
schema_path ,
infer_feature_shape = None, # ? False
):
"""Constructs a SchemaGen component.
Args:
statistics: A Channel of `ExampleStatistics` type (required if spec is not
passed). This should contain at least a `train` split. Other splits are
currently ignored. _required_
infer_feature_shape: Boolean value indicating
whether or not to infer the shape of features. If the feature shape is
not inferred, downstream Tensorflow Transform component using the schema
will parse input as tf.SparseTensor.
Returns:
output: Output `Schema` channel for schema result.
"""
from tfx.components.schema_gen.component import SchemaGen as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
                  artifact.uri = artifact_path + '/'  # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
def _deserialize_bool(s) -> bool:
from distutils.util import strtobool
return strtobool(s) == 1
import argparse
_parser = argparse.ArgumentParser(prog='SchemaGen', description='Constructs a SchemaGen component.\n\n Args:\n statistics: A Channel of `ExampleStatistics` type (required if spec is not\n passed). This should contain at least a `train` split. Other splits are\n currently ignored. _required_\n infer_feature_shape: Boolean value indicating\n whether or not to infer the shape of features. If the feature shape is\n not inferred, downstream Tensorflow Transform component using the schema\n will parse input as tf.SparseTensor.\n Returns:\n output: Output `Schema` channel for schema result.')
_parser.add_argument("--statistics", dest="statistics_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--infer-feature-shape", dest="infer_feature_shape", type=_deserialize_bool, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--schema", dest="schema_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = SchemaGen(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --statistics
- {inputPath: statistics}
- if:
cond: {isPresent: infer_feature_shape}
then:
- --infer-feature-shape
- {inputValue: infer_feature_shape}
- --schema
- {outputPath: schema}
| 8,093 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/SchemaGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/SchemaGen/with_URI_IO/component.py | # flake8: noqa
from typing import NamedTuple
def SchemaGen(
statistics_uri: 'ExampleStatisticsUri',
output_schema_uri: 'SchemaUri',
infer_feature_shape: bool = None,
beam_pipeline_args: list = None,
) -> NamedTuple('Outputs', [
('schema_uri', 'SchemaUri'),
]):
from tfx.components import SchemaGen as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_schema_uri, )
if __name__ == '__main__':
import kfp
kfp.components.create_component_from_func(
SchemaGen,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
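

# --- Pipeline sketch for the URI-based variant (illustrative) ---
# Because this variant takes artifact locations as plain string URIs, a pipeline can
# pass them as ordinary parameters. The component file path, the pipeline name, and
# the gs:// locations below are placeholders, not values defined by this repository.
def make_schema_gen_uri_pipeline(component_file='component.yaml'):
    from kfp import components, dsl

    schema_gen_op = components.load_component_from_file(component_file)

    @dsl.pipeline(name='schema-gen-uri-demo')
    def pipeline(
        statistics_uri: str = 'gs://my-bucket/statistics_gen/statistics',
        output_schema_uri: str = 'gs://my-bucket/schema_gen/schema',
    ):
        # The task reads precomputed statistics from statistics_uri and writes the
        # inferred schema under output_schema_uri; the same URI is returned as the
        # 'schema_uri' output so downstream tasks can consume it.
        schema_gen_op(
            statistics_uri=statistics_uri,
            output_schema_uri=output_schema_uri,
            infer_feature_shape=False,
        )

    return pipeline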
| 8,094 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/SchemaGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/SchemaGen/with_URI_IO/component.yaml | name: SchemaGen
inputs:
- {name: statistics_uri, type: ExampleStatisticsUri}
- {name: output_schema_uri, type: SchemaUri}
- {name: infer_feature_shape, type: Boolean, optional: true}
- {name: beam_pipeline_args, type: JsonArray, optional: true}
outputs:
- {name: schema_uri, type: SchemaUri}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def SchemaGen(
statistics_uri,
output_schema_uri,
infer_feature_shape = None,
beam_pipeline_args = None,
):
from tfx.components import SchemaGen as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_schema_uri, )
def _deserialize_bool(s) -> bool:
from distutils.util import strtobool
return strtobool(s) == 1
import json
import argparse
_parser = argparse.ArgumentParser(prog='SchemaGen', description='')
_parser.add_argument("--statistics-uri", dest="statistics_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-schema-uri", dest="output_schema_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--infer-feature-shape", dest="infer_feature_shape", type=_deserialize_bool, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--beam-pipeline-args", dest="beam_pipeline_args", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = SchemaGen(**_parsed_args)
_output_serializers = [
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --statistics-uri
- {inputValue: statistics_uri}
- --output-schema-uri
- {inputValue: output_schema_uri}
- if:
cond: {isPresent: infer_feature_shape}
then:
- --infer-feature-shape
- {inputValue: infer_feature_shape}
- if:
cond: {isPresent: beam_pipeline_args}
then:
- --beam-pipeline-args
- {inputValue: beam_pipeline_args}
- '----output-paths'
- {outputPath: schema_uri}
| 8,095 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/StatisticsGen/component.py | from kfp.components import InputPath, OutputPath
def StatisticsGen(
examples_path: InputPath('Examples'),
#examples_path: 'ExamplesUri',
statistics_path: OutputPath('ExampleStatistics'),
#statistics_path: 'ExampleStatisticsUri',
stats_options: {'JsonObject': {'data_type': 'proto:tensorflow_data_validation.StatsOptions'}} = None,
schema_path: InputPath('Schema') = None,
#schema_path: 'SchemaUri' = None,
):
#) -> NamedTuple('Outputs', [
# ('statistics', 'ExampleStatisticsUri'),
#]):
"""Construct a StatisticsGen component.
Args:
examples: A Channel of `ExamplesPath` type, likely generated by the
[ExampleGen component](https://www.tensorflow.org/tfx/guide/examplegen).
This needs to contain two splits labeled `train` and `eval`. _required_
schema: A `Schema` channel to use for automatically configuring the value
of stats options passed to TFDV.
Returns:
statistics: `ExampleStatistics` channel for statistics of each split
provided in the input examples.
"""
from tfx.components.statistics_gen.component import StatisticsGen as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
            artifact.uri = artifact_path + '/'  # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
#return (statistics_path,)
if __name__ == '__main__':
import kfp
kfp.components.func_to_container_op(
StatisticsGen,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
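

# --- Chaining sketch (illustrative) ---
# A minimal sketch showing how this component's compiled 'component.yaml' can be
# chained with the SchemaGen component from the sibling directory. The relative
# paths assume the loader runs from components/tfx/; the upstream `examples`
# artifact would come from an ExampleGen task, shown only in the trailing comment.
def _load_statistics_and_schema_gen_ops():
    from kfp import components
    statistics_gen_op = components.load_component_from_file(
        'StatisticsGen/component.yaml')
    schema_gen_op = components.load_component_from_file(
        'SchemaGen/component.yaml')
    return statistics_gen_op, schema_gen_op

# Inside a @dsl.pipeline function the two ops would be wired roughly as:
#   statistics_task = statistics_gen_op(examples=example_gen_task.outputs['examples'])
#   schema_task = schema_gen_op(statistics=statistics_task.outputs['statistics'])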
| 8,096 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx | kubeflow_public_repos/kfp-tekton-backend/components/tfx/StatisticsGen/component.yaml | name: StatisticsGen
description: |-
Construct a StatisticsGen component.
Args:
examples: A Channel of `ExamplesPath` type, likely generated by the
[ExampleGen component](https://www.tensorflow.org/tfx/guide/examplegen).
This needs to contain two splits labeled `train` and `eval`. _required_
schema: A `Schema` channel to use for automatically configuring the value
of stats options passed to TFDV.
Returns:
statistics: `ExampleStatistics` channel for statistics of each split
provided in the input examples.
inputs:
- {name: examples, type: Examples}
- name: stats_options
type:
JsonObject: {data_type: 'proto:tensorflow_data_validation.StatsOptions'}
optional: true
- {name: schema, type: Schema, optional: true}
outputs:
- {name: statistics, type: ExampleStatistics}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def StatisticsGen(
examples_path ,
#examples_path: 'ExamplesUri',
statistics_path ,
#statistics_path: 'ExampleStatisticsUri',
stats_options = None,
schema_path = None,
#schema_path: 'SchemaUri' = None,
):
#) -> NamedTuple('Outputs', [
# ('statistics', 'ExampleStatisticsUri'),
#]):
"""Construct a StatisticsGen component.
Args:
examples: A Channel of `ExamplesPath` type, likely generated by the
[ExampleGen component](https://www.tensorflow.org/tfx/guide/examplegen).
This needs to contain two splits labeled `train` and `eval`. _required_
schema: A `Schema` channel to use for automatically configuring the value
of stats options passed to TFDV.
Returns:
statistics: `ExampleStatistics` channel for statistics of each split
provided in the input examples.
"""
from tfx.components.statistics_gen.component import StatisticsGen as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
                  artifact.uri = artifact_path + '/'  # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
import argparse
_parser = argparse.ArgumentParser(prog='StatisticsGen', description='Construct a StatisticsGen component.\n\n Args:\n examples: A Channel of `ExamplesPath` type, likely generated by the\n [ExampleGen component](https://www.tensorflow.org/tfx/guide/examplegen).\n This needs to contain two splits labeled `train` and `eval`. _required_\n schema: A `Schema` channel to use for automatically configuring the value\n of stats options passed to TFDV.\n\n Returns:\n statistics: `ExampleStatistics` channel for statistics of each split\n provided in the input examples.')
_parser.add_argument("--examples", dest="examples_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--stats-options", dest="stats_options", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--schema", dest="schema_path", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--statistics", dest="statistics_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = StatisticsGen(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --examples
- {inputPath: examples}
- if:
cond: {isPresent: stats_options}
then:
- --stats-options
- {inputValue: stats_options}
- if:
cond: {isPresent: schema}
then:
- --schema
- {inputPath: schema}
- --statistics
- {outputPath: statistics}
| 8,097 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/StatisticsGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/StatisticsGen/with_URI_IO/component.py | # flake8: noqa
from typing import NamedTuple
def StatisticsGen(
examples_uri: 'ExamplesUri',
output_statistics_uri: 'ExampleStatisticsUri',
schema_uri: 'SchemaUri' = None,
stats_options_json: str = None,
beam_pipeline_args: list = None,
) -> NamedTuple('Outputs', [
('statistics_uri', 'ExampleStatisticsUri'),
]):
from tfx.components import StatisticsGen as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_statistics_uri, )
if __name__ == '__main__':
import kfp
kfp.components.create_component_from_func(
StatisticsGen,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
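

# --- End-to-end URI pipeline sketch (illustrative) ---
# A sketch that chains the URI-based StatisticsGen and SchemaGen components. It
# assumes both with_URI_IO component.yaml files exist at the relative paths below
# (relative to components/tfx/) and uses placeholder gs:// locations; a real
# deployment would substitute its own artifact store paths.
def make_stats_and_schema_uri_pipeline():
    from kfp import components, dsl

    statistics_gen_op = components.load_component_from_file(
        'StatisticsGen/with_URI_IO/component.yaml')
    schema_gen_op = components.load_component_from_file(
        'SchemaGen/with_URI_IO/component.yaml')

    @dsl.pipeline(name='statistics-and-schema-from-uris')
    def pipeline(
        examples_uri: str = 'gs://my-bucket/example_gen/examples',
        statistics_uri: str = 'gs://my-bucket/statistics_gen/statistics',
        schema_uri: str = 'gs://my-bucket/schema_gen/schema',
    ):
        statistics_task = statistics_gen_op(
            examples_uri=examples_uri,
            output_statistics_uri=statistics_uri,
        )
        # Passing the upstream task's 'statistics_uri' output both hands the location
        # to SchemaGen and makes KFP order the two steps correctly.
        schema_gen_op(
            statistics_uri=statistics_task.outputs['statistics_uri'],
            output_schema_uri=schema_uri,
        )

    return pipeline

# The pipeline could then be compiled with, for example:
#   kfp.compiler.Compiler().compile(make_stats_and_schema_uri_pipeline(), 'pipeline.yaml')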
| 8,098 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/tfx/StatisticsGen | kubeflow_public_repos/kfp-tekton-backend/components/tfx/StatisticsGen/with_URI_IO/component.yaml | name: StatisticsGen
inputs:
- {name: examples_uri, type: ExamplesUri}
- {name: output_statistics_uri, type: ExampleStatisticsUri}
- {name: schema_uri, type: SchemaUri, optional: true}
- {name: stats_options_json, type: String, optional: true}
- {name: beam_pipeline_args, type: JsonArray, optional: true}
outputs:
- {name: statistics_uri, type: ExampleStatisticsUri}
implementation:
container:
image: tensorflow/tfx:0.21.4
command:
- python3
- -u
- -c
- |
def StatisticsGen(
examples_uri,
output_statistics_uri,
schema_uri = None,
stats_options_json = None,
beam_pipeline_args = None,
):
from tfx.components import StatisticsGen as component_class
#Generated code
import json
import os
import tempfile
import tensorflow
from google.protobuf import json_format, message
from tfx.types import channel_utils, artifact_utils
from tfx.components.base import base_executor
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
else:
argument_value_obj = argument_value
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
# Workaround for https://github.com/tensorflow/tensorflow/issues/39167
subdirs = [subdir.rstrip('/') for subdir in subdirs]
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())
output_dict = channel_utils.unwrap_channel_dict(component_class_instance.outputs.get_all())
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')
if base_artifact_path:
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
# Workaround for a TFX+Beam bug to make DataflowRunner work.
# Remove after the next release that has https://github.com/tensorflow/tfx/commit/ddb01c02426d59e8bd541e3fd3cbaaf68779b2df
import tfx
tfx.version.__version__ += 'dev'
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=tempfile.gettempdir(),
unique_id='tfx_component',
)
executor = component_class_instance.executor_spec.executor_class(executor_context)
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
return (output_statistics_uri, )
import json
import argparse
_parser = argparse.ArgumentParser(prog='StatisticsGen', description='')
_parser.add_argument("--examples-uri", dest="examples_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-statistics-uri", dest="output_statistics_uri", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--schema-uri", dest="schema_uri", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--stats-options-json", dest="stats_options_json", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--beam-pipeline-args", dest="beam_pipeline_args", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = StatisticsGen(**_parsed_args)
_output_serializers = [
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --examples-uri
- {inputValue: examples_uri}
- --output-statistics-uri
- {inputValue: output_statistics_uri}
- if:
cond: {isPresent: schema_uri}
then:
- --schema-uri
- {inputValue: schema_uri}
- if:
cond: {isPresent: stats_options_json}
then:
- --stats-options-json
- {inputValue: stats_options_json}
- if:
cond: {isPresent: beam_pipeline_args}
then:
- --beam-pipeline-args
- {inputValue: beam_pipeline_args}
- '----output-paths'
- {outputPath: statistics_uri}
| 8,099 |