Dataset Viewer
content (string, lengths 0 to 181M) | language (string, 54 classes)
---|---|
[package]
name = "console"
version = "0.1.0"
authors = ["Alex McArther <[email protected]>"]
[dependencies]
itertools = "*"
cgmath = "0.10"
specs = "0.7.0"
glutin = "0.6.1"
lazy_static = "0.2.1"
uuid = { version = "0.2.2", features = ["serde", "v4"] }
[dependencies.client_state]
path = "../../core/client_state"
[dependencies.pause]
path = "../../core/pause"
[dependencies.pubsub]
path = "../../../pubsub"
[dependencies.common]
path = "../../../common"
[dependencies.automatic_system_installer]
path = "../../infra/automatic_system_installer"
|
TOML
|
package com.cookbook.cbook.service;
import com.cookbook.cbook.entity.Recipe;
import org.springframework.stereotype.Repository;
import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
import javax.transaction.Transactional;
//@Repository
//@Transactional
public class RecipeDAOService {
@PersistenceContext
private EntityManager entityManager;
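// Note (added): with @Repository/@Transactional commented out above, this class is not a
// Spring-managed bean, and persist() below requires the caller to supply an active transaction.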
public long insert(Recipe recipe){
entityManager.persist(recipe);
return recipe.getId();
}
}
|
Java
|
(* Auto-generated from "game_descr.atd" *)
(** A position in the initial array. *)
type tile_pos = Game_descr_t.tile_pos
type tile = Game_descr_t.tile
type rule_descr = Game_descr_t.rule_descr = {
name: string;
flags: string list option
}
(** Player 0 is east and so on. *)
type round_player = Game_descr_t.round_player
type round_event = Game_descr_t.round_event =
Init of tile option Ag_util.ocaml_array
| Wall_breaker_roll of int
| Break_wall_roll of int
| Deal
| Draw of round_player
| Discard of (round_player * tile_pos)
| Mahjong of round_player
| Concealed_kong of (round_player * tile_pos list)
| Small_kong of (round_player * tile_pos)
| Chow of (round_player * tile_pos list)
| Pong of (round_player * tile_pos list)
| Kong of (round_player * tile_pos list)
| No_action of round_player
type ai_conf = Game_descr_t.ai_conf = { name: string; force: int }
type player_kind = Game_descr_t.player_kind = Human | AI of ai_conf
type player_idx = Game_descr_t.player_idx
type player_descr = Game_descr_t.player_descr = {
name: string;
kind: player_kind
}
type game_event = Game_descr_t.game_event =
Set_rule of rule_descr
| Player of player_descr
| East_seat of player_idx
| Init_score of float
| Round_event of round_event
| End_round
| New_round
| End_game
type game = Game_descr_t.game = {
game_events: game_event list;
current_round: round_event list
}
val write_tile_pos :
Bi_outbuf.t -> tile_pos -> unit
(** Output a JSON value of type {!tile_pos}. *)
val string_of_tile_pos :
?len:int -> tile_pos -> string
(** Serialize a value of type {!tile_pos}
into a JSON string.
@param len specifies the initial length
of the buffer used internally.
Default: 1024. *)
val read_tile_pos :
Yojson.Safe.lexer_state -> Lexing.lexbuf -> tile_pos
(** Input JSON data of type {!tile_pos}. *)
val tile_pos_of_string :
string -> tile_pos
(** Deserialize JSON data of type {!tile_pos}. *)
val write_tile :
Bi_outbuf.t -> tile -> unit
(** Output a JSON value of type {!tile}. *)
val string_of_tile :
?len:int -> tile -> string
(** Serialize a value of type {!tile}
into a JSON string.
@param len specifies the initial length
of the buffer used internally.
Default: 1024. *)
val read_tile :
Yojson.Safe.lexer_state -> Lexing.lexbuf -> tile
(** Input JSON data of type {!tile}. *)
val tile_of_string :
string -> tile
(** Deserialize JSON data of type {!tile}. *)
val write_rule_descr :
Bi_outbuf.t -> rule_descr -> unit
(** Output a JSON value of type {!rule_descr}. *)
val string_of_rule_descr :
?len:int -> rule_descr -> string
(** Serialize a value of type {!rule_descr}
into a JSON string.
@param len specifies the initial length
of the buffer used internally.
Default: 1024. *)
val read_rule_descr :
Yojson.Safe.lexer_state -> Lexing.lexbuf -> rule_descr
(** Input JSON data of type {!rule_descr}. *)
val rule_descr_of_string :
string -> rule_descr
(** Deserialize JSON data of type {!rule_descr}. *)
val write_round_player :
Bi_outbuf.t -> round_player -> unit
(** Output a JSON value of type {!round_player}. *)
val string_of_round_player :
?len:int -> round_player -> string
(** Serialize a value of type {!round_player}
into a JSON string.
@param len specifies the initial length
of the buffer used internally.
Default: 1024. *)
val read_round_player :
Yojson.Safe.lexer_state -> Lexing.lexbuf -> round_player
(** Input JSON data of type {!round_player}. *)
val round_player_of_string :
string -> round_player
(** Deserialize JSON data of type {!round_player}. *)
val write_round_event :
Bi_outbuf.t -> round_event -> unit
(** Output a JSON value of type {!round_event}. *)
val string_of_round_event :
?len:int -> round_event -> string
(** Serialize a value of type {!round_event}
into a JSON string.
@param len specifies the initial length
of the buffer used internally.
Default: 1024. *)
val read_round_event :
Yojson.Safe.lexer_state -> Lexing.lexbuf -> round_event
(** Input JSON data of type {!round_event}. *)
val round_event_of_string :
string -> round_event
(** Deserialize JSON data of type {!round_event}. *)
val write_ai_conf :
Bi_outbuf.t -> ai_conf -> unit
(** Output a JSON value of type {!ai_conf}. *)
val string_of_ai_conf :
?len:int -> ai_conf -> string
(** Serialize a value of type {!ai_conf}
into a JSON string.
@param len specifies the initial length
of the buffer used internally.
Default: 1024. *)
val read_ai_conf :
Yojson.Safe.lexer_state -> Lexing.lexbuf -> ai_conf
(** Input JSON data of type {!ai_conf}. *)
val ai_conf_of_string :
string -> ai_conf
(** Deserialize JSON data of type {!ai_conf}. *)
val write_player_kind :
Bi_outbuf.t -> player_kind -> unit
(** Output a JSON value of type {!player_kind}. *)
val string_of_player_kind :
?len:int -> player_kind -> string
(** Serialize a value of type {!player_kind}
into a JSON string.
@param len specifies the initial length
of the buffer used internally.
Default: 1024. *)
val read_player_kind :
Yojson.Safe.lexer_state -> Lexing.lexbuf -> player_kind
(** Input JSON data of type {!player_kind}. *)
val player_kind_of_string :
string -> player_kind
(** Deserialize JSON data of type {!player_kind}. *)
val write_player_idx :
Bi_outbuf.t -> player_idx -> unit
(** Output a JSON value of type {!player_idx}. *)
val string_of_player_idx :
?len:int -> player_idx -> string
(** Serialize a value of type {!player_idx}
into a JSON string.
@param len specifies the initial length
of the buffer used internally.
Default: 1024. *)
val read_player_idx :
Yojson.Safe.lexer_state -> Lexing.lexbuf -> player_idx
(** Input JSON data of type {!player_idx}. *)
val player_idx_of_string :
string -> player_idx
(** Deserialize JSON data of type {!player_idx}. *)
val write_player_descr :
Bi_outbuf.t -> player_descr -> unit
(** Output a JSON value of type {!player_descr}. *)
val string_of_player_descr :
?len:int -> player_descr -> string
(** Serialize a value of type {!player_descr}
into a JSON string.
@param len specifies the initial length
of the buffer used internally.
Default: 1024. *)
val read_player_descr :
Yojson.Safe.lexer_state -> Lexing.lexbuf -> player_descr
(** Input JSON data of type {!player_descr}. *)
val player_descr_of_string :
string -> player_descr
(** Deserialize JSON data of type {!player_descr}. *)
val write_game_event :
Bi_outbuf.t -> game_event -> unit
(** Output a JSON value of type {!game_event}. *)
val string_of_game_event :
?len:int -> game_event -> string
(** Serialize a value of type {!game_event}
into a JSON string.
@param len specifies the initial length
of the buffer used internally.
Default: 1024. *)
val read_game_event :
Yojson.Safe.lexer_state -> Lexing.lexbuf -> game_event
(** Input JSON data of type {!game_event}. *)
val game_event_of_string :
string -> game_event
(** Deserialize JSON data of type {!game_event}. *)
val write_game :
Bi_outbuf.t -> game -> unit
(** Output a JSON value of type {!game}. *)
val string_of_game :
?len:int -> game -> string
(** Serialize a value of type {!game}
into a JSON string.
@param len specifies the initial length
of the buffer used internally.
Default: 1024. *)
val read_game :
Yojson.Safe.lexer_state -> Lexing.lexbuf -> game
(** Input JSON data of type {!game}. *)
val game_of_string :
string -> game
(** Deserialize JSON data of type {!game}. *)
|
OCaml
|
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:background="@drawable/background_learn"
tools:context=".AddNewItemActivity" >
<ImageButton
android:id="@+id/add_item_btn_camera"
android:layout_width="200dp"
android:layout_height="200dp"
android:layout_above="@+id/add_item_layout_description"
android:layout_centerHorizontal="true"
android:layout_marginBottom="@dimen/add_item_margin_bottom"
android:background="@android:color/transparent"
android:contentDescription="@string/description_camera_image_button"
android:scaleType="fitCenter"
android:src="@drawable/btn_take_picture" />
<LinearLayout
android:id="@+id/add_item_layout_description"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_centerHorizontal="true"
android:layout_centerInParent="true" >
<EditText
android:id="@+id/add_item_edt_description"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_marginBottom="@dimen/add_item_margin_bottom"
android:background="@color/white_transparent"
android:ems="20"
android:hint="@string/add_item_hint"
android:inputType="text" >
</EditText>
<Spinner
android:id="@+id/add_item_spinner_list_subject"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_marginLeft="@dimen/add_item_screen_spinner_margin_left"
android:gravity="center_horizontal" />
</LinearLayout>
<LinearLayout
android:id="@+id/add_item_layout_record_play"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_below="@+id/add_item_layout_description"
android:layout_centerHorizontal="true"
android:layout_marginTop="@dimen/add_item_btn_record_margin_top"
android:paddingBottom="@dimen/add_item_margin_bottom" >
<ImageButton
android:id="@+id/add_item_btn_record"
android:layout_width="60dp"
android:layout_height="60dp"
android:background="@android:color/transparent"
android:contentDescription="@string/description_camera_image_button"
android:src="@drawable/record" />
<ImageButton
android:id="@+id/add_item_btn_recording"
android:layout_width="60dp"
android:layout_height="60dp"
android:background="@android:color/transparent"
android:contentDescription="@string/description_camera_image_button"
android:src="@drawable/stop"
android:visibility="gone" />
<ImageButton
android:id="@+id/add_item_btn_play"
android:layout_width="60dp"
android:layout_height="60dp"
android:background="@android:color/transparent"
android:contentDescription="@string/description_save_item"
android:paddingLeft="@dimen/add_item_btn_record_margin_left"
android:src="@drawable/play" />
<ImageButton
android:id="@+id/add_item_btn_playing"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:background="@android:color/transparent"
android:contentDescription="@string/description_camera_image_button"
android:paddingLeft="@dimen/add_item_btn_record_margin_left"
android:src="@drawable/stop"
android:visibility="gone" />
</LinearLayout>
<ImageButton
android:id="@+id/add_item_btn_save"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_below="@+id/add_item_layout_record_play"
android:layout_centerHorizontal="true"
android:background="@android:color/transparent"
android:contentDescription="@string/description_save_item"
android:src="@drawable/btn_save" />
</RelativeLayout>
|
XML
|
import java.math.BigInteger
fun main(args: Array<String>) {
val x = BigInteger.valueOf(5).pow(Math.pow(4.0, 3.0 * 3.0).toInt())
val y = x.toString()
val len = y.length
println("5^4^3^2 = ${y.substring(0, 20)}...${y.substring(len - 20)} and has $len digits")
}
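// Note (added): 4^(3^2) = 4^9 = 262144, so x = 5^262144; per the Rosetta Code
// arbitrary-precision task, the printed number has 183231 digits.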
|
Kotlin
|
{
"hits": 0,
"timeRestore": false,
"description": "",
"title": "Packetbeat Dashboard",
"panelsJSON": "[{\"col\":1,\"id\":\"Web-transactions\",\"row\":5,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":4,\"id\":\"DB-transactions\",\"row\":5,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Cache-transactions\",\"row\":5,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":10,\"id\":\"RPC-transactions\",\"row\":5,\"size_x\":3,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Response-times-percentiles\",\"row\":10,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Errors-count-over-time\",\"row\":13,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Errors-vs-successful-transactions\",\"row\":10,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"Latency-histogram\",\"row\":13,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":4,\"id\":\"Client-locations\",\"row\":1,\"size_x\":9,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"Response-times-repartition\",\"row\":7,\"size_x\":12,\"size_y\":3,\"type\":\"visualization\"},{\"id\":\"Navigation\",\"type\":\"visualization\",\"size_x\":3,\"size_y\":4,\"col\":1,\"row\":1}]",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}]}"
}
}
|
JSON
|
package com.cagnosolutions.app.main.user
import groovy.transform.CompileStatic
import javax.persistence.Entity
import javax.persistence.GeneratedValue
import javax.persistence.Id
import javax.persistence.Table
/**
* Created by Scott Cagno.
* Copyright Cagno Solutions. All rights reserved.
*/
@CompileStatic
@Entity
@Table(name = "user")
class User {
@Id
@GeneratedValue
Long id
String name, email, username, password, role = "ROLE_USER"
Long creation, lastSeen
short active = 1
}
|
Groovy
|
class Colors
COLORS = {
:red => 1,
:green => 2,
:yellow => 3,
:blue => 4,
:purple => 5,
:sea => 6,
:white => 7
}
class << self
def default_terminal_colors
@default_terminal_colors ||= "\e[0m"
end
def process(data)
begin
_process(data)
ensure
STDOUT.flush
reset!
end
end
def reset!
STDOUT.write("\e[0m")
STDOUT.flush
end
def _process(data)
# Backgrounds
if m = data.match(%r{<(.*?) bg=(.*?)>(.*?)<\/(.*?)>}m)
COLORS.each do |k,v|
t = data.match(%r{<(.*?) bg=#{k}>(.*?)<\/(.*?)>}m)
data.gsub!(%r{<(.*?) bg=#{k}>(.*?)<\/(.*?)>}m, "\e[1m\e[4#{v}m<\\1>\\2</\\1>#{default_terminal_colors}")
end
end
# Colored text
COLORS.each do |k,v|
data.gsub!(%r{<#{k}>(.*?)</#{k}>}m, "\e[1m\e[3#{v}m\\1#{default_terminal_colors}")
end
data.gsub!(%r{<b>(.*?)</b>}m, "\e[1m\\1#{default_terminal_colors}")
data.gsub!(%r{<line>}m, "---------------------------------------")
data.gsub!(%r{<banner>(.*?)</banner>}m, "\e[33m\e[44m\e[1m\\1#{default_terminal_colors}")
return data
end
end
end
|
Ruby
|
/*
$Id$
*======================================================================
*
* DISCLAIMER
*
* This material was prepared as an account of work sponsored by an
* agency of the United States Government. Neither the United States
* Government nor the United States Department of Energy, nor Battelle,
* nor any of their employees, MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
* ASSUMES ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE ACCURACY,
* COMPLETENESS, OR USEFULNESS OF ANY INFORMATION, APPARATUS, PRODUCT,
* SOFTWARE, OR PROCESS DISCLOSED, OR REPRESENTS THAT ITS USE WOULD NOT
* INFRINGE PRIVATELY OWNED RIGHTS.
*
* ACKNOWLEDGMENT
*
* This software and its documentation were produced with Government
* support under Contract Number DE-AC06-76RLO-1830 awarded by the United
* States Department of Energy. The Government retains a paid-up
* non-exclusive, irrevocable worldwide license to reproduce, prepare
* derivative works, perform publicly and display publicly by or for the
* Government, including the right to distribute to other Government
* contractors.
*
*======================================================================
*
* -- PEIGS routine (version 2.1) --
* Pacific Northwest Laboratory
* July 28, 1995
*
*======================================================================
*/
#include <stdio.h>
#include <math.h>
#include "globalp.c.h"
#define max(a,b) ((a) > (b) ? (a) : (b))
void sonenrm ( n, colA, mapA, norm, iwork, work, info)
Integer *n, *mapA, *iwork, *info;
DoublePrecision **colA, *work, *norm;
{
/******************************************************
*
* C subroutine sonenrm
*
* This routine computes the one norm of a column
* distributed symmetric matrix in packed storage format
*
Arguments
---------
In the following:
INTEGER = "pointer to Integer"
DOUBLE PRECISION = "pointer to DoublePrecision"
me = this processor's id (= mxmynd_())
nprocs = number of allocated processors ( = mxnprc_())
nvecsA = number of entries in mapA equal to me
(= count_list( me, mapA, n ))
sDP = sizeof( DoublePrecision )
n....... (input) INTEGER
size of the matrix A
colA ... (input) array of pointers to DoublePrecision,
length (nvecsA)
The part of matrix A owned by this processor, stored
in packed format, i.e., colA[i] points to the diagonal
element of the i-th column (or equivalently row) of A
owned by this processor, i = 0 to nvecsA-1.
mapA ... (input) INTEGER array, length (n)
The i-th column (or equivalently row) of A is
owned by processor mapA[i], i = 0 to n-1.
norm ... (output) DOUBLE PRECISION
The one-norm of A
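(added note: the one-norm is max_j sum_i |A(i,j)|,
the maximum absolute column sum)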
iwork... (workspace) INTEGER array, length( n+nvecsA )
work.... (workspace) DOUBLE PRECISION array,
length( n + 1 + mxlbuf_() / sDP + 1 )
info.... (output) INTEGER
= 0, not currently used
*/
static Integer IONE = 1;
Integer ll, nprocs, i, me, nvecsA, *mapvecA;
Integer *proclist, jj, k, ii;
Integer *iscrat;
DoublePrecision scl;
DoublePrecision *normvec, *workMX;
extern DoublePrecision dasum_ ();
extern void gsum00();
extern void fil_dbl_list ();
extern Integer mxmynd_();
extern Integer fil_mapvec_();
extern Integer reduce_list2();
extern void fil_dbl_lst();
me = mxmynd_ ();
*info = 0;
ll = *n;
*norm = 0.e0;
iscrat = iwork;
mapvecA = iscrat;
nvecsA = fil_mapvec_ ( &me, &ll, mapA, mapvecA);
iscrat += nvecsA;
if ( nvecsA == 0 )
return;
proclist = iscrat;
nprocs = reduce_list2( *n, mapA, proclist);
iscrat += nprocs;
normvec = work;
workMX = work + *n + 1;
fil_dbl_lst ( *n, normvec, 0.0e0); /* zero out normvec */
for ( i = 0; i < nvecsA; i++ ) {
jj = mapvecA[i];
ii = ll - jj;
scl = dasum_ ( &ii, colA[i], &IONE );
normvec[jj] = scl;
}
for ( i = 0; i < nvecsA; i++ ) {
jj = mapvecA[i];
for ( k = 1; k < *n-jj; k++ )
normvec[jj+k] += fabs( colA[i][k] );
}
gsum00( (char *) normvec, ll * sizeof(DoublePrecision), 5, 10, proclist[0], nprocs, proclist, workMX);
scl = 0.0e0;
for ( i = 0; i < ll; i++ )
scl = max( normvec[i], scl);
*norm = scl;
return;
}
|
C
|
#############################################################################
#
# XFOIL
#
# Copyright (C) 2000 Mark Drela
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
#############################################################################
cmake_minimum_required(VERSION 3.5)
project(xfoil LANGUAGES C Fortran VERSION 6.97)
if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
set(CMAKE_BUILD_TYPE Release CACHE STRING
"Choose the type of build; options are Debug Release RelWithDebInfo MinSizeRel"
FORCE)
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY
STRINGS
Debug
Release
RelWithDebInfo
MinSizeRel)
endif()
include(CTest)
if(CMAKE_Fortran_COMPILER_ID STREQUAL GNU)
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -std=legacy")
endif()
option(DOUBLE_PRECISION
"Make the real and complex types eight bytes long" OFF)
if(DOUBLE_PRECISION)
if(CMAKE_Fortran_COMPILER_ID STREQUAL Intel)
if(WIN32)
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} /real-size:64")
else()
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -real-size 64")
endif()
elseif(CMAKE_Fortran_COMPILER_ID STREQUAL GNU)
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -fdefault-real-8")
endif()
endif()
include(GNUInstallDirs)
set(ORRS_DIR "${CMAKE_SOURCE_DIR}/orrs")
add_subdirectory(orrs)
add_subdirectory(osrc)
add_subdirectory(plotlib)
add_subdirectory(src)
set(XFOIL_DOCS version_notes.txt xfoil_doc.txt)
install(FILES ${XFOIL_DOCS} DESTINATION ${CMAKE_INSTALL_DOCDIR})
include(CMakePackageConfigHelpers)
configure_package_config_file(xfoil-config.cmake.in xfoil-config.cmake.in
INSTALL_DESTINATION ${CMAKE_INSTALL_DATADIR}/${PROJECT_NAME}
PATH_VARS CMAKE_INSTALL_BINDIR
NO_SET_AND_CHECK_MACRO NO_CHECK_REQUIRED_COMPONENTS_MACRO)
file(GENERATE
OUTPUT xfoil-config.cmake
INPUT "${CMAKE_CURRENT_BINARY_DIR}/xfoil-config.cmake.in")
write_basic_package_version_file(xfoil-config-version.cmake
COMPATIBILITY SameMajorVersion)
install(FILES
"${CMAKE_CURRENT_BINARY_DIR}/xfoil-config.cmake"
"${CMAKE_CURRENT_BINARY_DIR}/xfoil-config-version.cmake"
DESTINATION ${CMAKE_INSTALL_DATADIR}/${PROJECT_NAME})
include(CPack)
|
CMake
|
\relax
\catcode`:\active
\catcode`;\active
\catcode`!\active
\catcode`?\active
\catcode`"\active
\ifx\hyper@anchor\@undefined
\global \let \oldcontentsline\contentsline
\gdef \contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
\global \let \oldnewlabel\newlabel
\gdef \newlabel#1#2{\newlabelxx{#1}#2}
\gdef \newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
\AtEndDocument{\let \contentsline\oldcontentsline
\let \newlabel\oldnewlabel}
\else
\global \let \hyper@last\relax
\fi
\@input{FrontBackMatter/Titlepage.aux}
\reset@newl@bel
\select@language{french}
\@writefile{toc}{\select@language{french}}
\@writefile{lof}{\select@language{french}}
\@writefile{lot}{\select@language{french}}
\providecommand\mph@setcol[2]{}
\@writefile{toc}{\vspace {-\cftbeforepartskip }}
\@writefile{lof}{\deactivateaddvspace }
\@writefile{lot}{\deactivateaddvspace }
\@writefile{lol}{\deactivateaddvspace }
\select@language{american}
\@writefile{toc}{\select@language{american}}
\@writefile{lof}{\select@language{american}}
\@writefile{lot}{\select@language{american}}
\@input{FrontBackMatter/Titleback.aux}
\@input{FrontBackMatter/Dedication.aux}
\mph@setcol{ii:iv}{\mph@nr}
\@input{FrontBackMatter/Abstract.aux}
\mph@setcol{ii:vi}{\mph@nr}
\@input{FrontBackMatter/Publication.aux}
\mph@setcol{ii:viii}{\mph@nr}
\@input{FrontBackMatter/Acknowledgments.aux}
\mph@setcol{ii:x}{\mph@nr}
\@input{FrontBackMatter/Contents.aux}
\@writefile{toc}{\contentsline {part}{i\hspace {1em}\spacedlowsmallcaps {Some Kind of Manual}}{1}{part.1}}
\mph@setcol{ii:1}{\mph@nr}
\mph@setcol{ii:2}{\mph@nr}
\@input{Chapters/Chapter01.aux}
\@writefile{toc}{\contentsline {part}{ii\hspace {1em}\spacedlowsmallcaps {The Showcase}}{9}{part.2}}
\mph@setcol{ii:9}{\mph@nr}
\mph@setcol{ii:10}{\mph@nr}
\@input{Chapters/Chapter02.aux}
\@input{Chapters/Chapter03.aux}
\@writefile{toc}{\contentsline {part}{iii\hspace {1em}\spacedlowsmallcaps {Appendix}}{19}{part.3}}
\mph@setcol{ii:19}{\mph@nr}
\mph@setcol{ii:20}{\mph@nr}
\@input{Chapters/Chapter0A.aux}
\@input{FrontBackMatter/Bibliography.aux}
\@input{FrontBackMatter/Colophon.aux}
\mph@setcol{ii:24}{\mph@nr}
\@input{FrontBackMatter/Declaration.aux}
\gdef\mph@lastpage{26}
\csname mph@do@warn\endcsname
\global\@altsecnumformattrue
|
TeX
|
(ns leiningen.new.thing-babel
(:require
[leiningen.new.templates :as tpl]
[leiningen.core.main :as main]
[clojure.string :as str])
(:import
[java.util Locale Calendar]))
(def licenses
{"asl" {:name "Apache Software License 2.0" :url "http://www.apache.org/licenses/LICENSE-2.0"}
"epl" {:name "Eclipse Public License" :url "http://www.eclipse.org/legal/epl-v10.html"}
"mit" {:name "MIT License" :url "http://opensource.org/licenses/MIT"}})
(def render (tpl/renderer "thing-babel"))
(defn group-name
"Replace hyphens with underscores."
[^String s]
(let [idx (.indexOf s "/")]
(when (pos? idx)
(subs s 0 idx))))
(defn opts-info
[opts ks]
(doseq [[k desc] (partition 2 ks)]
(main/info (format "%-24s: %s" desc (str (opts k))))))
(defn thing-babel
"Literal programming template for org-mode babel projects"
[name & args]
(let [opts (->> args
(partition 2)
(map #(vector (keyword (first %)) (second %)))
(into {}))
{:keys [author license target url]
:or {author (System/getProperty "user.name")
license "epl"
target "babel"
url "https://github.com/"}} opts
tangle-target (if (and (seq target) (not= \/ (last target)))
(str target \/)
target)
target (if (empty? target) "project root" target)
group (group-name name)
license (.toLowerCase license)
opts (merge
{:name (tpl/project-name name)
:group group
:fqname name
:sanitized (tpl/name-to-path name)
:author author
:url url
:desc "FIXME: write description"
:license-name (get-in licenses [license :name])
:license-url (get-in licenses [license :url])
:ns-root (tpl/sanitize-ns name)
:ns-root-path (tpl/name-to-path name)
:tangle-target tangle-target
:target target
:tzone (-> (Locale/getDefault)
(Calendar/getInstance)
(.get Calendar/ZONE_OFFSET)
(/ (* 1000 60 60)))}
opts)]
(main/info (str "Generating fresh literate programming project: " name))
(opts-info opts
[:name "generated project dir"
:group "artefact group ID"
:url "project url"
:author "project author"
:author-url "author url"
:email "author email"
:tzone "author timezone"
:license-name "license"
:desc "description"
:tangle-target "path for gen sources"
:ns-root "project root namespace"])
(tpl/->files opts
["README.org" (render "readme-tpl.org" opts)]
["src/setup.org" (render "setup.org" opts)]
["src/core.org" (render "core.org" opts)]
["src/libraryofbabel.org" (render "libraryofbabel.org" opts)]
["test/core.org" (render "test.org" opts)]
["tangle.sh" (render "tangle.sh") :executable true]
["tangle-all.sh" (render "tangle-all.sh") :executable true])))
|
Clojure
|
-- Category: Database Engine Configuration
SET NOCOUNT ON;
SELECT
CONVERT(nvarchar(128), SERVERPROPERTY('ServerName')) AS ServerName,
[name] AS [EndpointName],
COALESCE(SUSER_NAME(principal_id), '') AS [Owner],
COALESCE([protocol_desc], '') AS [ProtocolDesc],
COALESCE([type_desc], '') AS [PayloadType],
COALESCE([state_desc], '') AS [StateDesc],
[is_admin_endpoint] AS [Is AdminEndpoint]
FROM sys.endpoints
WHERE [endpoint_id] > 5;
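-- Note (added): endpoint_ids 1-5 belong to the built-in system endpoints (the default
-- TSQL endpoints and the dedicated admin connection), so only user-created endpoints
-- are returned.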
|
TSQL
|
package chap06
import com.cra.figaro.library.atomic.discrete.Geometric
import com.cra.figaro.library.atomic.continuous.{Beta, Normal}
import com.cra.figaro.library.collection.VariableSizeArray
import com.cra.figaro.algorithm.sampling.Importance
import com.cra.figaro.language.Universe
import com.cra.figaro.language.Flip
object NewProducts {
def runExperiment(rNDLevel: Double) {
Universe.createNew()
val numNewProducts = Geometric(rNDLevel)
val productQuality = VariableSizeArray(numNewProducts, i => Beta(1, i + 1))
val productSalesRaw = productQuality.chain(Normal(_, 0.5))
val productSales = productSalesRaw.map(_.max(0))
val totalSales = productSales.foldLeft(0.0)(_ + _)
val algorithm = Importance(totalSales)
algorithm.start()
Thread.sleep(5000)
algorithm.stop()
println("With R&D at " + rNDLevel + ", expected sales will be " + algorithm.mean(totalSales))
algorithm.kill()
}
def main(args: Array[String]) {
for { i <- 0.05 to 1.0 by 0.1 } { runExperiment(i) }
}
}
|
Scala
|
FROM elixir:1.17.1-otp-26
# Single RUN statement, otherwise intermediate images are created
# https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run
RUN apt-get update &&\
apt-get install -y libmagic-dev cmake libimage-exiftool-perl ffmpeg &&\
mix local.hex --force &&\
mix local.rebar --force
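# Build sketch (added; the image tag is hypothetical):
#   docker build -t elixir-media .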
|
Dockerfile
|
# Random samples from determinantal point processes
using LinearAlgebra # assumed context: provides Symmetric, Eigen, and qr!
import Base: rand # assumed context: this file extends Base.rand
"""
Computes a random sample from the determinantal point process defined by the
spectral factorization object `L`.
Inputs:
`L`: `Eigen` factorization object of an N x N matrix
Output:
`Y`: A `Vector{Int}` with entries in [1:N].
References:
Algorithm 18 of \\cite{HKPV05}, as described in Algorithm 1 of \\cite{KT12}.
@article{HKPV05,
author = {Hough, J Ben and Krishnapur, Manjunath and Peres, Yuval and Vir\'{a}g, B\'{a}lint},
doi = {10.1214/154957806000000078},
journal = {Probability Surveys},
pages = {206--229},
title = {Determinantal Processes and Independence},
volume = {3},
year = {2005}
archivePrefix = {arXiv},
eprint = {0503110},
}
@article{KT12,
author = {Kulesza, Alex and Taskar, Ben},
doi = {10.1561/2200000044},
journal = {Foundations and Trends in Machine Learning},
number = {2-3},
pages = {123--286},
title = {Determinantal Point Processes for Machine Learning},
volume = {5},
year = {2012},
archivePrefix = {arXiv},
eprint = {1207.6083},
}
TODO Check loss of orthogonality - a tip from Zelda Mariet
"""
function rand(L::LinearAlgebra.Eigen{S,T}) where {S<:Real,T}
N = length(L.values)
J = Int[]
for n=1:N
λ = L.values[n]
rand() < λ/(λ+1) && push!(J, n)
end
V = L.vectors[:, J]
Y = Int[]
nV = size(V, 2)
nV == 0 && return Y #no eigenvalue was selected: the DPP sample is empty
while true
# Select i from 𝒴=[1:N] (ground set) with probabilities
# Pr(i) = 1/|V| Σ_{v∈V} (v⋅eᵢ)²
#Compute selection probabilities
Pr = zeros(N)
for i=1:N
for j=1:nV #TODO this loop is a bottleneck - why?
Pr[i] += (V[i,j])^2 #ith entry of jth eigenvector
end
Pr[i] /= nV
end
@assert abs(1-sum(Pr)) < N*eps() #Check normalization
#Simple discrete sampler
i, ρ = N, rand()
for j=1:N
if ρ < Pr[j]
i = j
break
else
ρ -= Pr[j]
end
end
push!(Y, i)
nV == 1 && break #Done
#V = V⊥ #an orthonormal basis for the subspace of V ⊥ eᵢ
V[i, :] .= 0 #Project out eᵢ
V = Matrix(qr!(V).Q)[:, 1:nV-1]
nV = size(V, 2)
end
return Y
end
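# Usage sketch (added; assumes only the stdlib LinearAlgebra):
#   K = [2.0 0.5; 0.5 1.0]    # symmetric positive-definite L-ensemble kernel
#   F = eigen(Symmetric(K))   # spectral factorization, an Eigen object
#   Y = rand(F)               # random subset of 1:2, possibly empty, e.g. Int[2]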
|
Julia
|
del *.log
del *.syntax
del *.av
del oop\*.log
del oop\*.syntax
del oop\*.av
"..\shell_project\ori.exe" -f "..\test\consts.php" -nologo
"..\shell_project\ori.exe" -f "..\test\vars.php" -nologo
"..\shell_project\ori.exe" -f "..\test\operators.php" -nologo
"..\shell_project\ori.exe" -f "..\test\globals.php" -nologo
"..\shell_project\ori.exe" -f "..\test\core.php" -nologo
"..\shell_project\ori.exe" -f "..\test\math.php" -nologo
"..\shell_project\ori.exe" -f "..\test\arrays.php" -nologo
"..\shell_project\ori.exe" -f "..\test\cycles.php" -nologo
"..\shell_project\ori.exe" -f "..\test\foreach.php" -nologo
"..\shell_project\ori.exe" -f "..\test\core_string.php" -nologo
"..\shell_project\ori.exe" -f "..\test\core_array.php" -nologo
"..\shell_project\ori.exe" -f "..\test\evals.php" -nologo
"..\shell_project\ori.exe" -f "..\test\functions.php" -nologo
"..\shell_project\ori.exe" -f "..\test\typed.php" -nologo
"..\shell_project\ori.exe" -f "..\test\leak.php" -nologo
"..\shell_project\ori.exe" -f "..\test\complex1.php" -nologo
"..\shell_project\ori.exe" -f "..\test\oop\constants.php"
"..\shell_project\ori.exe" -f "..\test\oop\static_vars.php"
pause
|
Batchfile
|
%Schools Out!
subject(english).
subject(gym).
subject(history).
subject(math).
subject(science).
state(california).
state(florida).
state(maine).
state(oregon).
state(virginia).
activity(antiquing).
activity(camping).
activity(sightseeing).
activity(spelunking).
activity(water_skiing).
name(appleton).
name(gross).
name(knight).
name(mcEvoy).
name(parnell).
solve :-
subject(Appleton_subject),
subject(Gross_subject),
subject(Knight_subject),
subject(Mcevoy_subject),
subject(Parnell_subject),
all_different([Appleton_subject, Gross_subject, Knight_subject, Mcevoy_subject, Parnell_subject]),
state(Appleton_state),
state(Gross_state),
state(Knight_state),
state(Mcevoy_state),
state(Parnell_state),
all_different([Appleton_state, Gross_state, Knight_state, Mcevoy_state, Parnell_state]),
activity(Appleton_activity), activity(Gross_activity), activity(Knight_activity), activity(Mcevoy_activity), activity(Parnell_activity),
all_different([Appleton_activity, Gross_activity, Knight_activity, Mcevoy_activity, Parnell_activity]),
Groupings = [ [appleton, Appleton_subject, Appleton_state, Appleton_activity],
[gross, Gross_subject, Gross_state, Gross_activity],
[knight, Knight_subject, Knight_state, Knight_activity],
[mcevoy, Mcevoy_subject, Mcevoy_state, Mcevoy_activity],
[parnell, Parnell_subject, Parnell_state, Parnell_activity] ],
%Clue One
%Gross_subject = math || Gross_subject = science
%if Gross_activity = antiquing -> Gross_state = florida
%else Gross_state = california
%member(name, subject, state, activity)
( member([gross, math, _, _], Groupings) ; member([gross, science, _, _], Groupings) ),
( member([gross, _, florida, antiquing], Groupings) ; member([gross, _, california, _], Groupings) ),
%Clue Two
%person_subject = science && person_activity = water_skiing
%person_state = florida || person_state = california
%Mcevoy_subject = history && Mcevoy_state = maine || Mcevoy_state = oregon
( member([_, science, florida, water_skiing], Groupings) ; member([_, science, california, water_skiing], Groupings) ),
( member([mcevoy, history, maine, _], Groupings) ; member([mcevoy, history, oregon, _], Groupings) ),
%Clue Three
%(Appleton_state = virginia && Appleton_subject = english) || Parnell_state = virginia
%Parnell_activity = spelunking
( member([appleton, english, virginia, _], Groupings) ; member([parnell, _, virginia, _], Groupings) ),
member([parnell, _, _, spelunking], Groupings),
%Clue Four
%person_state = maine && person_subject != gym && person_activity != sightseeing
\+ member([_, _, maine, sightseeing], Groupings),
\+ member([_, gym, maine, _], Groupings),
%Clue Five
%Gross_activity != camping
%woman_activity = antiquing
%member([gross, _, _, _], Groupings), \+ member([_, _, _, camping], Groupings),
\+ member([gross, _, _, camping], Groupings),
( member([gross, _, _, antiquing], Groupings) ;
member([appleton, _, _, antiquing], Groupings) ;
member([parnell, _, _, antiquing], Groupings) ),
tell(appleton, Appleton_subject, Appleton_state, Appleton_activity),
tell(gross, Gross_subject, Gross_state, Gross_activity),
tell(knight, Knight_subject, Knight_state, Knight_activity),
tell(mcevoy, Mcevoy_subject, Mcevoy_state, Mcevoy_activity),
tell(parnell, Parnell_subject, Parnell_state, Parnell_activity).
all_different([H | T]) :- member(H,T), !, fail.
all_different([_ | T]) :- all_different(T).
all_different([_]).
tell(N, SUB, ST, A) :-
write(N), write(' who teaches '),
write(SUB), write(' is vacationing to '),
write(ST), write(' and their activity is '),
write(A), nl.
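% Usage (added): consult this file, then query
%   ?- solve.
% to print each teacher's subject, state, and activity.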
|
Prolog
|
//
// PopoverViewControllerTests
// ApptivatorTests
//
import XCTest
@testable import Apptivator
class PopoverViewControllerTests: XCTestCase {}
|
Swift
|
defmodule Explorer.Repo.Migrations.ChangeBlockSizeToNullable do
use Ecto.Migration
def up do
alter table(:blocks) do
modify(:size, :integer, null: true)
end
end
def down do
alter table(:blocks) do
modify(:size, :integer, null: false)
end
end
end
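# Usage note (added): `mix ecto.migrate` applies up/1 (size becomes nullable);
# `mix ecto.rollback` applies down/1 to restore the NOT NULL constraint.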
|
Elixir
|
#
# %CopyrightBegin%
#
# Copyright Ericsson AB 1998-2011. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
# compliance with the License. You should have received a copy of the
# Erlang Public License along with this software. If not, it can be
# retrieved online at http://www.erlang.org/.
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and limitations
# under the License.
#
# %CopyrightEnd%
#
#
include $(ERL_TOP)/make/target.mk
include $(ERL_TOP)/make/$(TARGET)/otp.mk
CC = @CC@
LIBS = @LIBS@
LIBDIR = ../priv/lib/$(TARGET)
OBJDIR = ../priv/obj/$(TARGET)
INCDIR = ../include
ERL_INTERFACE_FLAGS = \
-I$(ERL_TOP)/lib/erl_interface/include \
-I$(ERL_TOP)/lib/erl_interface/src
# ----------------------------------------------------
# Application version
# ----------------------------------------------------
include ../vsn.mk
VSN=$(IC_VSN)
# ----------------------------------------------------
# Release directory specification
# ----------------------------------------------------
RELSYSDIR = $(RELEASE_PATH)/lib/ic-$(VSN)
# ----------------------------------------------------
# File Specs
# ----------------------------------------------------
IDL_FILES = \
$(INCDIR)/erlang.idl
ifeq ($(findstring win32,$(TARGET)),win32)
USING_MINGW=@MIXED_CYGWIN_MINGW@
ifeq ($(USING_MINGW),yes)
AR_OUT = rcv
CC_FLAGS =
LIBRARY = $(LIBDIR)/libic.a
SKIP_BUILDING_BINARIES := false
else
LIBRARY = $(LIBDIR)/ic.lib
AR_OUT = -out:
CC_FLAGS = -MT
endif
ifeq ($(HOST_OS),)
HOST_OS := $(shell $(ERL_TOP)/erts/autoconf/config.guess)
endif
ifeq ($(findstring solaris,$(HOST_OS)),solaris)
SKIP_BUILDING_BINARIES := true
endif
else
AR_OUT = rcv
CC_FLAGS = @DED_CFLAGS@
LIBRARY = $(LIBDIR)/libic.a
SKIP_BUILDING_BINARIES := false
endif
C_FILES = \
ic.c \
ic_tmo.c \
oe_ei_encode_version.c \
oe_ei_encode_long.c \
oe_ei_encode_ulong.c \
oe_ei_encode_double.c \
oe_ei_encode_char.c \
oe_ei_encode_string.c \
oe_ei_encode_atom.c \
oe_ei_encode_pid.c \
oe_ei_encode_port.c \
oe_ei_encode_ref.c \
oe_ei_encode_term.c \
oe_ei_encode_tuple_header.c \
oe_ei_encode_list_header.c \
oe_ei_encode_longlong.c \
oe_ei_encode_ulonglong.c \
oe_ei_encode_wchar.c \
oe_ei_encode_wstring.c \
oe_ei_decode_longlong.c \
oe_ei_decode_ulonglong.c \
oe_ei_decode_wchar.c \
oe_ei_decode_wstring.c \
oe_ei_code_erlang_binary.c
H_FILES = $(INCDIR)/ic.h
OBJ_FILES= $(C_FILES:%.c=$(OBJDIR)/%.o)
ALL_CFLAGS = @CFLAGS@ @DEFS@ -I$(INCDIR) $(ERL_INTERFACE_FLAGS) $(CFLAGS)
# ----------------------------------------------------
# Targets
# ----------------------------------------------------
ifeq ($(SKIP_BUILDING_BINARIES), true)
debug opt:
else
debug opt: $(LIBRARY)
endif
clean:
rm -f $(LIBRARY) $(OBJ_FILES)
rm -f core *~
docs:
# ----------------------------------------------------
# Special Build Targets
# ----------------------------------------------------
_create_dirs := $(shell mkdir -p $(OBJDIR) $(LIBDIR))
$(LIBRARY): $(OBJ_FILES)
-$(AR) $(AR_OUT) $@ $(OBJ_FILES)
-$(RANLIB) $@
$(OBJDIR)/%.o: %.c
$(CC) $(CC_FLAGS) -c -o $@ $(ALL_CFLAGS) $<
# ----------------------------------------------------
# Release Target
# ----------------------------------------------------
include $(ERL_TOP)/make/otp_release_targets.mk
release_spec: opt
$(INSTALL_DIR) $(RELSYSDIR)/c_src
$(INSTALL_DIR) $(RELSYSDIR)/include
$(INSTALL_DIR) $(RELSYSDIR)/priv/lib
$(INSTALL_DATA) ic.c ic_tmo.c $(RELSYSDIR)/c_src
$(INSTALL_DATA) $(IDL_FILES) $(H_FILES) $(RELSYSDIR)/include
$(INSTALL_DATA) $(LIBRARY) $(RELSYSDIR)/priv/lib
release_docs_spec:
|
Makefile
|
(ns argumentica.db.file-transaction-log
(:require (argumentica [transaction-log :as transaction-log])
[me.raynes.fs :as fs]
[clojure.java.io :as io]
[clojure.edn :as edn]
[clojure.string :as string]
[argumentica.util :as util])
(:import [java.nio.file Files Paths OpenOption LinkOption]
[java.nio.file.attribute FileAttribute])
(:use clojure.test))
(defrecord FileTransactionLog [log-file-path state-atom]
java.io.Closeable
(close [this]
(.close (:output-stream @state-atom))))
(defn log-to-string [log]
(string/join "\n"
(map pr-str
log)))
(defn reset-log-file! [log-file-path log]
(let [temporary-log-file-path (str log-file-path ".new")]
(spit temporary-log-file-path
(log-to-string log))
(fs/rename temporary-log-file-path
log-file-path)))
(defn read-and-fix-log! [log-file-path]
(with-open [reader (io/reader log-file-path)]
(loop [lines (line-seq reader)
log (sorted-map)]
(if-let [line (first lines)]
(if-let [[transaction-number statements] (try (edn/read-string line)
(catch Exception exception
(reset-log-file! log-file-path log)
nil))]
(recur (rest lines)
(assoc log transaction-number statements))
log)
log))))
(defn create [log-file-path]
(->FileTransactionLog log-file-path
(atom {:in-memory-log (if (fs/exists? log-file-path)
(read-and-fix-log! log-file-path)
(sorted-map))
:output-stream (io/output-stream log-file-path
:append true)})))
(defn write-to-log-file! [output-stream transaction-number statements]
(.write output-stream
(.getBytes (prn-str [transaction-number statements])
"UTF-8"))
(.flush output-stream))
(defn add-transaction! [state transaction-number statements]
(when (not (:is-transient? state))
(write-to-log-file! (:output-stream state)
transaction-number
statements))
(update state
:in-memory-log
assoc
transaction-number
statements))
(deftest test-log-to-string
(is (= "[1 [[1 :name :set \"Foo 1\"] [2 :name :set \"Foo 2\"]]]\n[2 [[1 :name :set \"Bar 1\"] [2 :name :set \"Bar 2\"]]]"
(log-to-string (sorted-map 1 [[1 :name :set "Foo 1"]
[2 :name :set "Foo 2"]]
2 [[1 :name :set "Bar 1"]
[2 :name :set "Bar 2"]])))))
(defn truncate! [state log-file-path first-preserved-transaction-number]
(let [truncated-log (util/filter-sorted-map-keys (:in-memory-log state)
(fn [transaction-number]
(<= first-preserved-transaction-number
transaction-number)))]
(when (not (:is-transient? state))
(.close (:output-stream state))
(reset-log-file! log-file-path truncated-log))
(-> state
(assoc :in-memory-log truncated-log)
(cond-> (:is-transient? state)
(assoc :output-stream (io/output-stream log-file-path :append true))))))
(defn synchronously-apply-to-state! [file-transaction-log function & arguments]
(locking (:state-atom file-transaction-log)
(apply swap!
(:state-atom file-transaction-log)
function
arguments))
file-transaction-log)
(defmethod transaction-log/truncate! FileTransactionLog
[this first-preserved-transaction-number]
(synchronously-apply-to-state! this
truncate!
(:log-file-path this)
first-preserved-transaction-number))
(defmethod transaction-log/last-transaction-number FileTransactionLog
[this]
(first (last (:in-memory-log @(:state-atom this)))))
(defmethod transaction-log/add!-method FileTransactionLog
[this transaction-number statements]
(synchronously-apply-to-state! this
add-transaction!
transaction-number
statements))
(defn transient? [file-transaction-log]
(:is-transient? @(:state-atom file-transaction-log)))
(defn close! [file-transaction-log]
(if (not (transient? file-transaction-log))
(.close (:output-stream @(:state-atom file-transaction-log)))))
(defmethod transaction-log/close! FileTransactionLog
[this]
(close! this))
(defmethod transaction-log/subseq FileTransactionLog
[this first-transaction-number]
(subseq (:in-memory-log @(:state-atom this))
>=
first-transaction-number))
(defmethod transaction-log/make-transient! FileTransactionLog
[file-transaction-log]
(assert (not (transient? file-transaction-log)))
(synchronously-apply-to-state! file-transaction-log
(fn [state]
(close! file-transaction-log)
(fs/delete (:log-file-path file-transaction-log))
(assoc state :is-transient? true))))
(defmethod transaction-log/make-persistent! FileTransactionLog
[file-transaction-log]
(assert (transient? file-transaction-log))
(synchronously-apply-to-state! file-transaction-log
(fn [state]
(reset-log-file! (:log-file-path file-transaction-log)
(:in-memory-log state))
(assoc state
:is-transient? false
:output-stream (io/output-stream (:log-file-path file-transaction-log)
:append true)))))
(comment
(write-to-log-file! "data/temp/log"
3
[[1 :name :set "Foo 4"]])
(read-and-fix-log! "data/temp/log")
(with-open [log (create "data/temp/log")]
(doto log
#_(transaction-log/make-transient!)
(transaction-log/add! #{[1 :name :set "Bar 1"]
[2 :name :set "Bar 2"]})
(transaction-log/add! #{[1 :name :set "Baz 1"]})
#_(transaction-log/truncate! 2)
(transaction-log/add! #{[1 :name :set "Foo 2"]})
#_(transaction-log/make-persistent!))
#_(prn (transaction-log/subseq log 2))
#_(prn (transaction-log/last-transaction-number log))))
|
Clojure
|
package leafnodes
import (
"github.com/botmetrics/go-botmetrics/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/failer"
"github.com/botmetrics/go-botmetrics/Godeps/_workspace/src/github.com/onsi/ginkgo/types"
"time"
)
type SuiteNode interface {
Run(parallelNode int, parallelTotal int, syncHost string) bool
Passed() bool
Summary() *types.SetupSummary
}
type simpleSuiteNode struct {
runner *runner
outcome types.SpecState
failure types.SpecFailure
runTime time.Duration
}
func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
t := time.Now()
node.outcome, node.failure = node.runner.run()
node.runTime = time.Since(t)
return node.outcome == types.SpecStatePassed
}
func (node *simpleSuiteNode) Passed() bool {
return node.outcome == types.SpecStatePassed
}
func (node *simpleSuiteNode) Summary() *types.SetupSummary {
return &types.SetupSummary{
ComponentType: node.runner.nodeType,
CodeLocation: node.runner.codeLocation,
State: node.outcome,
RunTime: node.runTime,
Failure: node.failure,
}
}
func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
return &simpleSuiteNode{
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
}
}
func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
return &simpleSuiteNode{
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
}
}
|
Go
|
help: ## Print documentation
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
ghcid: ## Run ghcid with the cardano-sl-explorer package
ghcid \
--command "stack ghci cardano-sl-explorer --ghci-options=-fno-code"
ghcid-test: ## Have ghcid run the test suite on successful recompile
ghcid \
--command "stack ghci cardano-sl-explorer:lib cardano-sl-explorer:test:cardano-explorer-test --ghci-options=-fobject-code" \
--test "Main.main"
.PHONY: ghcid ghcid-test help
|
Makefile
|
# Copyright (c) 2016, Ruslan Baratov
# All rights reserved.
cmake_minimum_required(VERSION 3.0)
# Emulate HunterGate:
# * https://github.com/hunter-packages/gate
include("../common.cmake")
project(download-fixesproto)
# download fixesproto
hunter_add_package(fixesproto)
|
CMake
|
/******************************************************************************
* (C) Copyright 2011 KALRAY SA All Rights Reserved
*
* MODULE: mmu_proc_dcache_master_seq_lib.sv
* DEVICE: MMU_PROC_DCACHE VIP
* PROJECT:
* AUTHOR:
* DATE:
*
* ABSTRACT:
*
*******************************************************************************/
`ifndef MMU_PROC_DCACHE_MASTER_SEQ_LIB_SV
`define MMU_PROC_DCACHE_MASTER_SEQ_LIB_SV
//------------------------------------------------------------------------------
//
// CLASS: mmu_proc_dcache_master_base_sequence
//
//------------------------------------------------------------------------------
class mmu_proc_dcache_master_base_sequence extends uvm_sequence #(mmu_proc_dcache_transfer);
typedef mmu_proc_dcache_master_sequencer mmu_proc_dcache_master_sequencer_t;
typedef mmu_proc_dcache_transfer mmu_proc_dcache_transfer_t;
`uvm_object_param_utils(mmu_proc_dcache_master_base_sequence)
string v_name;
// new - constructor
function new(string name="mmu_proc_dcache_master_base_sequence");
super.new(name);
endfunction : new
// Raise in pre_body so the objection is only raised for root sequences.
// There is no need to raise for sub-sequences since the root sequence
// will encapsulate the sub-sequence.
virtual task pre_body();
m_sequencer.uvm_report_info(get_type_name(), $psprintf("%s pre_body() raising an uvm_test_done objection", get_sequence_path()), UVM_HIGH);
uvm_test_done.raise_objection(this);
endtask
// Drop the objection in the post_body so the objection is removed when
// the root sequence is complete.
virtual task post_body();
m_sequencer.uvm_report_info(get_type_name(), $psprintf("%s post_body() dropping an uvm_test_done objection", get_sequence_path()), UVM_HIGH);
uvm_test_done.drop_objection(this);
endtask // post_body
endclass : mmu_proc_dcache_master_base_sequence
//------------------------------------------------------------------------------
//
// CLASS: mmu_proc_dcache_standby_seq
//
//------------------------------------------------------------------------------
class mmu_proc_dcache_standby_seq extends mmu_proc_dcache_master_base_sequence;
`uvm_object_param_utils(mmu_proc_dcache_standby_seq)
// new - constructor
function new(string name="mmu_proc_dcache_standby_seq");
super.new(name);
endfunction : new
// Implement behavior sequence
virtual task body();
endtask // body
endclass : mmu_proc_dcache_standby_seq
//------------------------------------------------------------------------------
// Example sequence
// CLASS: mmu_proc_dcache_trial_seq
//
//------------------------------------------------------------------------------
class proc_dcache_seq extends mmu_proc_dcache_master_base_sequence;
`uvm_object_param_utils(proc_dcache_seq)
// Add sequence parameters
int unsigned lreq_lat;
logic [40:0] le1_dcache_virt_addr_i;
e1_dcache_opc_t le1_dcache_opc_i;
logic le1_glob_acc_i;
logic [3:0] le1_dcache_size_i;
logic le1_non_trapping_i;
// new - constructor
function new(string name="mmu_proc_dcache_trial_seq");
super.new(name);
endfunction : new
mmu_proc_dcache_transfer_t mmu_proc_dcache_trans;
// Implement behavior sequence
virtual task body();
`uvm_info(get_type_name(), $psprintf("Start sequence mmu_proc_dcache_trial_seq"), UVM_LOW)
$cast(mmu_proc_dcache_trans, create_item(mmu_proc_dcache_transfer_t::type_id::get(), m_sequencer, "mmu_proc_dcache_trans"));
start_item(mmu_proc_dcache_trans);
mmu_proc_dcache_trans.v_name = v_name;
if (!(mmu_proc_dcache_trans.randomize() with {
// Transmit sequence parameters
mmu_proc_dcache_trans.req_lat ==lreq_lat;
mmu_proc_dcache_trans.e1_dcache_virt_addr_i == le1_dcache_virt_addr_i;
// mmu_proc_dcache_trans.e1_glob_acc_i == le1_glob_acc_i;
mmu_proc_dcache_trans.e1_dcache_size_i == le1_dcache_size_i;
mmu_proc_dcache_trans.e1_non_trapping_i == le1_non_trapping_i;
mmu_proc_dcache_trans.e1_dcache_opc == le1_dcache_opc_i;
}))
`uvm_fatal(get_type_name(), $psprintf("mmu_proc_dcache_trial_seq: randomization error"))
finish_item(mmu_proc_dcache_trans);
`uvm_info(get_type_name(), "End sequence mmu_proc_dcache_trial_seq", UVM_LOW)
endtask // body
endclass : proc_dcache_seq
`endif
|
SystemVerilog
|
<#
.SYNOPSIS
Function to retrieve the size of a directory and the number of files and directories inside of it.
.DESCRIPTION
Function to retrieve the size of a directory and the number of files and directories inside of it.
.PARAMETER Path
In the parameter Path you can specify the directory you want to query. This can be both a local or remote (UNC) path.
.PARAMETER OutputFormat
In the parameter OutputFormat you can specify the format in which the directory size should be outputted. Possible formats are KB, MB and GB. The default is GB.
.PARAMETER NoRecurse
When using the switch NoRecurse the function will only query the directory specified in the Path parameter and will not query child directories and files.
.EXAMPLE
PS C:\>Get-FSDirectorySize -Path C:\Windows
This command will retrieve the size (in GB) of the directory C:\Windows and the number of files and directories inside of it.
.NOTES
Author : Ingvald Belmans
Website : http://www.supersysadmin.com
Version : 1.0
Changelog:
- 1.0 (2015-12-31) Initial version.
.LINK
http://www.supersysadmin.com
#>
function Get-FSDirectorySize
{
[CmdletBinding()]
Param
(
[Parameter(
Mandatory=$true,
ValueFromPipeline=$true,
ValueFromPipelineByPropertyName=$true
)
]
[String]
$Path,
[validateset('KB','MB','GB')]
[string]
$OutputFormat = "GB",
[switch]
$NoRecurse
)
Begin
{
}
Process
{
Write-Verbose -Message "Testing if path '$Path' exists."
if (Test-Path -Path $Path)
{
Write-Verbose -Message "Path '$Path' exists."
$DirectorySize = @()
$DirectorySizeObject = New-Object -TypeName System.Object
if ($NoRecurse)
{
Write-Verbose -Message "Performing a non-recursive search on path '$Path'."
$QueryDirectory = Get-ChildItem -Path $Path -ErrorVariable QueryDirectoryErrors -ErrorAction SilentlyContinue
}
else
{
Write-Verbose -Message "Performing a recursive search on path '$Path'."
$QueryDirectory = Get-ChildItem -Path $Path -Recurse -ErrorVariable QueryDirectoryErrors -ErrorAction SilentlyContinue
}
foreach ($QueryDirectoryError in $QueryDirectoryErrors)
{
Write-Warning -Message $QueryDirectoryError
}
$DirectorySizeObject | Add-Member -MemberType NoteProperty -Name "Directory" -Value $Path
Write-Verbose -Message "Calculating size of path '$Path'."
$QueryDirectorySize = $QueryDirectory | Measure-Object -Property Length -Sum
if ($OutputFormat -eq "KB")
{
Write-Verbose -Message "Setting OutputFormat to KB."
$QueryDirectorySizeFormattedHeader = "Size(KB)"
$QueryDirectorySizeFormatted = "{0:N2}" -f ($QueryDirectorySize.Sum / 1KB)
}
elseif ($OutputFormat -eq "MB")
{
Write-Verbose -Message "Setting OutputFormat to MB."
$QueryDirectorySizeFormattedHeader = "Size(MB)"
$QueryDirectorySizeFormatted = "{0:N2}" -f ($QueryDirectorySize.Sum / 1MB)
}
elseif ($OutputFormat -eq "GB")
{
Write-Verbose -Message "Setting OutputFormat to GB."
$QueryDirectorySizeFormattedHeader = "Size(GB)"
$QueryDirectorySizeFormatted = "{0:N2}" -f ($QueryDirectorySize.Sum / 1GB)
}
$DirectorySizeObject | Add-Member -MemberType NoteProperty -Name $QueryDirectorySizeFormattedHeader -Value $QueryDirectorySizeFormatted
Write-Verbose -Message "Calculating amount of directories in path '$Path'."
$QueryDirectoryDirectories = $QueryDirectory | Where-Object -FilterScript {$_.PSIsContainer -eq $true}
$DirectorySizeObject | Add-Member -MemberType NoteProperty -Name "Directories" -Value $QueryDirectoryDirectories.Count
Write-Verbose -Message "Calculating amount of files in path '$Path'."
$QueryDirectoryFiles = $QueryDirectory | Where-Object -FilterScript {$_.PSIsContainer -eq $false}
$DirectorySizeObject | Add-Member -MemberType NoteProperty -Name "Files" -Value $QueryDirectoryFiles.Count
$DirectorySize += $DirectorySizeObject
Write-Output -InputObject $DirectorySize
}
else
{
Write-Warning -Message "Path '$path' does not exist."
break
}
}
End
{
}
}
|
PowerShell
|
\hypertarget{class_image_app_1_1migrations_1_10003__auto__20180818__1425_1_1_migration}{}\section{Image\+App.\+migrations.0003\+\_\+auto\+\_\+20180818\+\_\+1425.Migration Class Reference}
\label{class_image_app_1_1migrations_1_10003__auto__20180818__1425_1_1_migration}\index{Image\+App.\+migrations.\+0003\+\_\+auto\+\_\+20180818\+\_\+1425.\+Migration@{Image\+App.\+migrations.\+0003\+\_\+auto\+\_\+20180818\+\_\+1425.\+Migration}}
Inheritance diagram for Image\+App.\+migrations.0003\+\_\+auto\+\_\+20180818\+\_\+1425.Migration\+:\begin{figure}[H]
\begin{center}
\leavevmode
\includegraphics[height=2.000000cm]{class_image_app_1_1migrations_1_10003__auto__20180818__1425_1_1_migration}
\end{center}
\end{figure}
\subsection*{Static Public Attributes}
\begin{DoxyCompactItemize}
\item
list \mbox{\hyperlink{class_image_app_1_1migrations_1_10003__auto__20180818__1425_1_1_migration_a33c13fdbeb1e5e28d03d1a1fc391dad3}{dependencies}}
\item
list \mbox{\hyperlink{class_image_app_1_1migrations_1_10003__auto__20180818__1425_1_1_migration_aae609b480f1a2542bd52396572fc1574}{operations}}
\end{DoxyCompactItemize}
\subsection{Member Data Documentation}
\mbox{\Hypertarget{class_image_app_1_1migrations_1_10003__auto__20180818__1425_1_1_migration_a33c13fdbeb1e5e28d03d1a1fc391dad3}\label{class_image_app_1_1migrations_1_10003__auto__20180818__1425_1_1_migration_a33c13fdbeb1e5e28d03d1a1fc391dad3}}
\index{Image\+App\+::migrations\+::0003\+\_\+auto\+\_\+20180818\+\_\+1425\+::\+Migration@{Image\+App\+::migrations\+::0003\+\_\+auto\+\_\+20180818\+\_\+1425\+::\+Migration}!dependencies@{dependencies}}
\index{dependencies@{dependencies}!Image\+App\+::migrations\+::0003\+\_\+auto\+\_\+20180818\+\_\+1425\+::\+Migration@{Image\+App\+::migrations\+::0003\+\_\+auto\+\_\+20180818\+\_\+1425\+::\+Migration}}
\subsubsection{\texorpdfstring{dependencies}{dependencies}}
{\footnotesize\ttfamily list Image\+App.\+migrations.\+0003\+\_\+auto\+\_\+20180818\+\_\+1425.\+Migration.\+dependencies\hspace{0.3cm}{\ttfamily [static]}}
{\bfseries Initial value\+:}
\begin{DoxyCode}
= [
(\textcolor{stringliteral}{'ImageApp'}, \textcolor{stringliteral}{'0002\_image\_imagefile'}),
]
\end{DoxyCode}
\mbox{\Hypertarget{class_image_app_1_1migrations_1_10003__auto__20180818__1425_1_1_migration_aae609b480f1a2542bd52396572fc1574}\label{class_image_app_1_1migrations_1_10003__auto__20180818__1425_1_1_migration_aae609b480f1a2542bd52396572fc1574}}
\index{Image\+App\+::migrations\+::0003\+\_\+auto\+\_\+20180818\+\_\+1425\+::\+Migration@{Image\+App\+::migrations\+::0003\+\_\+auto\+\_\+20180818\+\_\+1425\+::\+Migration}!operations@{operations}}
\index{operations@{operations}!Image\+App\+::migrations\+::0003\+\_\+auto\+\_\+20180818\+\_\+1425\+::\+Migration@{Image\+App\+::migrations\+::0003\+\_\+auto\+\_\+20180818\+\_\+1425\+::\+Migration}}
\subsubsection{\texorpdfstring{operations}{operations}}
{\footnotesize\ttfamily list Image\+App.\+migrations.\+0003\+\_\+auto\+\_\+20180818\+\_\+1425.\+Migration.\+operations\hspace{0.3cm}{\ttfamily [static]}}
{\bfseries Initial value\+:}
\begin{DoxyCode}
= [
migrations.RenameField(
model\_name=\textcolor{stringliteral}{'image'},
old\_name=\textcolor{stringliteral}{'imageFile'},
new\_name=\textcolor{stringliteral}{'imageField'},
),
]
\end{DoxyCode}
The documentation for this class was generated from the following file\+:\begin{DoxyCompactItemize}
\item
Web\+Project/\+Cell\+Segmentation/\+Image\+App/migrations/\mbox{\hyperlink{0003__auto__20180818__1425_8py}{0003\+\_\+auto\+\_\+20180818\+\_\+1425.\+py}}\end{DoxyCompactItemize}
|
TeX
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# no more "zero" integer division bugs!:P
# import argparse
import os
import sys
import numpy as np # array
import time
import emcee
# import h5py
# import random
# import constants as cst # local constants module
# from scipy.stats import norm as scipy_norm
from . import ancillary as anc
import matplotlib as mpl
import matplotlib.pyplot as plt
anc.set_rcParams()
def compute_convergence(
chains, fit_names, log_folder, plots_folder, n_cv=10, n_thin=1, figsize=(5, 5)
):
os.makedirs(log_folder, exist_ok=True)
log_file = os.path.join(log_folder, "log_convergence.txt")
with open(log_file, "w") as olog:
anc.print_both("", output=olog)
anc.print_both(" ======================== ", output=olog)
anc.print_both(" CONVERGENCE PLOTS", output=olog)
anc.print_both(" ======================== ", output=olog)
anc.print_both("", output=olog)
n_steps, n_walkers, n_fit = np.shape(chains)
if n_cv < 1:
n_cv = 10
step = int((n_steps - 10) / n_cv)
steps = np.rint(np.linspace(step, n_steps, endpoint=True, num=n_cv)).astype(int)
expected_acf = np.zeros((n_fit)).astype(int)
expected_steps = np.zeros((n_fit)).astype(int)
for ifit, iname in enumerate(fit_names):
anc.print_both("\nParameter {}".format(iname), output=olog)
fig, axs = plt.subplots(nrows=3, ncols=1, sharex=False, figsize=figsize)
fig.suptitle(iname)
ax = axs[0]
# anc.print_both("Gelman-Rubin", output=olog)
gr = np.zeros((n_cv)) + 100
for istep in range(n_cv):
gr[istep] = anc.GelmanRubin(chains[: steps[istep], :, ifit])
ax.plot(
steps,
gr,
color="black",
marker="o",
ms=4,
mec="white",
mew=0.3,
ls="-",
lw=0.7,
zorder=5,
)
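# Rule of thumb: R-hat values below ~1.01 (the dashed line) indicate convergence.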
ax.axhline(1.01, color="gray", ls="--", lw=0.7, zorder=4)
ax.set_ylim(0.95, 1.2)
ax.set_ylabel("G-R ($\^{R}$)")
ax.set_xlabel("steps $\\times {}$".format(n_thin))
ax = axs[1]
# anc.print_both("Geweke", output=olog)
lower_interval, z_score = anc.geweke_test(
chains[:, :, ifit], start_frac=0.01, n_sel_steps=n_cv
)
for i_c in range(0, n_walkers):
ax.plot(
lower_interval,
z_score[:, i_c],
marker="o",
ms=2,
mec="None",
ls="-",
lw=0.4,
label="walker {:d}".format(i_c + 1),
alpha=0.6,
)
# ax.legend(loc='best', fontsize=3)
ax.axhline(
+2.0,
color="lightgray",
ls="-",
lw=0.7,
)
ax.axhline(
-2.0,
color="lightgray",
ls="-",
lw=0.7,
)
ax.set_ylabel("Geweke")
ax.set_xlabel("steps $\\times {}$".format(n_thin))
ax.set_ylim(-3, +3)
ax = axs[2]
# anc.print_both("ACF", output=olog)
tolerance = 50
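# emcee's integrated_time(tol=50) only trusts the tau estimate once the chain
# is at least tol autocorrelation times long, hence n_expected = tau * tol.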
integrated_ACF = emcee.autocorr.integrated_time(
chains[:, :, ifit], tol=tolerance, quiet=True
)
acf_len = int(np.nanmax(integrated_ACF))
n_expected = acf_len * tolerance
anc.print_both(
"ACF {}x{} expected chain long as n = {}x{} (current {} steps)".format(
acf_len, n_thin, n_expected, n_thin, n_steps
),
output=olog,
)
expected_steps[ifit] = n_expected
expected_acf[ifit] = acf_len
# n_acf = acf_len * tolerance
n_acf = 10
acf_steps = np.rint(
np.linspace(acf_len // 2, n_steps, endpoint=True, num=n_acf)
).astype(int)
tau_est = np.zeros((n_acf))
for i_acf, n_s in enumerate(acf_steps):
acf_mean = np.zeros((n_s))
for iw in range(0, n_walkers):
acf = emcee.autocorr.function_1d(chains[:n_s, iw, ifit])
acf_mean += acf
acf_mean /= n_walkers
c = 5
taus = 2.0 * np.cumsum(acf_mean) - 1.0
window = emcee.autocorr.auto_window(taus, c)
tau_est[i_acf] = taus[window]
ax.plot(
acf_steps,
tau_est,
color="C0",
marker="o",
ms=2,
mec="None",
ls="-",
lw=0.5,
label="$\\tau$",
zorder=6,
)
ax.axhline(
acf_len,
color="black",
ls="-",
lw=0.7,
label="ACF = ${}\\times{}$".format(acf_len, n_thin),
zorder=5,
)
ax.plot(
acf_steps,
acf_steps / tolerance,
color="gray",
marker="None",
ls="--",
lw=0.5,
label="$\\tau = N/({}\\times{})$".format(tolerance, n_thin),
zorder=4,
)
ax.legend(loc="best", fontsize=4)
ax.set_ylabel("ACF")
ax.set_xlabel("steps $\\times {}$".format(n_thin))
plt.tight_layout()
fig.align_ylabels(axs)
out_file = os.path.join(
plots_folder, "{:03d}_{}_convergence.png".format(ifit, iname)
)
fig.savefig(out_file, dpi=300, bbox_inches="tight")
plt.close(fig)
anc.print_both("", output=olog)
anc.print_both(
"All expected steps for each parameter needed to reach full convergence:\n{}".format(
expected_steps
),
output=olog,
)
anc.print_both(
"All expected ACF len for each parameter needed to reach full convergence:\n{}".format(
expected_acf
),
output=olog,
)
imax_acf = np.argmax(expected_acf)
anc.print_both(
"MAX ACF = {} ==> needed chains of {} steps\n".format(
expected_acf[imax_acf], expected_steps[imax_acf]
),
output=olog,
)
# close olog
return
def full_statistics(
chains,
# post,
flat_post,
names,
pars,
lnp_post,
output_folder,
olog=None,
ilast=0,
n_burn=0,
n_thin=1,
show_plot=False,
figsize=(8, 8),
):
# 68.27% (15.87th-84.13th) ==> alpha = 1. - 0.6827 = 0.3173
# 95.44% ( 2.28th-97.72th) ==> alpha = 1. - 0.9544 = 0.0456
# 99.74% ( 0.13th-99.87th) ==> alpha = 1. - 0.9974 = 0.0026
cred1 = 0.6827
scred1 = "{:.2f}".format(100 * cred1)
cred2 = 0.9544
scred2 = "{:.2f}".format(100 * cred2)
cred3 = 0.9974
scred3 = "{:.2f}".format(100 * cred3)
lsize = 10
tsize = lsize - 3
n_steps, n_walkers, n_par = np.shape(chains)
print("### n_steps, n_walkers, n_par = {}, {}, {}".format(n_steps, n_walkers, n_par))
n_gr = 10
if n_steps <= n_gr:
n_gr = n_steps
step = 1
else:
step = int((n_steps - 10) / n_gr)
steps = np.rint(np.linspace(step, n_steps, endpoint=True, num=n_gr)).astype(int)
expected_acf = np.zeros((n_par)).astype(int)
expected_steps = np.zeros((n_par)).astype(int)
for ipar, pname in enumerate(names):
p = pars[ipar]
anc.print_both("Parameter {}".format(pname), output=olog)
if (
pname[0] == "w"
or pname[0:2] == "mA"
or pname[0:2] == "lN"
or "lambda" in pname
):
pmod = p % 360.0
patg = anc.get_arctan_angle(pmod)
pmin, pmax = flat_post[:, ipar].min(), flat_post[:, ipar].max()
if np.logical_and(patg >= pmin, patg <= pmax):
p = patg
else:
p = pmod
hdi1 = anc.hpd(flat_post[:, ipar], cred=cred1)
hdi2 = anc.hpd(flat_post[:, ipar], cred=cred2)
hdi3 = anc.hpd(flat_post[:, ipar], cred=cred3)
err = np.array(hdi1) - p
med = np.median(flat_post[:, ipar])
warn = ""
if err[0] > 0 or err[1] < 0:
warn = "!!WARNING MAP OUT OF HDI{}%!!".format(cred1)
fig = plt.figure(figsize=figsize, layout="constrained")
spec = fig.add_gridspec(3, 3)
axs = []
# TOP-LEFT ==== Gelman-Rubin
topl = fig.add_subplot(spec[0, 0])
topl.tick_params(axis="x", labelrotation=0, labelsize=tsize)
topl.tick_params(axis="y", labelrotation=45, labelsize=tsize)
gr = np.zeros((n_gr)) + 100
for istep, vstep in enumerate(steps):
# gr[istep] = anc.GelmanRubin(chains[:vstep, :, ipar])
gr[istep] = anc.GelmanRubin_PyORBIT(chains[:vstep, :, ipar])
topl.plot(
steps,
gr,
color="black",
marker="o",
ms=4,
mec="white",
mew=0.3,
ls="-",
lw=0.7,
zorder=5,
)
topl.axhline(1.01, color="gray", ls="--", lw=0.7, zorder=4)
ylim0 = topl.get_ylim()
topl.set_ylim(max(0.95, ylim0[0]), min(1.2, ylim0[1]))
# topl.set_ylabel("G-R ($\^{R}$)", fontsize=lsize)
topl.set_xlabel("steps $\\times {}$".format(n_thin), fontsize=lsize)
axs.append(topl)
# TOP-CENTRE ==== Geweke
topc = fig.add_subplot(spec[0, 1])
topc.tick_params(axis="x", labelrotation=0, labelsize=tsize)
topc.tick_params(axis="y", labelrotation=45, labelsize=tsize)
lower_interval, z_score = anc.geweke_test(
chains[:, :, ipar], start_frac=0.01, n_sel_steps=n_gr
)
for i_c in range(0, n_walkers):
topc.plot(
lower_interval,
z_score[:, i_c],
marker="o",
ms=2,
mec="None",
ls="-",
lw=0.4,
# label="walker {:d}".format(i_c + 1),
alpha=0.6,
)
topc.axhline(
+2.0,
color="lightgray",
ls="-",
lw=0.7,
)
topc.axhline(
-2.0,
color="lightgray",
ls="-",
lw=0.7,
)
topc.set_ylim(-3, +3)
# topc.set_ylabel("Geweke", fontsize=lsize)
topc.set_xlabel("steps $\\times {}$".format(n_thin), fontsize=lsize)
axs.append(topc)
# TOP-RIGHT ==== ACF
topr = fig.add_subplot(spec[0, 2])
topr.tick_params(axis="x", labelrotation=0, labelsize=tsize)
topr.tick_params(axis="y", labelrotation=45, labelsize=tsize)
tolerance = 50
integrated_ACF = emcee.autocorr.integrated_time(
chains[:, :, ipar], tol=tolerance, quiet=True
)
print("integrated_ACF ",integrated_ACF)
acf_len = np.rint(np.nanmax(integrated_ACF)).astype(int)
print("acf_len ",acf_len)
n_expected = acf_len * tolerance
print("n_expected ",n_expected)
anc.print_both(
"ACF {}x{} expected chain long as n = {}x{} (current {} steps)".format(
acf_len, n_thin, n_expected, n_thin, n_steps
),
output=olog,
)
expected_steps[ipar] = n_expected
print("expected_steps[ipar] ",expected_steps[ipar])
expected_acf[ipar] = acf_len
print("expected_acf[ipar] ",expected_acf[ipar])
n_acf = 10
acf_start = acf_len // 2
if acf_start < 1:
acf_start = 1
acf_steps = np.rint(
np.linspace(acf_start, n_steps, endpoint=True, num=n_acf)
).astype(int)
print("acf_steps ",acf_steps)
tau_est = np.zeros((n_acf))
for i_acf, n_s in enumerate(acf_steps):
acf_mean = np.zeros((n_s))
for iw in range(0, n_walkers):
acf = emcee.autocorr.function_1d(chains[:n_s, iw, ipar])
acf_mean += acf
acf_mean /= n_walkers
c = 5
taus = 2.0 * np.cumsum(acf_mean) - 1.0
window = emcee.autocorr.auto_window(taus, c)
tau_est[i_acf] = taus[window]
topr.plot(
acf_steps,
tau_est,
color="C0",
marker="o",
ms=2,
mec="None",
ls="-",
lw=0.5,
label="$\\tau$",
zorder=6,
)
topr.axhline(
acf_len,
color="black",
ls="-",
lw=0.7,
label="ACF = ${}\\times{}$".format(acf_len, n_thin),
zorder=5,
)
topr.plot(
acf_steps,
acf_steps / tolerance,
color="gray",
marker="None",
ls="--",
lw=0.5,
label="$\\tau = N/({}\\times{})$".format(tolerance, n_thin),
zorder=4,
)
topr.legend(loc="best", fontsize=tsize - 2)
# topr.set_ylabel("ACF", fontsize=lsize)
topr.set_xlabel("steps $\\times {}$".format(n_thin), fontsize=lsize)
axs.append(topr)
# MIDLEFT ==== trace full chains
midl = fig.add_subplot(spec[1, 0])
midl.tick_params(axis="x", labelrotation=0, labelsize=tsize)
midl.tick_params(axis="y", labelrotation=45, labelsize=tsize)
midl.plot(chains[:, :, ipar], ls="-", lw=0.2, alpha=0.3)
midl.axvline(n_burn, color="gray", ls="-", lw=1.3, alpha=0.7)
midl.axhline(p, color="C1", ls="-", lw=1.4, alpha=0.7)
# midl.set_ylabel("{} (full)".format(pname), fontsize=lsize)
midl.set_xlabel("steps $\\times {}$".format(n_thin), fontsize=lsize)
axs.append(midl)
# MIDCENTRE ==== trace posterior chains
midc = fig.add_subplot(spec[1, 1])
midc.tick_params(axis="x", labelrotation=0, labelsize=tsize)
midc.tick_params(axis="y", labelrotation=45, labelsize=tsize)
midc.plot(chains[:, :, ipar], ls="-", lw=0.2, alpha=0.3)
midc.axvspan(
0, n_burn, facecolor="gray", edgecolor="None", ls="-", lw=1.3, alpha=0.5
)
midc.axvline(n_burn, color="gray", ls="-", lw=1.3, alpha=0.7)
midc.axhline(p, color="C1", ls="-", lw=1.4, alpha=0.7)
y = flat_post[:, ipar]
dy = np.ptp(y)
midc.set_ylim([y.min() - 0.03 * dy, y.max() + 0.03 * dy])
midc.set_xlabel("steps $\\times {}$".format(n_thin), fontsize=lsize)
axs.append(midc)
# MIDRIGHT ==== posterior distribution
midr = fig.add_subplot(spec[1, 2])
midr.tick_params(axis="x", labelbottom=False)
midr.tick_params(axis="y", labelleft=False)
midr.hist(
flat_post[:, ipar],
bins=33,
color="black",
density=False,
orientation="horizontal",
zorder=3,
)
midr.axhline(
p, color="C1", ls="-", lw=1.3, alpha=1.0, label="MAP", zorder=5
)
midr.axhline(
hdi1[0],
color="C2",
ls="--",
lw=0.95,
alpha=1.0,
label="HDI{}%".format(scred1),
zorder=4,
)
midr.axhline(hdi1[1], color="C2", ls="--", lw=0.95, alpha=1.0, zorder=4)
midr.axhline(
hdi2[0],
color="C3",
ls="--",
lw=0.50,
alpha=1.0,
label="HDI{}%".format(scred2),
zorder=4,
)
midr.axhline(hdi2[1], color="C3", ls="--", lw=0.50, alpha=1.0, zorder=5)
midr.axhline(
hdi3[0],
color="C4",
ls="--",
lw=0.50,
alpha=1.0,
label="HDI{}%".format(scred3),
zorder=4,
)
midr.axhline(hdi3[1], color="C4", ls="--", lw=0.50, alpha=1.0, zorder=6)
midr.axhline(
med, color="C0", ls="--", lw=1.0, alpha=1.0, label="MEDIAN", zorder=6
)
# midr.legend(loc='best', fontsize=tsize-3)
axs.append(midr)
# BOTTOM ==== lnP = f(par)
bot = fig.add_subplot(spec[2, :])
bot.tick_params(axis="x", labelrotation=0, labelsize=tsize)
bot.tick_params(axis="y", labelrotation=45, labelsize=tsize)
# bot.set_title(pname, fontsize=lsize+1)
bot.plot(
flat_post[:, ipar],
lnp_post,
color="black",
marker="o",
ms=1,
mec="None",
ls="",
alpha=0.33,
zorder=2,
)
bot.axvline(
p, color="C1", ls="-", lw=1.3, alpha=1.0, label="MAP", zorder=5
)
bot.axvline(
hdi1[0],
color="C2",
ls="--",
lw=0.95,
alpha=1.0,
label="HDI{}%".format(scred1),
zorder=4,
)
bot.axvline(hdi1[1], color="C2", ls="--", lw=0.95, alpha=1.0, zorder=4)
bot.axvline(
hdi2[0],
color="C3",
ls="--",
lw=0.50,
alpha=1.0,
label="HDI{}%".format(scred2),
zorder=4,
)
bot.axvline(hdi2[1], color="C3", ls="--", lw=0.50, alpha=1.0, zorder=5)
bot.axvline(
hdi3[0],
color="C4",
ls="--",
lw=0.50,
alpha=1.0,
label="HDI{}%".format(scred3),
zorder=4,
)
bot.axvline(hdi3[1], color="C4", ls="--", lw=0.50, alpha=1.0, zorder=6)
bot.axvline(
med, color="C0", ls="--", lw=1.0, alpha=1.0, label="MEDIAN", zorder=6
)
bot.legend(
# loc='center left',
# bbox_to_anchor=(1.01, 0.5),
loc="best",
fontsize=tsize - 3,
)
bot.set_ylabel("$\ln\mathcal{P}$", fontsize=lsize)
bot.set_xlabel("{} (posterior)".format(pname), fontsize=lsize)
axs.append(bot)
plt.tight_layout()
# fig.align_ylabels(axs)
# save figure
output_file = os.path.join(
output_folder, "{:03d}_{}.png".format(ipar + ilast, pname)
)
anc.print_both("Saving {}".format(output_file), output=olog)
fig.savefig(output_file, dpi=300, bbox_inches="tight")
if show_plot:
plt.show()
plt.close(fig)
return expected_acf, expected_steps
def log_probability_trace(
log_prob, lnprob_posterior, plot_folder,
n_burn=0, n_thin=1, show_plot=False, figsize=(8, 8), olog=None
):
lsize = 10
tsize = lsize - 3
map_lnprob = np.max(lnprob_posterior)
fig = plt.figure(figsize=figsize, layout="constrained")
spec = fig.add_gridspec(3, 1)
axs = []
top = fig.add_subplot(spec[0, 0])
top.tick_params(axis="x", labelrotation=45, labelsize=tsize, labelbottom=False)
top.tick_params(axis="y", labelrotation=45, labelsize=tsize)
top.plot(log_prob, ls="-", lw=0.2, alpha=0.3)
top.axvline(n_burn, color="gray", ls="-", lw=1.3, alpha=0.7)
top.axhline(map_lnprob, color="C1", ls="-", lw=1.2, alpha=0.7)
top.set_ylabel(r"$\ln\mathcal{P}$ (full)")
axs.append(top)
mid = fig.add_subplot(spec[1, 0])
mid.tick_params(axis="x", labelrotation=45, labelsize=tsize)
mid.tick_params(axis="y", labelrotation=45, labelsize=tsize)
mid.plot(log_prob, ls="-", lw=0.2, alpha=0.3)
mid.axvline(n_burn, color="gray", ls="-", lw=1.3, alpha=0.7)
mid.axhline(map_lnprob, color="C1", ls="-", lw=1.2, alpha=0.7)
mid.set_ylabel(r"$\ln\mathcal{P}$ (post.)")
mid.set_xlabel("steps $\\times {}$".format(n_thin))
dlnP = np.ptp(lnprob_posterior)
mid.set_ylim(
[lnprob_posterior.min() - 0.03 * dlnP, lnprob_posterior.max() + 0.03 * dlnP]
)
axs.append(mid)
bot = fig.add_subplot(spec[2, 0])
bot.tick_params(axis="x", labelrotation=45, labelsize=tsize)
bot.tick_params(axis="y", labelrotation=45, labelsize=tsize, labelleft=False)
bot.hist(
lnprob_posterior,
bins=33,
color="black",
density=False,
orientation="vertical",
zorder=3,
)
bot.axvline(map_lnprob, color="C1", ls="-", lw=1.2, alpha=0.7)
bot.set_xlabel(r"$\ln\mathcal{P}$ (post.)")
axs.append(bot)
plt.tight_layout()
fig.align_ylabels(axs)
output_file = os.path.join(plot_folder, "lnprob_trace.png")
anc.print_both("\nSaving {}".format(output_file), output=olog)
fig.savefig(output_file, dpi=300, bbox_inches="tight")
if show_plot:
plt.show()
plt.close(fig)
return
|
Python
|
CREATE PROCEDURE UpdateCreditLimitWrapper
AS
BEGIN
DECLARE @Retries INT = 1 ;
WHILE @Retries <= 10
BEGIN
BEGIN TRY
EXEC dbo.UpdateCreditLimit ;
BREAK ; -- success: stop retrying
END TRY
BEGIN CATCH
WAITFOR DELAY '00:00:01' ;
SET @Retries = @Retries + 1 ;
END CATCH
END
END
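-- Usage sketch (assumes dbo.UpdateCreditLimit exists and can fail
-- transiently, e.g. on a deadlock): each failed attempt waits one second
-- before retrying, giving up after 10 attempts.
-- EXEC dbo.UpdateCreditLimitWrapper ;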
|
PLSQL
|
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package iam provides the client and types for making API
// requests to AWS Identity and Access Management.
//
// AWS Identity and Access Management (IAM) is a web service that you can use
// to manage users and user permissions under your AWS account. This guide provides
// descriptions of IAM actions that you can call programmatically. For general
// information about IAM, see AWS Identity and Access Management (IAM) (http://aws.amazon.com/iam/).
// For the user guide for IAM, see Using IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/).
//
// AWS provides SDKs that consist of libraries and sample code for various programming
// languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs
// provide a convenient way to create programmatic access to IAM and AWS. For
// example, the SDKs take care of tasks such as cryptographically signing requests
// (see below), managing errors, and retrying requests automatically. For information
// about the AWS SDKs, including how to download and install them, see the Tools
// for Amazon Web Services (http://aws.amazon.com/tools/) page.
//
// We recommend that you use the AWS SDKs to make programmatic API calls to
// IAM. However, you can also use the IAM Query API to make direct calls to
// the IAM web service. To learn more about the IAM Query API, see Making Query
// Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
// in the Using IAM guide. IAM supports GET and POST requests for all actions.
// That is, the API does not require you to use GET for some actions and POST
// for others. However, GET requests are subject to the size limitation of a
// URL. Therefore, for operations that require larger sizes, use a POST request.
//
// Signing Requests
//
// Requests must be signed using an access key ID and a secret access key. We
// strongly recommend that you do not use your AWS account access key ID and
// secret access key for everyday work with IAM. You can use the access key
// ID and secret access key for an IAM user or you can use the AWS Security
// Token Service to generate temporary security credentials and use those to
// sign requests.
//
// To sign requests, we recommend that you use Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
// If you have an existing application that uses Signature Version 2, you do
// not have to update it to use Signature Version 4. However, some operations
// now require Signature Version 4. The documentation for operations that require
// version 4 indicates this requirement.
//
// Additional Resources
//
// For more information, see the following:
//
// * AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html).
// This topic provides general information about the types of credentials
// used for accessing AWS.
//
// * IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAMBestPractices.html).
// This topic presents a list of suggestions for using the IAM service to
// help secure your AWS resources.
//
// * Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html).
// This set of topics walks you through the process of signing a request using
// an access key ID and secret access key.
//
// See https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08 for more information on this service.
//
// See iam package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/iam/
//
// Using the Client
//
// To contact AWS Identity and Access Management with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the AWS Identity and Access Management client IAM for more
// information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/iam/#New
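//
// For example (an illustrative sketch, assuming credentials are available
// through the default credential chain):
//
//    sess := session.Must(session.NewSession())
//    svc := iam.New(sess)
//    out, err := svc.ListUsers(&iam.ListUsersInput{})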
package iam
|
Go
|
part of serverManager;
class MessageProcessor {
Logger _log = new Logger('MessageProcessor');
Manager _manager;
List<MessageProcessorInterface> _processors = [];
MessageProcessor(this._manager);
void registerProcessor(MessageProcessorInterface mp) {
mp.manager = _manager;
_processors.add(mp);
}
void process(Message message, String idClient) {
for (var processor in _processors) {
if (processor.canProcess(message, idClient)) {
processor.process(message, idClient);
}
}
}
}
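// Usage sketch (EchoProcessor is a hypothetical MessageProcessorInterface
// implementation; manager, message and idClient are assumed to exist):
//   var processor = new MessageProcessor(manager);
//   processor.registerProcessor(new EchoProcessor());
//   processor.process(message, idClient);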
|
Dart
|
--[[ WoTD License -
This software is provided as free and open source by the
team of The WoTD Team. This script was written and is
protected by the GPL v2. Please give credit where credit
is due, if modifying, redistributing and/or using this
software. Thank you.
Thank: WoTD Team; for the Script
~~End of License... Please Stand By...
-- WoTD Team, January 19, 2010. ]]
function ScryerCavalier_OnEnterCombat(Unit,Event)
Unit:CastSpell(30931)
Unit:registerEvent("ScryerCavalier_Spellbreaker", 24000, 0)
end
function ScryerCavalier_Spellbreaker(Unit,Event)
Unit:FullCastSpellOnTarget(35871, Unit:GetClosestPlayer())
end
function ScryerCavalier_OnLeaveCombat(Unit,Event)
Unit:RemoveEvents()
end
function ScryerCavalier_OnDied(Unit,Event)
Unit:RemoveEvents()
end
RegisterUnitEvent(22967, 1, "ScryerCavalier_OnEnterCombat")
RegisterUnitEvent(22967, 2, "ScryerCavalier_OnLeaveCombat")
RegisterUnitEvent(22967, 4, "ScryerCavalier_OnDied")
|
Lua
|
FROM busybox
ADD index.html /www/index.html
EXPOSE 8000
CMD httpd -p 8000 -h /www; tail -f /dev/null
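# Build and run (illustrative): busybox's httpd serves /www on port 8000 and
# tail keeps the container in the foreground.
#   docker build -t busybox-www .
#   docker run -p 8000:8000 busybox-www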
|
Dockerfile
|
Sub.pair <-
function(z, t, Mat, i, j){
alpha=0.05
n1 <- Sub.n(z, t, Mat, i)
n2 <- Sub.n(z, t, Mat, j)
m2 <- sum(z[which(Mat[,i] == 1 & Mat[,j] == 1)])
M <- sum(z)
M12 <- M - sum(z[which(Mat[,i] == 0 & Mat[,j] == 0)])
PetN <- n1 * n2 / m2
ChpN <- (n1 + 1) * (n2 + 1) / (m2 + 1) - 1
VarN <- (n1 + 1) * (n2 + 1) * (n1 - m2) * (n2 - m2) / ((m2 + 1)^2 * (m2 + 2))
SEN <- sqrt(VarN)
C <- exp(qnorm(1 - alpha / 2) * sqrt(log(1 + VarN / (ChpN - M12)^2)))
ChpN.L <- M12 + (ChpN - M12) / C
ChpN.U <- M12 + (ChpN - M12) * C
Nij <- cbind(PetN, ChpN, SEN, ChpN.L, ChpN.U)
colnames(Nij) <- c("Petersen","Chapman","se","cil","ciu")
rownames(Nij) <- paste("pa", i, j, sep="")
return(Nij)
}
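# Usage sketch (hypothetical inputs): z holds the counts per capture history,
# t the number of lists, and Mat the 0/1 matrix of list membership for each
# history; i and j pick the pair of lists to compare. For example,
# Sub.pair(z, t, Mat, 1, 2) returns the Petersen and Chapman abundance
# estimates for lists 1 and 2 with a 95% confidence interval (alpha = 0.05).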
|
R
|
{
"_args": [
[
{
"raw": "is-unc-path@^0.1.1",
"scope": null,
"escapedName": "is-unc-path",
"name": "is-unc-path",
"rawSpec": "^0.1.1",
"spec": ">=0.1.1 <0.2.0",
"type": "range"
},
"/home/lfernandes/ng2/code/first_app/angular2-reddit-base/node_modules/is-relative"
]
],
"_from": "is-unc-path@>=0.1.1 <0.2.0",
"_id": "[email protected]",
"_inCache": true,
"_installable": true,
"_location": "/is-unc-path",
"_nodeVersion": "0.12.4",
"_npmUser": {
"name": "jonschlinkert",
"email": "[email protected]"
},
"_npmVersion": "2.10.1",
"_phantomChildren": {},
"_requested": {
"raw": "is-unc-path@^0.1.1",
"scope": null,
"escapedName": "is-unc-path",
"name": "is-unc-path",
"rawSpec": "^0.1.1",
"spec": ">=0.1.1 <0.2.0",
"type": "range"
},
"_requiredBy": [
"/is-relative"
],
"_resolved": "https://registry.npmjs.org/is-unc-path/-/is-unc-path-0.1.1.tgz",
"_shasum": "ab2533d77ad733561124c3dc0f5cd8b90054c86b",
"_shrinkwrap": null,
"_spec": "is-unc-path@^0.1.1",
"_where": "/home/lfernandes/ng2/code/first_app/angular2-reddit-base/node_modules/is-relative",
"author": {
"name": "Jon Schlinkert",
"url": "https://github.com/jonschlinkert"
},
"bugs": {
"url": "https://github.com/jonschlinkert/is-unc-path/issues"
},
"dependencies": {
"unc-path-regex": "^0.1.0"
},
"description": "Returns true if a filepath is a windows UNC file path.",
"devDependencies": {
"mocha": "*",
"should": "*"
},
"directories": {},
"dist": {
"shasum": "ab2533d77ad733561124c3dc0f5cd8b90054c86b",
"tarball": "https://registry.npmjs.org/is-unc-path/-/is-unc-path-0.1.1.tgz"
},
"engines": {
"node": ">=0.10.0"
},
"files": [
"index.js"
],
"homepage": "https://github.com/jonschlinkert/is-unc-path",
"keywords": [
"absolute",
"expression",
"file",
"filepath",
"match",
"matching",
"path",
"regex",
"regexp",
"regular",
"unc",
"win",
"windows"
],
"license": "MIT",
"main": "index.js",
"maintainers": [
{
"name": "jonschlinkert",
"email": "[email protected]"
}
],
"name": "is-unc-path",
"optionalDependencies": {},
"readme": "ERROR: No README data found!",
"repository": {
"type": "git",
"url": "git+https://github.com/jonschlinkert/is-unc-path.git"
},
"scripts": {
"test": "mocha"
},
"version": "0.1.1"
}
|
JSON
|
######################################################
##### Purge all existing firewall rules (if any) #####
######################################################
resources { 'firewall':
purge => true,
}
#####################################################
##### Default rules defined before custom rules #####
#####################################################
class pre {
Firewall {
require => undef,
}
# Default firewall rules
firewall { '000 accept all icmp':
proto => 'icmp',
action => 'accept',
}->
firewall { '001 accept all to lo interface':
proto => 'all',
iniface => 'lo',
action => 'accept',
}->
firewall { '002 reject local traffic not on loopback interface':
iniface => '! lo',
proto => 'all',
destination => '127.0.0.1/8',
action => 'reject',
}->
firewall { '003 accept related established rules':
proto => 'all',
state => ['RELATED', 'ESTABLISHED'],
action => 'accept',
}
}
######################################################
##### Custom rules defined between default rules #####
######################################################
# firewall { '004 custom rule example':
# proto => 'all',
# state => ['RELATED', 'ESTABLISHED'],
# action => 'accept',
# }
#
# firewall { '005 custom rule example':
# proto => 'all',
# state => ['RELATED', 'ESTABLISHED'],
# action => 'accept',
# }
#
# firewall { '006 custom rule example':
# proto => 'all',
# state => ['RELATED', 'ESTABLISHED'],
# action => 'drop',
# }
####################################################
##### Default rules defined after custom rules #####
####################################################
class post {
firewall { '999 drop all':
proto => 'all',
action => 'drop',
before => undef,
}
}
Firewall {
before => Class['post'],
require => Class['pre'],
}
class { ['pre', 'post']: }
class { 'firewall': }
|
Puppet
|
# make sure we have vala
find_package (Vala REQUIRED)
# make sure we use vala
include (ValaVersion)
# make sure it's the desired version of vala
ensure_vala_version ("0.16" MINIMUM)
configure_file (${CMAKE_SOURCE_DIR}/config.vala.cmake ${CMAKE_BINARY_DIR}/config.vala)
# files we want to compile
include (ValaPrecompile)
vala_precompile (VALA_C ${EXEC_NAME}
${CMAKE_BINARY_DIR}/config.vala
Const.vala
GameView.vala
InGameGUI.vala
MainWindow.vala
Map.vala
Tile.vala
Building/Building.vala
Building/Fountain.vala
Building/House.vala
Building/Road.vala
Building/Farm.vala
Building/Prefecture.vala
Building/Engineer.vala
Building/Market.vala
PACKAGES
gtk+-3.0
OPTIONS
--target-glib=2.32
--thread
)
# tell cmake what to call the executable we just made
add_executable (${EXEC_NAME} ${VALA_C})
# install the binaries we just made
install (TARGETS ${EXEC_NAME} RUNTIME DESTINATION bin)
|
CMake
|
#!/usr/bin/env python
"""
This script generates the index-pattern for Kibana from
the fields.yml file.
"""
import yaml
import argparse
import string
import re
import json
import os
import errno
import sys
unique_fields = []
def fields_to_json(section, path, output):
if not section["fields"]:
return
for field in section["fields"]:
if path == "":
newpath = field["name"]
else:
newpath = path + "." + field["name"]
if "type" in field and field["type"] == "group":
fields_to_json(field, newpath, output)
else:
field_to_json(field, newpath, output)
def field_to_json(desc, path, output,
indexed=True, analyzed=False, doc_values=True,
searchable=True, aggregatable=True):
global unique_fields
if path in unique_fields:
print("ERROR: Field {} is duplicated. Please delete it and try again. Fields already are {}".format(
path, ", ".join(unique_fields)))
sys.exit(1)
else:
unique_fields.append(path)
field = {
"name": path,
"count": 0,
"scripted": False,
"indexed": indexed,
"analyzed": analyzed,
"doc_values": doc_values,
"searchable": searchable,
"aggregatable": aggregatable,
}
# find the kibana types based on the field type
if "type" in desc:
if desc["type"] in ["half_float", "scaled_float", "float", "integer", "long", "short", "byte"]:
field["type"] = "number"
elif desc["type"] in ["text", "keyword"]:
field["type"] = "string"
if desc["type"] == "text":
field["aggregatable"] = False
elif desc["type"] == "date":
field["type"] = "date"
elif desc["type"] == "geo_point":
field["type"] = "geo_point"
else:
field["type"] = "string"
output["fields"].append(field)
if "format" in desc:
output["fieldFormatMap"][path] = {
"id": desc["format"],
}
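# Illustrative example: a fields.yml entry such as
#   - name: bytes_out
#     type: long
# is emitted as {"name": "bytes_out", "type": "number", "count": 0, ...}
# in the index-pattern's field list.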
def fields_to_index_pattern(args, input):
docs = yaml.safe_load(input)
if docs is None:
print("fields.yml is empty. Cannot generate index-pattern")
return
output = {
"fields": [],
"fieldFormatMap": {},
"timeFieldName": "@timestamp",
"title": args.index,
}
for section in docs["fields"]:
fields_to_json(section, "", output)
# add meta fields
field_to_json({"name": "_id", "type": "keyword"}, "_id", output,
indexed=False, analyzed=False, doc_values=False,
searchable=False, aggregatable=False)
field_to_json({"name": "_type", "type": "keyword"}, "_type", output,
indexed=False, analyzed=False, doc_values=False,
searchable=True, aggregatable=True)
field_to_json({"name": "_index", "type": "keyword"}, "_index", output,
indexed=False, analyzed=False, doc_values=False,
searchable=False, aggregatable=False)
field_to_json({"name": "_score", "type": "integer"}, "_score", output,
indexed=False, analyzed=False, doc_values=False,
searchable=False, aggregatable=False)
output["fields"] = json.dumps(output["fields"])
output["fieldFormatMap"] = json.dumps(output["fieldFormatMap"])
return output
def get_index_pattern_name(index):
allow = string.ascii_letters + string.digits + "_"
return re.sub('[^%s]' % allow, '', index)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generates the index-pattern for a Beat.")
parser.add_argument("--index", help="The name of the index-pattern")
parser.add_argument("--beat", help="Local Beat directory")
parser.add_argument("--libbeat", help="Libbeat local directory")
args = parser.parse_args()
fields_yml = args.beat + "/_meta/fields.generated.yml"
# Not all beats have a fields.generated.yml. Fall back to fields.yml
if not os.path.isfile(fields_yml):
fields_yml = args.beat + "/_meta/fields.yml"
# generate the index-pattern content
with open(fields_yml, 'r') as f:
fields = f.read()
# Prepend beat fields from libbeat
with open(args.libbeat + "/_meta/fields.generated.yml") as f:
fields = f.read() + fields
# with open(target, 'w') as output:
output = fields_to_index_pattern(args, fields)
# dump output to a json file
fileName = get_index_pattern_name(args.index)
target_dir = os.path.join(args.beat, "_meta", "kibana", "index-pattern")
target_file = os.path.join(target_dir, fileName + ".json")
try:
os.makedirs(target_dir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
output = json.dumps(output, indent=2)
with open(target_file, 'w') as f:
f.write(output)
print("The index pattern was created under {}".format(target_file))
|
Python
|
# This file is responsible for configuring your application
# and its dependencies with the aid of the Mix.Config module.
#
# This configuration file is loaded before any dependency and
# is restricted to this project.
use Mix.Config
# Configures the endpoint
config :connectdemo, Connectdemo.Endpoint,
url: [host: "localhost"],
root: Path.dirname(__DIR__),
secret_key_base: "K5eic+a0gdMLFa0fC63CUzYDMuqMSos8KfujAuSZkIcMEQG7rrng6klOcpvfVzlx",
render_errors: [accepts: ~w(html json)],
pubsub: [name: Connectdemo.PubSub,
adapter: Phoenix.PubSub.PG2]
# Configures Elixir's Logger
config :logger, :console,
format: "$time $metadata[$level] $message\n",
metadata: [:request_id]
# Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above.
import_config "#{Mix.env}.exs"
# Configure phoenix generators
config :phoenix, :generators,
migration: true,
binary_id: false
|
Elixir
|
# This file is a part of Julia. License is MIT: https://julialang.org/license
# Tests for /base/stacktraces.jl
using Serialization, Base.StackTraces
let
@noinline child() = stacktrace()
@noinline parent() = child()
@noinline grandparent() = parent()
line_numbers = @__LINE__() .- [3, 2, 1]
stack = grandparent()
# Basic tests.
@assert length(stack) >= 3 "Compiler has unexpectedly inlined functions"
@test [:child, :parent, :grandparent] == [f.func for f in stack[1:3]]
for (line, frame) in zip(line_numbers, stack[1:3])
@test [Symbol(@__FILE__), line] == [frame.file, frame.line]
end
@test [false, false, false] == [f.from_c for f in stack[1:3]]
# Test remove_frames!
stack = StackTraces.remove_frames!(grandparent(), :parent)
@test stack[1] == StackFrame(:grandparent, @__FILE__, line_numbers[3])
stack = StackTraces.remove_frames!(grandparent(), [:child, :something_nonexistent])
@test stack[1:2] == [
StackFrame(:parent, @__FILE__, line_numbers[2]),
StackFrame(:grandparent, @__FILE__, line_numbers[3])
]
b = PipeBuffer()
frame = stack[1]
serialize(b, frame)
frame2 = deserialize(b)
@test frame !== frame2
@test frame == frame2
@test frame.linfo !== nothing
@test frame2.linfo === nothing
end
# Test from_c
let (default, with_c, without_c) = (stacktrace(), stacktrace(true), stacktrace(false))
@test default == without_c
@test length(with_c) > length(without_c)
@test !isempty(filter(frame -> frame.from_c, with_c))
@test isempty(filter(frame -> frame.from_c, without_c))
end
@test StackTraces.lookup(C_NULL) == [StackTraces.UNKNOWN] == StackTraces.lookup(C_NULL + 1) == StackTraces.lookup(C_NULL - 1)
let ct = current_task()
# After a task switch, there should be nothing in catch_backtrace
yieldto(@task yieldto(ct))
@test catch_backtrace() == StackFrame[]
@noinline bad_function() = throw(UndefVarError(:nonexistent))
@noinline function try_stacktrace()
try
bad_function()
catch
return stacktrace()
end
end
@noinline function try_catch()
try
bad_function()
catch
return stacktrace(catch_backtrace())
end
end
line_numbers = @__LINE__() .- [15, 10, 5]
# Test try...catch with stacktrace
@test try_stacktrace()[1] == StackFrame(:try_stacktrace, @__FILE__, line_numbers[2])
# Test try...catch with catch_backtrace
@test try_catch()[1:2] == [
StackFrame(:bad_function, @__FILE__, line_numbers[1]),
StackFrame(:try_catch, @__FILE__, line_numbers[3])
]
end
module inlined_test
using Test
@inline g(x) = (x == 3 && throw("a"); x)
@inline h(x) = (x == 3 && g(x); x)
f(x) = (y = h(x); y)
trace = (try; f(3); catch; stacktrace(catch_backtrace()); end)[1:3]
can_inline = Bool(Base.JLOptions().can_inline)
for (frame, func, inlined) in zip(trace, [g,h,f], (can_inline, can_inline, false))
@test frame.func === typeof(func).name.mt.name
# broken until #50082 can be addressed
mi = isa(frame.linfo, Core.CodeInstance) ? frame.linfo.def : frame.linfo
@test mi.def.module === which(func, (Any,)).module broken=inlined
@test mi.def === which(func, (Any,)) broken=inlined
@test mi.specTypes === Tuple{typeof(func), Int} broken=inlined
# line
@test frame.file === Symbol(@__FILE__)
@test !frame.from_c
@test frame.inlined === inlined
end
end
let src = Meta.lower(Main, quote let x = 1 end end).args[1]::Core.CodeInfo
li = ccall(:jl_method_instance_for_thunk, Ref{Core.MethodInstance}, (Any, Any), src, @__MODULE__)
sf = StackFrame(:a, :b, 3, li, false, false, 0)
repr = string(sf)
@test repr == "Toplevel MethodInstance thunk at b:3"
end
let li = typeof(fieldtype).name.mt.cache.func::Core.MethodInstance,
sf = StackFrame(:a, :b, 3, li, false, false, 0),
repr = string(sf)
@test repr == "fieldtype(...) at b:3"
end
let ctestptr = cglobal((:ctest, "libccalltest")),
ctest = StackTraces.lookup(ctestptr)
@test length(ctest) == 1
@test ctest[1].func === :ctest
@test ctest[1].linfo === nothing
@test ctest[1].from_c
@test ctest[1].pointer === UInt64(ctestptr)
end
# issue #19655
let st = stacktrace(empty!(backtrace()))
# not in a `catch`, so should return an empty StackTrace
@test isempty(st)
@test isa(st, StackTrace)
end
module StackTracesTestMod
unfiltered_stacktrace() = stacktrace()
filtered_stacktrace() = StackTraces.remove_frames!(stacktrace(), StackTracesTestMod)
end
# Test that `removes_frames!` can correctly remove frames from within the module
trace = StackTracesTestMod.unfiltered_stacktrace()
@test occursin("unfiltered_stacktrace", string(trace))
trace = StackTracesTestMod.filtered_stacktrace()
@test !occursin("filtered_stacktrace", string(trace))
let bt, topline = @__LINE__
try
let x = 1
y = 2x
z = 2z-1
end
catch
bt = stacktrace(catch_backtrace())
end
@test bt[1].line == topline+4
end
# Accidental incorrect phi block computation in interpreter
global global_false_bool = false
let bt, topline = @__LINE__
try
let
global read_write_global_bt_test, global_false_bool
if global_false_bool
end
(read_write_global_bt_test, (read_write_global_bt_test=2;))
end
catch
bt = stacktrace(catch_backtrace())
end
@test bt[1].line == topline+6
end
# issue #28990
let bt
try
eval(Expr(:toplevel, LineNumberNode(42, :foo), :(error("blah"))))
catch
bt = stacktrace(catch_backtrace())
end
@test bt[2].line == 42
@test bt[2].file === :foo
end
@noinline f33065(x; b=1.0, a="") = error()
@noinline f33065(x, y; b=1.0, a="", c...) = error()
let bt
try
f33065(0.0f0)
catch
bt = stacktrace(catch_backtrace())
end
@test any(s->startswith(string(s), "f33065(x::Float32; b::Float64, a::String)"), bt)
try
f33065(0.0f0, b=:x)
catch
bt = stacktrace(catch_backtrace())
end
@test any(s->startswith(string(s), "f33065(x::Float32; b::Symbol, a::String)"), bt)
try
f33065(0.0f0, 0.0f0, z=0)
catch
bt = stacktrace(catch_backtrace())
end
@test any(s->startswith(string(s), "f33065(x::Float32, y::Float32; b::Float64, a::String, c::"), bt)
end
struct F49231{a,b,c,d,e,f,g} end
(::F49231)(a,b,c) = error("oops")
@testset "type_depth_limit" begin
tdl = Base.type_depth_limit
str = repr(typeof(view([1, 2, 3], 1:2)))
@test tdl(str, 0, maxdepth = 1) == "SubArray{…}"
@test tdl(str, 0, maxdepth = 2) == "SubArray{$Int, 1, Vector{…}, Tuple{…}, true}"
@test tdl(str, 0, maxdepth = 3) == "SubArray{$Int, 1, Vector{$Int}, Tuple{UnitRange{…}}, true}"
@test tdl(str, 0, maxdepth = 4) == "SubArray{$Int, 1, Vector{$Int}, Tuple{UnitRange{$Int}}, true}"
@test tdl(str, 3) == "SubArray{…}"
@test tdl(str, 44) == "SubArray{…}"
@test tdl(str, 45) == "SubArray{$Int, 1, Vector{…}, Tuple{…}, true}"
@test tdl(str, 59) == "SubArray{$Int, 1, Vector{…}, Tuple{…}, true}"
@test tdl(str, 60) == "SubArray{$Int, 1, Vector{$Int}, Tuple{UnitRange{…}}, true}"
@test tdl(str, 100) == "SubArray{$Int, 1, Vector{$Int}, Tuple{UnitRange{$Int}}, true}"
str = repr(Vector{V} where V<:AbstractVector{T} where T<:Real)
@test tdl(str, 0, maxdepth = 1) == "Vector{…} where {…}"
@test tdl(str, 0, maxdepth = 2) == "Vector{V} where {T<:Real, V<:AbstractVector{…}}"
@test tdl(str, 0, maxdepth = 3) == "Vector{V} where {T<:Real, V<:AbstractVector{T}}"
@test tdl(str, 20) == "Vector{…} where {…}"
@test tdl(str, 46) == "Vector{…} where {…}"
@test tdl(str, 47) == "Vector{V} where {T<:Real, V<:AbstractVector{T}}"
str = "F49231{Vector,Val{('}','}')},Vector{Vector{Vector{Vector}}},Tuple{Int,Int,Int,Int,Int,Int,Int},Int,Int,Int}"
@test tdl(str, 105) == "F49231{Vector,Val{('}','}')},Vector{Vector{Vector{…}}},Tuple{Int,Int,Int,Int,Int,Int,Int},Int,Int,Int}"
@test tdl(str, 85) == "F49231{Vector,Val{…},Vector{…},Tuple{…},Int,Int,Int}"
# Stacktrace
a = UInt8(81):UInt8(160)
b = view(a, 1:64)
c = reshape(b, (8, 8))
d = reinterpret(reshape, Float64, c)
sqrteach(a) = [sqrt(x) for x in a]
st = try
sqrteach(d)
catch e
stacktrace(catch_backtrace())
end
str = sprint(Base.show_backtrace, st, context = (:limit=>true, :stacktrace_types_limited => Ref(false), :color=>true, :displaysize=>(50,105)))
@test contains(str, "[5] \e[0m\e[1mcollect_to!\e[22m\e[0m\e[1m(\e[22m\e[90mdest\e[39m::\e[0mVector\e[90m{…}\e[39m, \e[90mitr\e[39m::\e[0mBase.Generator\e[90m{…}\e[39m, \e[90moffs\e[39m::\e[0m$Int, \e[90mst\e[39m::\e[0mTuple\e[90m{…}\e[39m\e[0m\e[1m)\e[22m\n\e[90m")
st = try
F49231{Vector,Val{'}'},Vector{Vector{Vector{Vector}}},Tuple{Int,Int,Int,Int,Int,Int,Int},Int,Int,Int}()(1,2,3)
catch e
stacktrace(catch_backtrace())
end
str = sprint(Base.show_backtrace, st, context = (:limit=>true, :stacktrace_types_limited => Ref(false), :color=>true, :displaysize=>(50,132)))
@test contains(str, "[2] \e[0m\e[1m(::$F49231{Vector, Val{…}, Vector{…}, NTuple{…}, $Int, $Int, $Int})\e[22m\e[0m\e[1m(\e[22m\e[90ma\e[39m::\e[0m$Int, \e[90mb\e[39m::\e[0m$Int, \e[90mc\e[39m::\e[0m$Int\e[0m\e[1m)\e[22m\n\e[90m")
end
@testset "Base.StackTraces docstrings" begin
@test isempty(Docs.undocumented_names(StackTraces))
end
|
Julia
|
#include "TTree.h"
#include "TFile.h"
#include "TH1.h"
#include "TCanvas.h"
#include "TMath.h"
#include "TRandom3.h"
#include <iostream>
#include <sstream>
/*
* THINGS TO CHECK BEFORE YOU RUN
* 1. Energy
* 2. Position
* 3. Binning
* 4. Title
*/
double ENERGY = 1000.; //energy in MeV (if known)
double XPOS = 13.;
double YPOS = -0.4999;
double CRYStoMM = 50.;
vector<vector<pair<int, double> > >::iterator vv_iter;
vector<pair<int, double> >::iterator v_iter;
// Changes x and y coordinates in crystal units to the crystal ID
int CoordtoID(int x, int y)
{
return (y + 17) * 35 + (x + 17);
}
// Gets x position from crystalID
int IDtoX(int crystalID)
{
return crystalID%35-17;
}
//Gets y position from crystalID
int IDtoY(int crystalID)
{
return crystalID/35-17;
}
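// Worked example: the centre crystal (x, y) = (0, 0) of the 35x35 array maps
// to ID (0 + 17) * 35 + (0 + 17) = 612, and IDtoX(612) == 0, IDtoY(612) == 0.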
// Returns distance between two crystals as defined by minimal path
int dist(int crysFID, int crysSID)
{
return TMath::Max(TMath::Abs(IDtoX(crysFID)-IDtoX(crysSID)),
TMath::Abs(IDtoY(crysFID)-IDtoY(crysSID)));
}
int crystalNumOptimized(vector<pair<int, double> > *shower)
{
double energySum(0.);
double next(0.);
int n(0);
vector<pair<int, double> >::iterator a;
for (a=shower->begin(); a!=shower->end(); ++a)
{
if (n<1) {n++; energySum+= a->second;}
else
{
next=a->second;
if (next/(energySum) < .5/a->second)
{return n;}
else
{energySum+= next;
n++;
}
}
}
return n;
}
// Returns the total energy in all crystals of a shower
double clusterDep(vector<pair<int, double> > *shower)
{
double totEnergy(0.);
for (v_iter=shower->begin(); v_iter!=shower->end(); v_iter++)
{totEnergy+=v_iter->second;}
return totEnergy;
}
//checks if a shower is already in the cluster
// ROOT really hates this method.
bool findVector (vector<vector<pair<int, double> > > *detector,
vector<pair<int, double> > shower)
{
// Look through detector
vector<vector<pair<int, double> > >::iterator a;
for (a=detector->begin(); a!=detector->end(); ++a)
{ // remember that crystal ID's are ordered, and touching clusters have
// the same crystalIDs
if ((a->front()).first==(shower.front()).first) return true;
}
return false;
}
bool findPair (vector<vector<pair<int, double> > > *detector)
{
vector<vector<pair<int, double> > >::iterator a;
vector<vector<pair<int, double> > >::iterator b;
vector<pair<int, double> >::iterator c;
vector<pair<int, double> >::iterator d;
for (a=detector->begin(); a!=detector->end()-1; a++)
{
for (b=a+1; b!=detector->end(); b++)
{
for (c=a->begin(); c!=a->end(); c++)
{for (d=b->begin(); d!=b->end(); d++)
if (c->first==d->first) {return true;}
}
}
}
return false;
}
pair<double, pair<double, double> > reconstruct(vector<pair<int, double> > shower)
{
double energy(0.), xPos(0.), yPos(0.);
//looks at crystals in the shower
for (v_iter=shower.begin(); v_iter!=shower.end(); ++v_iter)
{
energy+= v_iter->second;
xPos+=IDtoX(v_iter->first)*v_iter->second;
yPos+=IDtoY(v_iter->first)*v_iter->second;
}
//takes weighted average
xPos/=energy; yPos/=energy;
pair<double, double> position(xPos, yPos);
pair<double, pair<double, double> > photon(energy, position);
return photon;
}
vector<pair<int, double> > generateBumpMap(double bumpEnergy, double address[],
vector<pair<int, double> > shower)
{
vector<pair<int, double> > hitMap;
int ID(0);
for (v_iter=shower.begin(); v_iter!=shower.end(); v_iter++)
{
if (v_iter->second > bumpEnergy) {
int counter(0);
ID = v_iter->first;
for (int x=-1; x<2; x++) {
for (int y=-1; y<2; y++) {
int ngbrID = CoordtoID(IDtoX(ID)+x, IDtoY(ID)+y);
if (address[ID] > address[ngbrID])
{ counter++;}
else
{}
}
}
if (counter ==8) {hitMap.push_back(*v_iter);}
}
}
return hitMap;
}
pair<int, double> reconstructID (vector<pair<int, double> > shower)
{
pair<double, pair<double, double> > photon = reconstruct(shower);
int xVal = (int) (photon.second.first + .5);
int yVal = (int) (photon.second.second + .5);
pair<int, double> reconstructed(CoordtoID(xVal, yVal), photon.first);
return reconstructed;
}
//Sorts energies from largest to smallest
vector<pair<int, double> > energySort(vector<pair<int, double> > shower)
{
// the map keys on energy, so it keeps the hits ordered for us
std::map<double, int> myMap;
for (v_iter=shower.begin(); v_iter!=shower.end(); v_iter++)
{
myMap[v_iter->second] = v_iter->first;
}
// walk the map in reverse to list energies from largest to smallest
std::map<double, int>::reverse_iterator m;
vector<pair<int, double> > sorted;
for (m=myMap.rbegin(); m!=myMap.rend(); ++m)
{
pair<int, double> orderHit(m->second, m->first);
sorted.push_back(orderHit);
}
return sorted;
}
vector<pair<int,double> > * DFS(pair<int, double> start, double energyThreshLo, vector<pair<int, double> > * shower,
double address[])
{
shower->push_back(start);
for (int x=-1; x<2; x++) {
for (int y=-1; y<2; y++) {
int ngbrID = CoordtoID(IDtoX(start.first)+x, IDtoY(start.first)+y);
double ngbrEn = address[ngbrID];
pair<int, double> ngbr(ngbrID, ngbrEn);
if (ngbrEn>energyThreshLo)
{
vector<int> showerID;
//no method for searching pairs
for (int f=0; f<shower->size(); f++) {showerID.push_back(((*shower)[f]).first);}
if (std::find(showerID.begin(), showerID.end(), ngbrID)!=showerID.end())
{continue;}
// if it has enough energy and has not been counted
else { shower = DFS(ngbr, energyThreshLo, shower,address);}
}
}
}
//put crystals in correct order to make other methods simpler
std::sort(shower->begin(), shower->end());
return shower;
}
void resolutionPlots()
{
cout << "Starting plots..." << endl;
TRandom3* randomGen = new TRandom3(12191982);
TFile* file = new TFile("complete.root");
TTree* tree = (TTree *)file->Get("Signal");
int nEvents = tree->GetEntries();
double addresses[1225] = {};
for (int k=0; k<1225; k++){
std::stringstream ss2;
ss2 << k;
string str = "Crystal_"+ss2.str();
const char* charstr = str.c_str();
tree->SetBranchAddress(charstr, &addresses[k]);
}
double b1, b2;
double energyThreshHi = 5.;
double energyThreshLo = 0.;
TH1D* energyReso = new TH1D("energyReso", "Energy_resolution", 400, -200, 199);
TH1D* posResoX = new TH1D("posResoX", "XPosition_resolution", 80, -40, 39);
TH1D* posResoY = new TH1D("posResoY", "YPosition_resolution", 80, -40, 39);
//iterate through all events
for (int i = 0; i < nEvents; i++)
{
tree->GetEntry(i);
vector<pair<int, double> > geant; //stores all geant data
vector<pair<int, double> > hitMap; //stores all hits above threshold
for(int w = 0; w < 1225; w++)
{
pair<int, double> hit(w, addresses[w]);
geant.push_back(hit);
if (addresses[w] > energyThreshHi)
{ hitMap.push_back(hit);}
}
vector<vector<pair<int, double> > > clusters;
for (v_iter=hitMap.begin(); v_iter!=hitMap.end(); v_iter++)
{
vector<pair<int, double> > shower;
clusters.push_back(*DFS(*v_iter,
energyThreshLo,
&shower,
addresses));
}
vector<vector<pair<int, double> > > detector;
for (vv_iter=clusters.begin(); vv_iter!=clusters.end(); ++vv_iter)
{
if (vv_iter==clusters.begin())
{detector.push_back(*vv_iter);}
else
{
if (!findVector(&detector, *vv_iter))
{detector.push_back(*vv_iter);}
}
}
//unclustering
vector<vector<pair<int, double> > > detector2;
for (vv_iter=detector.begin(); vv_iter!=detector.end(); vv_iter++)
{
vector<pair<int, double> > localMax;
localMax = generateBumpMap(energyThreshHi, addresses, *vv_iter);
if (localMax.size()==0) {continue; }
//First Case: only one bump, treat as one photon.
if (localMax.size() ==1)
{detector2.push_back(*vv_iter);
continue;}
pair<int, double> coe = reconstructID(*vv_iter);
localMax = energySort(localMax);
//Second Case: many bumps, but centered logically, treat as one photon.
if (false)
{detector2.push_back(*vv_iter);
continue;}
//Hopefully optimized for a two pronged event
else
{
for (int q=0; q<2; q++)
{
vector<pair<int, double> > newShower;
int ind = (q+1)%2;
b1 = localMax[q].second;
b2 = localMax[ind].second;
vector<pair<int, double> >::iterator a;
for (a=vv_iter->begin(); a!=vv_iter->end(); ++a)
{
double energy(0.);
int d1 = dist(localMax[q].first, a->first);
int d2 = dist(localMax[ind].first, a->first);
energy = a->second*b1*pow(.1, d1-1)/(b1*pow(.1, d1-1)+b2*pow(.1, d2-1));
pair<int, double> newHit(a->first, energy);
newShower.push_back(newHit);
}
detector2.push_back(newShower); // keep each split shower as its own photon candidate
}
}
}
vector<vector<pair<int, double> > > ordered;
int num(0);
for (vv_iter=detector2.begin(); vv_iter!=detector2.end(); ++vv_iter)
{
vector<pair<int, double> > shower = energySort(*vv_iter);
num = crystalNumOptimized(&shower);
if (shower.size()>num)
{shower.erase(shower.begin()+num, shower.end());}
ordered.push_back(shower);
}
pair<double, pair<double, double> > photon;
for (vv_iter=ordered.begin(); vv_iter!=ordered.end(); ++vv_iter)
{
photon = reconstruct(*vv_iter);
energyReso->Fill(photon.first-ENERGY);
posResoX->Fill((photon.second.first-XPOS)*CRYStoMM);
posResoY->Fill((photon.second.second-YPOS)*CRYStoMM);
}
}
energyReso->GetXaxis()->SetTitle("Energy Resolution:= (measrued-expected) in MeV");
posResoX->GetXaxis()->SetTitle("Position Resolution:=(measured-expected) in mm");
posResoY->GetXaxis()->SetTitle("Position Resolution:=(measured-expected) in mm");
if (XPOS-.2<0)
{
energyReso->SetTitle("Energy Resolution (Center)");
posResoX->SetTitle("X Position Resolution (Center)");
posResoY->SetTitle("Y Position Resolution (Center)");
}
else if (YPOS>13.4)
{
energyReso->SetTitle("Energy Resolution (Corner)");
posResoX->SetTitle("X Position Resolution (Corner)");
posResoY->SetTitle("Y Position Resolution (Corner)");
}
else
{
energyReso->SetTitle("Energy Resolution (Side)");
posResoX->SetTitle("X Position Resolution (Side)");
posResoY->SetTitle("Y Position Resolution (Side)");
}
TCanvas* canvas = new TCanvas("canvas", "canvas", 1000, 500);
canvas->Divide(3,1);
canvas->cd(1); energyReso->Draw();
canvas->cd(2); posResoX->Draw();
canvas->cd(3); posResoY->Draw();
}
|
C++
|
private let table: [UInt16] = [
0x0000, 0xc0c1, 0xc181, 0x0140, 0xc301, 0x03c0, 0x0280, 0xc241,
0xc601, 0x06c0, 0x0780, 0xc741, 0x0500, 0xc5c1, 0xc481, 0x0440,
0xcc01, 0x0cc0, 0x0d80, 0xcd41, 0x0f00, 0xcfc1, 0xce81, 0x0e40,
0x0a00, 0xcac1, 0xcb81, 0x0b40, 0xc901, 0x09c0, 0x0880, 0xc841,
0xd801, 0x18c0, 0x1980, 0xd941, 0x1b00, 0xdbc1, 0xda81, 0x1a40,
0x1e00, 0xdec1, 0xdf81, 0x1f40, 0xdd01, 0x1dc0, 0x1c80, 0xdc41,
0x1400, 0xd4c1, 0xd581, 0x1540, 0xd701, 0x17c0, 0x1680, 0xd641,
0xd201, 0x12c0, 0x1380, 0xd341, 0x1100, 0xd1c1, 0xd081, 0x1040,
0xf001, 0x30c0, 0x3180, 0xf141, 0x3300, 0xf3c1, 0xf281, 0x3240,
0x3600, 0xf6c1, 0xf781, 0x3740, 0xf501, 0x35c0, 0x3480, 0xf441,
0x3c00, 0xfcc1, 0xfd81, 0x3d40, 0xff01, 0x3fc0, 0x3e80, 0xfe41,
0xfa01, 0x3ac0, 0x3b80, 0xfb41, 0x3900, 0xf9c1, 0xf881, 0x3840,
0x2800, 0xe8c1, 0xe981, 0x2940, 0xeb01, 0x2bc0, 0x2a80, 0xea41,
0xee01, 0x2ec0, 0x2f80, 0xef41, 0x2d00, 0xedc1, 0xec81, 0x2c40,
0xe401, 0x24c0, 0x2580, 0xe541, 0x2700, 0xe7c1, 0xe681, 0x2640,
0x2200, 0xe2c1, 0xe381, 0x2340, 0xe101, 0x21c0, 0x2080, 0xe041,
0xa001, 0x60c0, 0x6180, 0xa141, 0x6300, 0xa3c1, 0xa281, 0x6240,
0x6600, 0xa6c1, 0xa781, 0x6740, 0xa501, 0x65c0, 0x6480, 0xa441,
0x6c00, 0xacc1, 0xad81, 0x6d40, 0xaf01, 0x6fc0, 0x6e80, 0xae41,
0xaa01, 0x6ac0, 0x6b80, 0xab41, 0x6900, 0xa9c1, 0xa881, 0x6840,
0x7800, 0xb8c1, 0xb981, 0x7940, 0xbb01, 0x7bc0, 0x7a80, 0xba41,
0xbe01, 0x7ec0, 0x7f80, 0xbf41, 0x7d00, 0xbdc1, 0xbc81, 0x7c40,
0xb401, 0x74c0, 0x7580, 0xb541, 0x7700, 0xb7c1, 0xb681, 0x7640,
0x7200, 0xb2c1, 0xb381, 0x7340, 0xb101, 0x71c0, 0x7080, 0xb041,
0x5000, 0x90c1, 0x9181, 0x5140, 0x9301, 0x53c0, 0x5280, 0x9241,
0x9601, 0x56c0, 0x5780, 0x9741, 0x5500, 0x95c1, 0x9481, 0x5440,
0x9c01, 0x5cc0, 0x5d80, 0x9d41, 0x5f00, 0x9fc1, 0x9e81, 0x5e40,
0x5a00, 0x9ac1, 0x9b81, 0x5b40, 0x9901, 0x59c0, 0x5880, 0x9841,
0x8801, 0x48c0, 0x4980, 0x8941, 0x4b00, 0x8bc1, 0x8a81, 0x4a40,
0x4e00, 0x8ec1, 0x8f81, 0x4f40, 0x8d01, 0x4dc0, 0x4c80, 0x8c41,
0x4400, 0x84c1, 0x8581, 0x4540, 0x8701, 0x47c0, 0x4680, 0x8641,
0x8201, 0x42c0, 0x4380, 0x8341, 0x4100, 0x81c1, 0x8081, 0x4040,
]
func crc16(input: UInt8, crc: UInt16) -> UInt16 {
let index = Int(UInt16(crc & 0xff) ^ UInt16(input))
let t1 = UInt16(crc >> 8)
let t2: UInt16 = table[index]
return t1 ^ t2
}
func crc16(input: [UInt8]) -> UInt16 {
var crc: UInt16 = 0
for byte in input {
crc = crc16(input: byte, crc: crc)
}
return crc
}
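// Sanity check (assumption: this is the reflected CRC-16/ARC table for
// polynomial 0xA001, whose standard check value for "123456789" is 0xBB3D):
// assert(crc16(input: Array("123456789".utf8)) == 0xbb3d)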
|
Swift
|
[Version]
Major=7
Minor=0
[Main]
Type=temporal
CalculateFlow=yes
CalculateScalar=no
Equations=incompressible
TermAdvection=convective
TermViscous=explicit
TermDiffusion=explicit
SpaceOrder2=CompactDirect6
TimeOrder=RungeKuttaExplicit4
TimeStep=-0.016000
TimeCFL=1.20000
[Iteration]
Start=0
End=10
Restart=10
Statistics=5
IteraLog=1
ObsLog=Ekman
[Control]
FlowLimit=no
ScalLimit=yes
[Parameters]
Reynolds=25000
Schmidt=1.0
Rossby=1.0
Froude=0.01
[ViscChange]
Time=0.01
[Grid]
Imax=128
Imax(*)=64
Jmax=96
Jmax(*)=96
Kmax=128
Kmax(*)=64
XUniform=yes
YUniform=no
ZUniform=yes
XPeriodic=yes
YPeriodic=no
ZPeriodic=yes
[Flow]
VelocityX=0.0
VelocityY=0.0
VelocityZ=0.0
Density=1.0
ProfileVelocityX=Ekman
YMeanRelativeVelocityX=0.0
ThickVelocityX=0.004
DeltaVelocityX=1.0
[Scalar]
ProfileScalar1=Erf
ThickScalar1=0.0006
DeltaScalar1=2.0
YMeanRelativeScalar1=0.0
MeanScalar1=1.0
[Gravity]
Type=Linear
Parameters=0.0
Vector=0.0,0.0,0.0
[Rotation]
Type=normalized
[BoundaryConditions]
VelocityJmin=noslip
VelocityJmax=freeslip
Scalar1Jmin=dirichlet
Scalar1Jmax=neumann
[BufferZone]
Type=none
LoadBuffer=no
PointsUJmax=20
PointsSJmax=20
ParametersU=1.57,2.0
ParametersS=1.57,2.0
[Statistics]
Averages=yes
Spectrums=no
Correlations=no
Pdfs=no
Intermittency=no
[IniFields]
Velocity=PotentialBroadband
Scalar=None
ForceDilatation=no
ProfileIniK=GaussianSurface
YMeanIniK=0.0
ThickIniK=0.004
NormalizeK=0.00015
[Broadband]
f0=19.89
Sigma=3.32
Spectrum=gaussian
Distribution=gaussian
[IniGridOx]
periodic=yes
segments=1
points_1=129
scales_1=0.135
opts_1=uniform
[IniGridOy]
periodic=no
segments=1
points_1=96
scales_1=0.201972656
opts_1=tanh
vals_1=0.21,6.0,0.0168, 0,-0.75,-0.06
[IniGridOz]
periodic=yes
segments=1
points_1=129
scales_1=0.135
opts_1=uniform
#[PostProcessing]
Files=0
ParamVisuals=0,1,2,3,9,11,14
ParamSpectra=2
ParamTransform=3
ParamFFormat=1
ParamPdfs=1
Subdomain=1,2048,1,192,1,2048
Partition=0
Format=ensight
|
INI
|
@echo off
wcl386 -zq -l=stub32x lfb.asm
sc -bs -q lfb
del *.obj
|
Batchfile
|
TOP=../../
include $(TOP)/mk/boilerplate.mk
include $(TOP)/mk/test.mk
|
Makefile
|
#' One stage joint meta function
#'
#' Function to allow a one stage joint model (data from all studies analysed in
#' one model) to be fitted to data from multiple studies. The function allows
#' one longitudinal and one time-to-event outcome, and can accommodate baseline
#' hazard stratified or not stratified by study, as well as random effects at
#' the individual level and the study level. Currently only zero mean random
#' effects only proportional association supported - see Wulfsohn and Tsiatis
#' 1997
#'
#' @param data an object of class jointdata containing the variables named in
#' the model formulae
#' @param long.formula a formula object with the response variable, and the
#' covariates to include in the longitudinal sub-model
#' @param long.rand.ind a vector of character strings to indicate what variables
#' to assign individual level random effects to. A maximum of three
#' individual level random effects can be assigned. To assign a random
#' intercept include 'int' in the vector. To not include an individual level
#' random intercept include 'noint' in the vector. For example to fit a model
#' with individual level random intercept and random slope set
#' \code{long.rand.ind = c('int', 'time')}, where \code{'time'} is the
#' longitudinal time variable in the \code{data}.
#' @param long.rand.stud a vector of character strings to indicate what
#' variables to assign study level random effects to. If no study level
#' random effects then this either not specified in function call or set to
#' \code{NULL}. If a study level random intercept is required, include the
#' name of the study membership variable for example \code{long.rand.stud =
#' 'study'}.
#' @param sharingstrct currently must be set to \code{'randprop'}. This gives a
#' model that shares the zero mean random effects (at both individual and
#' study level if specified) between the sub-models. Separate association
#' parameters are calculated for the linear combination of random effects at
#' each level. There are plans to expand to more sharing structures in the
#' future.
#' @param surv.formula a formula object with the survival time, censoring
#' indicator and the covariates to include in the survival sub-model. The
#' response must be a survival object as returned by the
#' \code{\link[survival]{Surv}} function.
#' @param gpt the number of quadrature points across which the integration with
#' respect to the random effects will be performed. If random effects are
#' specified at both the individual and the study level, the same number of
#' quadrature points is used in both cases. Defaults to \code{gpt = 5}.
#' @param lgpt the number of quadrature points which the log-likelihood is
#' evaluated over following a model fit. This defaults to \code{lgpt = 7}.
#' @param max.it the maximum number of iterations of the EM algorithm that the
#' function will perform. Defaults to \code{max.it = 350} although more
#' iterations could be required for large complex datasets.
#' @param tol the tolerance level used to determine convergence in the EM
#' algorithm. Defaults to \code{tol = 0.001}.
#' @param study.name a character string denoting the name of the variable in the
#' baseline dataset in \code{data} holding study membership, for example
#' \code{study.name = 'study'}.
#' @param strat logical value: if \code{TRUE} then the survival sub-model is
#' calculated with a baseline stratified by study. Otherwise baseline is
#' unstratified.
#' @param longsep logical value: if \code{TRUE} then parameter estimates, model
#' fit and the log-likelihood from a separate linear mixed model analysis of
#' the longitudinal data are returned (see the \code{\link[lme4]{lmer}}
#' function). The separate longitudinal model fit has the same specification
#' as the longitudinal sub-model of the joint model.
#' @param survsep logical value: if \code{TRUE} then parameter estimates, model
#' fit and log-likelihood from a separate analysis of the survival data using
#' the Cox Proportional Hazards model are returned (see
#' \code{\link[survival]{coxph}} function for more details). This survival
#' fit has the same specification (apart from the association structure) as
#' the survival sub-model in the joint model.
#' @param bootrun logical value: if \code{TRUE} then the log-likelihood for the
#' model is not calculated. This option is available so that when
#' bootstrapping to obtain standard errors, as the log-likelihood is not
#' needed, it is not calculated, thus speeding up the bootstrapping process.
#' @param print.detail logical value: if \code{TRUE} then details of the
#' parameter estimates at each iteration of the EM algorithm are printed to
#' the console.
#'
#' @section Details: The \code{jointmeta1} function fits a one stage joint model
#' to survival and longitudinal data from multiple studies. This model is an
#' extension of the model proposed by Wulfsohn and Tsiatis (1997). The model
#' must contain at least one individual level random effect (specified using
#' the \code{long.rand.ind} argument). The model can also contain study level
#' random effects (specified using the \code{long.rand.stud} argument), which
#' can differ from the individual level random effects. The maximum number of
#' random effects that can be specified at each level is three. Note that the
#' fitting and bootstrapping time increases as the number of included random
#' effects increases. The model can also include a baseline hazard stratified
#' by study, or can utilise a common baseline across the studies in the
#' dataset. Interaction terms can be specified in either the longitudinal or
#' the survival sub-model.
#'
#' The longitudinal sub-model is a mixed effects model. If both individual
#' level and study level random effects are included in the function call,
#' then the sub-model has the following format:
#'
#' \deqn{Y_{kij} = X_{1kij}\beta_{1} + Z^{(2)}_{1kij}b^{(2)}_{ki} +
#' Z^{(3)}_{1kij}b^{(3)}_{k} + \epsilon_{kij}}
#'
#' Otherwise, if only individual level random effects are included in the
#' function call, then the longitudinal sub-model has the following format:
#'
#' \deqn{Y_{kij} = X_{1kij}\beta_{1} + Z^{(2)}_{1kij}b^{(2)}_{ki} +
#' \epsilon_{kij}}
#'
#' In the above equation, \eqn{Y} represents the longitudinal outcome and
#' \eqn{X_1} represents the design matrix for the longitudinal fixed effects.
#' The subscript 1 is used to distinguish between items from the longitudinal
#' sub-model and items from the survival sub-model (which contain a subscript
#' 2). The design matrices for random effects are represented using \eqn{Z},
#' fixed effect coefficients are represented by \eqn{\beta}, random effects by
#' \eqn{b} and the measurement error by \eqn{\epsilon}. Study membership is
#' represented by the subscript \eqn{k} whilst individuals are identified by
#' \eqn{i} and time points at which they are measured by \eqn{j}. The
#' longitudinal outcome is assumed continuous.
#'
#' Currently this function only supports one linking structure between the
#' sub-models, namely a random effects only proportional sharing structure. In
#' this structure, the zero mean random effects from the longitudinal
#' sub-model are inserted into the survival sub-model, with a common
#' association parameter for each level of random effects. Therefore the
#' survival sub-model (for a case without baseline stratified by study) takes
#' the following format:
#'
#' \deqn{\lambda_{ki}(t) = \lambda_{0}(t)exp(X_{2ki}\beta_{2} +
#' \alpha^{(2)}(Z^{(2)}_{1ki}b^{(2)}_{ki}) +
#' \alpha^{(3)}(Z^{(3)}_{1ki}b^{(3)}_{k})) }
#'
#' Otherwise, if only individual level random effects are included in the
#' function call, this reduces to:
#'
#' \deqn{\lambda_{ki}(t) = \lambda_{0}(t)exp(X_{2ki}\beta_{2} +
#' \alpha^{(2)}(Z^{(2)}_{1ki}b^{(2)}_{ki}))}
#'
#' In the above equation, \eqn{\lambda_{ki}(t)} represents the survival time
#' of the individual \eqn{i} in study \eqn{k}, and \eqn{\lambda_{0}(t)}
#' represents the baseline hazard. If a stratified baseline hazard were
#' specified this would be replaced by \eqn{\lambda_{0k}(t)}. The design
#' matrix for the fixed effects in the survival sub-model is represented by
#' \eqn{X_{2ki}}, with fixed effect coefficients represented by
#' \eqn{\beta_{2}}. Association parameters quantifying the link between the
#' sub-models are represented by \eqn{\alpha} terms.
#'
#' The model is fitted using an EM algorithm, starting values for which are
#' extracted from initial separate longitudinal and survival fits. Pseudo
#' adaptive Gauss - Hermite quadrature is used to evaluate functions of the
#' random effects in the EM algorithm, see Rizopoulos 2012.
#'
#'
#' @return An object of class jointmeta1. See \code{\link{jointmeta1.object}}
#'
#' @export
#'
#' @import survival stats
#'
#' @references Wulfsohn, M.S. and A.A. Tsiatis, A Joint Model for Survival and
#' Longitudinal Data Measured with Error. 1997, International Biometric
#' Society. p. 330
#'
#' Rizopoulos, D. (2012) Fast fitting of joint models for longitudinal and
#' event time data using a pseudo-adaptive Gaussian quadrature rule.
#' Computational Statistics & Data Analysis 56 (3) p.491-501
#'
#'
#'
#'
#' @examples
#' #change example data to jointdata object
#' jointdat2<-tojointdata(longitudinal = simdat2$longitudinal,
#' survival = simdat2$survival, id = 'id',longoutcome = 'Y',
#' timevarying = c('time','ltime'),
#' survtime = 'survtime', cens = 'cens',time = 'time')
#'
#' #set variables to factors
#' jointdat2$baseline$study <- as.factor(jointdat2$baseline$study)
#' jointdat2$baseline$treat <- as.factor(jointdat2$baseline$treat)
#'
#' #fit multi-study joint model
#' #note: for demonstration purposes only - max.it restricted to 5
#' #model would need more iterations to truly converge
#' onestagefit<-jointmeta1(data = jointdat2, long.formula = Y ~ 1 + time +
#' treat + study, long.rand.ind = c('int', 'time'),
#' long.rand.stud = c('treat'),
#' sharingstrct = 'randprop',
#' surv.formula = Surv(survtime, cens) ~ treat,
#' study.name = 'study', strat = TRUE, max.it=5)
#'
jointmeta1 <- function(data, long.formula, long.rand.ind, long.rand.stud = NULL,
sharingstrct = c("randprop", "randsep", "value", "slope", "valandslope"),
surv.formula, gpt, lgpt, max.it, tol, study.name, strat = F, longsep = F,
survsep = F, bootrun = F, print.detail = F) {
if (!inherits(data, "jointdata")) {
stop("Data should be supplied in jointdata format -
run the tojointdata function if it is not in jointdata format")
}
Call <- match.call()
id.name <- data$subj.col
time.long <- data$time.col
long.formula <- as.formula(long.formula)
long.formula.orig <- long.formula
surv.formula <- as.formula(surv.formula)
if (missing(gpt)) {
gpt <- 5
}
if (missing(lgpt)) {
lgpt <- 7
}
if (missing(max.it)) {
max.it <- 350
}
if (missing(tol)) {
tol <- 0.001
}
if (missing(bootrun)) {
bootrun <- FALSE
}
if (missing(sharingstrct)) {
stop("No sharing structure specified")
}
if ((sharingstrct %in% c("randprop", "randsep", "value", "slope", "valandslope")) ==
FALSE) {
stop("Invalid sharing structure specified")
}
if (sharingstrct != "randprop") {
stop("Currently jointmeta only supports randprop sharing structures")
}
if (missing(long.rand.ind) == TRUE) {
stop("Please specify at least one random effect
at the individual level in long.rand.ind")
}
if (length(long.rand.ind) == 0) {
stop("Please specify at least one random effect
at the individual level in long.rand.ind")
}
if (length(which(("noint" == long.rand.ind) == F)) == 0) {
stop("Please specify at least one random effect
at the individual level in long.rand.ind")
}
if (("int" %in% long.rand.ind) == TRUE) {
if (("noint" %in% long.rand.ind) == TRUE) {
stop("Both the option for no random intercept (noint)
and random intercept (int) specified in long.rand.ind")
}
}
if (("int" %in% long.rand.ind) == TRUE) {
long.rand.ind[which((long.rand.ind %in% "int") == TRUE)] <- "(Intercept)"
if (which(long.rand.ind %in% "(Intercept)") != 1) {
long.rand.ind <- long.rand.ind[-which(long.rand.ind %in% "(Intercept)")]
long.rand.ind <- c("(Intercept)", long.rand.ind)
}
}
if (missing(study.name)) {
stop("Please supply name of study indicator variable to
\"study.name\" in the function call")
}
if (is.null(long.rand.stud) == F) {
if (study.name %in% long.rand.stud) {
if (which(long.rand.stud %in% study.name) != 1) {
long.rand.stud <- long.rand.stud[-which(long.rand.stud %in%
study.name)]
long.rand.stud <- c(study.name, long.rand.stud)
}
}
}
studies <- as.character(unique(data$baseline[[study.name]]))
numstudies <- length(studies)
if (any(sapply(data$baseline, "class") == "factor")) {
data$baseline <- droplevels(data$baseline)
}
longdat2 <- merge(data$longitudinal, data$baseline, by = id.name, sort = FALSE)
long.frame <- model.frame(long.formula, data = longdat2, na.action = na.pass)
long.cov <- model.matrix(long.formula, long.frame)
long.terms <- terms(long.formula, data = longdat2)
long.names <- colnames(long.cov)
rll <- !is.na(data$longitudinal[[names(long.frame[1])]])
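# drop longitudinal rows whose response or any model covariate is missing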
for (i in 1:length(rll)) {
if (length(which(is.na(long.cov[i, ]))) > 0) {
rll[i] <- FALSE
}
}
q <- 0
for (count in 1:length(long.rand.ind)) {
if (long.rand.ind[count] != "noint") {
q <- q + 1
if (length(which(grepl(long.rand.ind[count], colnames(long.cov)) ==
TRUE)) == 0) {
if (grepl(".", long.rand.ind[count])) {
temp <- unlist(strsplit(long.rand.ind[count], "."))
combs <- expand.grid(1:length(temp), 1:length(temp))
present <- FALSE
for (i in 1:nrow(combs)) {
if (!(combs[i, 1] == combs[i, 2])) {
if (length(which(grepl(paste(temp[combs[i, 1]], temp[combs[i,
2]], sep = "."), colnames(long.cov))) == TRUE) >
0) {
present <- TRUE
long.rand.ind[count] <- paste(temp[combs[i, 1]],
temp[combs[i, 2]], sep = ".")
}
}
}
}
if (!present) {
stop("Individual level random effects included
in model with no corresponding fixed effect")
}
}
}
}
if (q > 3) {
stop("Model only supports maximum of three individual level random effects")
}
if (is.null(long.rand.stud) == FALSE) {
r <- 0
for (count in 1:length(long.rand.stud)) {
if (long.rand.stud[count] != study.name) {
r <- r + 1
if (length(which(grepl(long.rand.stud[count], colnames(long.cov)) ==
TRUE)) == 0) {
if (grepl(".", long.rand.stud[count])) {
temp <- unlist(strsplit(long.rand.stud[count], "."))
combs <- expand.grid(1:length(temp), 1:length(temp))
present <- FALSE
for (i in 1:nrow(combs)) {
if (!(combs[i, 1] == combs[i, 2])) {
if (length(which(grepl(paste(temp[combs[i, 1]],
temp[combs[i, 2]], sep = "."), colnames(long.cov))) ==
TRUE) > 0) {
present <- TRUE
long.rand.stud[count] <- paste(temp[combs[i,
1]], temp[combs[i, 2]], sep = ".")
}
}
}
}
if (!present) {
stop("Study level random effects included
in model with no corresponding fixed effect")
}
}
} else {
r <- r + 1
}
}
if (r > 3) {
stop("Model only supports maximum of three study level random effects")
}
} else {
r <- NULL
}
longdat <- cbind(data$longitudinal[[id.name]][rll], long.frame[, 1][rll],
data$longitudinal[[time.long]][rll], longdat2[[study.name]][rll],
long.cov[rll, ])
longdat <- as.data.frame(longdat)
missingids <- unique(data$longitudinal[[id.name]][!rll])
names(longdat) <- c(id.name, names(long.frame)[1], time.long, study.name,
long.names)
long.formula <- as.formula(paste(as.character(long.formula)[2], "~",
paste(names(longdat)[5:ncol(longdat)], collapse = " + "), sep = ""))
p1 <- length(5:ncol(longdat))
notinteractionterms <- names(longdat[, 5:ncol(longdat)])[!(grepl(":",
names(longdat[, 5:ncol(longdat)])))]
for (count in 1:length(long.rand.ind)) {
if (length(grep(paste("^", long.rand.ind[count], "$", sep = ""),
notinteractionterms)) > 0) {
long.rand.ind[count] <- notinteractionterms[grep(paste("^",
long.rand.ind[count], "$", sep = ""), notinteractionterms)]
} else if (length(grep(paste("^", long.rand.ind[count], "$", sep = ""),
notinteractionterms)) == 0) {
if (long.rand.ind[count] %in% colnames(data$baseline)) {
if (class(data$baseline[, which(colnames(data$baseline) ==
long.rand.ind[count])]) == "factor") {
formtemp <- as.formula(paste("~", colnames(data$baseline)[which(colnames(data$baseline) ==
long.rand.ind[count])]))
matrixtemp <- model.matrix(formtemp, data$baseline)
long.rand.ind[count] <- colnames(matrixtemp)[2:ncol(matrixtemp)]
}
} else if (long.rand.ind[count] %in% colnames(data$longitudinal)) {
if (class(data$longitudinal[, which(colnames(data$longitudinal) ==
long.rand.ind[count])]) == "factor") {
formtemp <- as.formula(paste("~", colnames(data$longitudinal)[which(colnames(data$longitudinal) ==
long.rand.ind[count])]))
matrixtemp <- model.matrix(formtemp, data$longitudinal)
long.rand.ind[count] <- colnames(matrixtemp)[2:ncol(matrixtemp)]
}
}
}
}
q <- length(long.rand.ind)
if (q > 3) {
stop("Model only supports maximum of three individual level random effects")
}
if (is.null(long.rand.stud) == FALSE) {
for (count in 1:length(long.rand.stud)) {
if (long.rand.stud[count] != study.name) {
if (length(grep(paste("^", long.rand.stud[count], "$",
sep = ""), notinteractionterms)) > 0) {
long.rand.stud[count] <- notinteractionterms[grep(paste("^",
long.rand.stud[count], "$", sep = ""), notinteractionterms)]
} else if (length(grep(paste("^", long.rand.stud[count],
"$", sep = ""), notinteractionterms)) == 0) {
if (long.rand.stud[count] %in% colnames(data$baseline)) {
if (class(data$baseline[, which(colnames(data$baseline) ==
long.rand.stud[count])]) == "factor") {
formtemp <- as.formula(paste("~", colnames(data$baseline)[which(colnames(data$baseline) ==
long.rand.stud[count])]))
matrixtemp <- model.matrix(formtemp, data$baseline)
long.rand.stud[count] <- colnames(matrixtemp)[2:ncol(matrixtemp)]
}
} else if (long.rand.stud[count] %in% colnames(data$longitudinal)) {
if (class(data$longitudinal[, which(colnames(data$longitudinal) ==
long.rand.stud[count])]) == "factor") {
formtemp <- as.formula(paste("~", colnames(data$longitudinal)[which(colnames(data$longitudinal) ==
long.rand.stud[count])]))
matrixtemp <- model.matrix(formtemp, data$longitudinal)
long.rand.stud[count] <- colnames(matrixtemp)[2:ncol(matrixtemp)]
}
}
}
}
}
r <- length(long.rand.stud)
if (r > 3) {
stop("Model only supports maximum of three study level random effects")
}
}
surv.frame <- model.frame(surv.formula, data = cbind(data$survival,
data$baseline))
srv <- model.extract(surv.frame, "response")
surv.terms <- terms(surv.formula, data = cbind(data$survival, data$baseline))
attr(surv.terms, "intercept") <- 1
surv.cov <- model.matrix(surv.terms, data = cbind(data$survival, data$baseline))
namestemp <- colnames(surv.cov)
surv.cov <- as.matrix(surv.cov[, -1])
colnames(surv.cov) <- namestemp[-1]
rss <- as.integer(row.names(surv.cov))
survdat <- cbind(data$survival[[id.name]][rss], srv[rss, 1], srv[rss,
2], data$baseline[[study.name]][rss], surv.cov)
survdat <- as.data.frame(survdat)
names(survdat) <- c(id.name, surv.formula[2][[1]][[2]], surv.formula[2][[1]][[3]],
study.name, colnames(surv.cov))
if (dim(survdat)[2] > 4) {
survdat[, 5:dim(survdat)[2]] <- scale(survdat[, 5:dim(survdat)[2]],
scale = FALSE)
}
survdat2 <- data.frame(data$survival[[id.name]][rss], srv[rss, 1],
srv[rss, 2], data$baseline[[study.name]][rss], surv.frame[, -1])
if (ncol(survdat) > 4) {
surv.formula <- as.formula(paste(as.character(surv.formula)[2],
"~", paste(names(survdat)[5:ncol(survdat)], collapse = " + "),
sep = ""))
names(survdat2) <- c(id.name, surv.formula[2][[1]][[2]], surv.formula[2][[1]][[3]],
study.name, colnames(surv.frame)[2:ncol(surv.frame)])
} else {
surv.formula <- as.formula(paste(as.character(surv.formula)[2],
"~ 1", sep = ""))
names(survdat2) <- c(id.name, surv.formula[2][[1]][[2]], surv.formula[2][[1]][[3]],
study.name, colnames(surv.cov))
}
survdat[, 4] <- survdat2[, 4]
if (ncol(survdat) > 4) {
p2 <- length(5:ncol(survdat))
} else {
p2 <- 0
}
rll2 <- rep(TRUE, nrow(survdat2))
for (i in 1:length(rll2)) {
if (length(which(is.na(survdat2[i, ]))) > 0) {
rll2[i] <- FALSE
}
}
if (length(which(rll2 == FALSE)) > 0) {
missingids <- c(missingids, survdat2[!rll2, 1])
}
if (length(missingids) > 0) {
survdat <- survdat[!(survdat[, 1] %in% missingids), ]
survdat2 <- survdat2[!(survdat2[, 1] %in% missingids), ]
longdat2 <- longdat2[!(longdat2[, 1] %in% missingids), ]
}
sorted <- sortDat(longdat, survdat, longdat2, survdat2)
longdat <- as.data.frame(sorted$long.s)
survdat <- as.data.frame(sorted$surv.s)
longdat2 <- as.data.frame(sorted$long.s2)
survdat2 <- as.data.frame(sorted$surv.s2)
if (is.null(long.rand.stud)) {
ldaests <- longst(longdat = longdat, long.formula.orig = long.formula,
long.rand.ind = long.rand.ind, longdat2 = longdat2, id.name = id.name,
study.name = study.name, studies = studies)
} else {
ldaests <- longst(longdat = longdat, long.formula.orig = long.formula,
long.rand.ind = long.rand.ind, long.rand.stud = long.rand.stud,
longdat2 = longdat2, id.name = id.name, study.name = study.name,
studies = studies)
}
survests <- survst(survdat = survdat, surv.formula = surv.formula,
survdat2 = survdat2, strat = strat, study.name = study.name)
sep.ll <- ldaests$log.like + survests$log.like[2]
sep.loglik <- list(seplhood = sep.ll, sepy = ldaests$log.like, sepn = survests$log.like[2])
paraests <- c(ldaests, survests)
if (sharingstrct == "randprop") {
if (bootrun == FALSE) {
message("Running EM algorithm...")
}
jointfit <- EMalgRandprop(data = data, longdat = longdat, survdat = survdat,
long.rand.ind = long.rand.ind, long.rand.stud = long.rand.stud,
id.name = id.name, study.name = study.name, gpt = gpt, max.it = max.it,
tol = tol, time.long = time.long, surv.formula = surv.formula,
long.formula = long.formula, long.formula.orig = long.formula.orig,
paraests = paraests, studies = studies, p1 = p1, p2 = p2, strat = strat,
print.detail = print.detail, bootrun = bootrun, q = q, r = r)
likeests <- c(jointfit, list(rs = survests$rs, sf = survests$sf))
beta1 <- jointfit$beta1
rownames(beta1) <- rownames(paraests$beta1)
if (p2 > 0) {
beta2 <- jointfit$beta2[1:p2, ]
names(beta2) <- names(paraests$beta2)
} else {
beta2 <- NULL
}
fixed <- list(longitudinal = beta1, survival = beta2)
D <- jointfit$D
random_ind <- jointfit$random2
ids.bystudy <- lapply(1:numstudies, function(u) {
survdat[which(survdat[, 4] == studies[u]), 1]
})
random_ind <- lapply(1:numstudies, function(u) {
randtemp <- random_ind[[u]]
colnames(randtemp) <- paste("b2_", 0:(ncol(randtemp) - 1),
sep = "")
rownames(randtemp) <- ids.bystudy[[u]]
randtemp
})
random <- list(random_ind = random_ind)
if ("(Intercept)" %in% long.rand.ind) {
long.rand.ind2 <- long.rand.ind
long.rand.ind2[which(long.rand.ind2 == "(Intercept)")] <- "1"
long.rand.ind.form <- paste(long.rand.ind2, collapse = " + ")
}
if ("noint" %in% long.rand.ind) {
long.rand.ind2 <- long.rand.ind[-which(long.rand.ind == "noint")]
long.rand.ind.form <- paste("-1", long.rand.ind2, sep = " + ")
}
n.bystudy <- jointfit$n.bystudy
if (is.null(long.rand.stud) == FALSE) {
A <- jointfit$A
latent <- jointfit$beta2[(p2 + 1):(p2 + 2), ]
names(latent) <- c(paste("gamma_ind_", 0, sep = ""), paste("gamma_stud_",
0, sep = ""))
random_stud <- jointfit$random3
colnames(random_stud) <- paste("b3_", 0:(ncol(random_stud) -
1), sep = "")
rownames(random_stud) <- studies
random$random_stud <- random_stud
randstart.stud.l <- paraests$randstart.stud
randstart.stud.cov.l <- paraests$randstart.stud.cov
if (study.name %in% long.rand.stud) {
long.rand.stud2 <- long.rand.stud
long.rand.stud2[which(long.rand.stud2 == study.name)] <- "1"
long.rand.stud.form <- paste(long.rand.stud2, collapse = " + ")
} else {
long.rand.stud.form <- paste("-1", paste(long.rand.stud,
collapse = " + "), sep = " + ")
}
} else {
latent <- jointfit$beta2[(p2 + 1), ]
names(latent) <- paste("gamma_ind_", 0, sep = "")
randstart.stud.l <- NULL
randstart.stud.cov.l <- NULL
}
coefficients <- list(fixed = fixed, random = random, latent = latent)
if (bootrun == FALSE) {
message("Calculating log-likelihood...")
jointll <- jlike(data = data, longdat = longdat, survdat = survdat,
q = q, likeests = likeests, lgpt = lgpt, studies = studies,
p1 = p1, p2 = p2, long.rand.ind = long.rand.ind, randstart.ind = paraests$randstart.ind,
randstart.ind.cov = paraests$randstart.ind.cov, r = r,
long.rand.stud = long.rand.stud, randstart.stud = randstart.stud.l,
randstart.stud.cov = randstart.stud.cov.l, strat = strat,
study.name = study.name, id.name = id.name)
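# parameter count for AIC: p1 + p2 fixed effects and q^2 individual-level
# covariance entries as counted here, plus 2 (presumably the residual
# variance and the individual-level association parameter)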
numpara <- p1 + p2 + (q^2) + 2
if (!is.null(long.rand.stud)) {
numpara <- numpara + (r^2) + 1
}
AIC <- (2 * numpara) - (2 * jointll$log.like)
loglik <- list(jointlhood = jointll$log.like, jointy = jointll$longlog.like,
jointn = jointll$survlog.like)
} else {
loglik <- "Not Calculated"
AIC <- "Not Calculated"
}
sepests <- list(longests = sep(ldaests, longsep), survests = sep(survests,
survsep))
formulae <- list(lformula = long.formula, sformula = surv.formula,
rand_ind_formula = as.formula(paste("~", long.rand.ind.form,
sep = "")))
rand_cov <- list(D = jointfit$D)
if (is.null(long.rand.stud) == FALSE) {
formulae$rand_stud_formula <- as.formula(paste("~", long.rand.stud.form,
sep = ""))
rand_cov$A <- jointfit$A
}
nobs <- table(longdat[[study.name]])
names(nobs) <- studies
results <- list(coefficients = coefficients, sigma.e = jointfit$sigma.e,
rand_cov = rand_cov, hazard = jointfit$haz, loglik = loglik,
numIter = jointfit$iters, convergence = jointfit$conv, sharingstrct = sharingstrct,
sepests = sepests, sep.loglik = sep.loglik, data = data, Call = Call,
numstudies = numstudies, n.bystudy = n.bystudy,
missingids = missingids, nobs = nobs, AIC = AIC)
class(results) <- "jointmeta1"
results
}
}
|
R
|
# Be sure to restart your server when you modify this file.
Mrug::Application.config.session_store :cookie_store, key: '_mrug_session'
|
Ruby
|
`timescale 1ns / 1ps
////////////////////////////////////////////////////////////////////////////////
// Company:
// Engineer:
//
// Create Date: 16:09:39 02/15/2016
// Design Name: rca
// Module Name: E:/FPGA/Assignment8feb/rca_tester.v
// Project Name: Assignment8feb
// Target Device:
// Tool versions:
// Description:
//
// Verilog Test Fixture created by ISE for module: rca
//
// Dependencies:
//
// Revision:
// Revision 0.01 - File Created
// Additional Comments:
//
////////////////////////////////////////////////////////////////////////////////
module rca_tester;
// Inputs
reg [7:0] a;
reg [7:0] b;
reg cin;
integer i;
// Outputs
wire [7:0] sum;
wire cout;
// Instantiate the Unit Under Test (UUT)
rca uut (
.a(a),
.b(b),
.cin(cin),
.sum(sum),
.cout(cout)
);
always@(a or b) begin
$monitor("a = %b, b = %b, cin = %b,cout = %b , sum = %b", a, b, cin, cout, sum);
end
initial begin
// Initialize Inputs
a = 0;
b = 0;
cin = 0;
end
initial begin
$monitor("a = %b, b = %b, cin = %b,cout = %b , sum = %b", a, b, cin, cout, sum);
end
always@(a or b)
begin
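// exhaustively sweep all 2^16 combinations of the two 8-bit operands {a,b}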
for(i = 0;i<256*256;i=i+1)
#1{a,b} = i;
#10 $stop;
end
/*#10 a = $random;b = $random;
#10 a = $random;b = $random;
#10 a = $random;b = $random;
#10 a = $random;b = $random;
#10 a = $random;b = $random;
#10 a = $random;b = $random;
#10 a = $random;b = $random;
#10 a = $random;b = $random;
#10 a = $random;b = $random;
#10 a = $random;b = $random;
*/
// Wait 100 ns for global reset to finish
// Add stimulus here
endmodule
|
Verilog
|
FROM balenalib/aarch64-ubuntu:jammy-run
LABEL io.balena.device-type="forecr-dsb-ornx-orin-nano-8gb"
RUN echo "deb https://repo.download.nvidia.com/jetson/common r36.3 main" >> /etc/apt/sources.list.d/nvidia.list \
&& echo "deb https://repo.download.nvidia.com/jetson/t234 r36.3 main" >> /etc/apt/sources.list.d/nvidia.list \
&& apt-key adv --fetch-key http://repo.download.nvidia.com/jetson/jetson-ota-public.asc \
&& mkdir -p /opt/nvidia/l4t-packages/ && touch /opt/nvidia/l4t-packages/.nv-l4t-disable-boot-fw-update-in-preinstall
RUN apt-get update && apt-get install -y --no-install-recommends \
less \
kmod \
nano \
net-tools \
ifupdown \
iputils-ping \
i2c-tools \
usbutils \
&& rm -rf /var/lib/apt/lists/*
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v8 \nOS: Ubuntu jammy \nVariant: run variant \nDefault variable(s): UDEV=off \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
|
Dockerfile
|
function Start-Icinga()
{
Start-IcingaService -Service 'icinga2';
Start-IcingaForWindows;
}
|
PowerShell
|
TruncateHtml.configure do |config|
end
|
Ruby
|
/**
* Copyright 2011-2013 Zuse Institute Berlin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.zib.scalaris.examples.wikipedia.plugin;
import javax.servlet.ServletConfig;
import de.zib.scalaris.examples.wikipedia.WikiServletContext;
/**
* Simple plug-in interface for plug-ins in the {@link de.zib.scalaris.examples.wikipedia.bliki.WikiServlet} class.
*
* Note: this API is not stable and will probably change in future.
*
* @author Nico Kruber, [email protected]
*/
public interface WikiPlugin {
/**
* Initialises the plugin.
*
* @param servlet
* the servlet using the plugin
* @param config
* servlet config object
*/
public void init(WikiServletContext servlet, ServletConfig config);
}
|
Java
|
# encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 20170416024256) do
# These are extensions that must be enabled in order to support this database
enable_extension "plpgsql"
create_table "connected_apps", force: :cascade do |t|
t.string "name"
t.string "token"
t.string "token_secret"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
create_table "tweets", force: :cascade do |t|
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.string "text"
t.float "location", default: [], array: true
t.string "keywords", default: [], array: true
t.string "hashtags", default: [], array: true
t.string "sentiment_type"
t.float "sentiment_value"
end
end
|
Ruby
|
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package org.ngsutils.fuzzy
import org.ngsutils.ontology.GOManager
import org.ngsutils.ontology.OntologyAnnotation
import org.ngsutils.ontology.FMBGOntologyWrap
import org.ngsutils.ontology.IFMBOntologyWrap // assumed to live alongside FMBGOntologyWrap
/**
*
* @author victor
*/
class FMBSimilarity {
IFMBOntologyWrap ontology
def logObjectList
/**
* Wraps a supported ontology manager; currently only GOManager is recognised.
*/
def setOntologyWrap(ontObj) {
if( ontObj instanceof GOManager ) {
ontology = new FMBGOntologyWrap(goManager:ontObj)
}
}
/**
* Fuzzy Based Measure of Similarity (FMS)
*
* @param a1 : an OntologyAnnotation
* @param a2 : an OntologyAnnotation
*/
double fms(OntologyAnnotation a1, OntologyAnnotation a2) {
// build densities map for each set
def gdens = [a1,a2].collect{ a->
def map = [:]
a.terms.each{ t-> map[t] = ontology.getDensity(a.product,t) }
map
}
// intersection set
def inters = gdens[0].intersect(gdens[1])
// calculate sugeno measures
def suglm = gdens.collect{ new SugenoLambdaMeasure(it.values()) }
return sugenoSum(inters.values() as List, suglm[0], suglm[1])
}
/**
* Augmented Fuzzy Based Measure of Similarity (AFMS)
*
* @param a1 : an OntologyAnnotation
* @param a2 : an OntologyAnnotation
*/
double afms(OntologyAnnotation a1, OntologyAnnotation a2) {
// 1 - get the map of nearest common ancestors (NCA) of every pair
def nca = [:]
a1.terms.each{ t1->
double ev1 = ontology.getEvidence(a1.product,t1)
a2.terms.each{ t2->
def res = ontology.getNCAncestor(t1,t2)
if(res) {
double ev2 = ontology.getEvidence(a2.product,t2)
double dens = ontology.getDensity(res)*Math.min(ev1,ev2)
if( dens>0.0 ){ nca[res] = dens }
}
}
}
// remove redundant ancestors
def ncaTerms = nca.keySet()
def ncaRedundant = ncaTerms.findAll{ontology.isAncestor(it, ncaTerms)}
ncaRedundant.each{nca.remove(it)}
// 2 - build augmented sets
def gdens = [a1,a2].collect{ a->
def map = [:]
a.terms.each{ t-> map[t] = ontology.getDensity(a.product,t) }
map
}
def annotations = gdens.collect{it.clone()} // clone annotations
// intersection set
def inters = gdens[0].intersect(gdens[1])
inters += nca
// 3 - calculate sugeno measures
(0..1).each{ gdens[it]+=nca }
def suglm = gdens.collect{ new SugenoLambdaMeasure(it.values()) }
double similarity = sugenoSum(inters.values() as List, suglm[0], suglm[1])
appendToLog(a1.id, a2.id, annotations, nca, similarity)
return similarity
}
/**
* Combines the intersection densities of two term sets using their Sugeno
* lambda-measures: the mean of the two measures, each capped at 1.0.
*/
protected double sugenoSum(intDens, suglm1, suglm2) {
if( !intDens ){ return 0.0 }
if( intDens.size()==1 ){ return intDens[0] }
return (Math.min(suglm1.value(intDens),1.0) + Math.min(suglm2.value(intDens),1.0)) * 0.5
}
/**
* Appends a similarity record for the given pair to the log, if logging is enabled.
*/
protected void appendToLog(feat1, feat2, annotations, nca, similarity) {
if( logObjectList==null ) {
return
}
def makeFeat = { name, terms ->
['name': name, 'annotations': terms.keySet().collect{['id': it, 'ic': terms[it]]}]
}
def inters = annotations[0].intersect(annotations[1])
def simObject = [
'product1': makeFeat(feat1, annotations[0]),
'product2': makeFeat(feat2, annotations[1]),
'nearestCommonAncestors': nca.keySet().collect{['id': it, 'ic': nca[it]]},
'intersection': inters.keySet().collect{['id': it, 'ic': inters[it]]},
'similarity': similarity
]
logObjectList << simObject
}
/**
* append a zero similarity object to log
*/
public void appendZeroToLog(OntologyAnnotation a1, OntologyAnnotation a2) {
if( logObjectList==null ) {
return
}
def simObject = [
'product1': ['name': a1.id],
'product2': ['name': a2.id],
'nearestCommonAncestors': [],
'intersection': [],
'similarity': 0.0d
]
logObjectList << simObject
}
}
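// Minimal usage sketch (hypothetical goManager and annotation objects; the
// names are assumptions, the API calls match the class above):
//
//   def sim = new FMBSimilarity(logObjectList: [])
//   sim.setOntologyWrap(goManager)
//   double s = sim.afms(annotation1, annotation2)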
|
Groovy
|
WIKIMETATEMPLATE=admin/templates/BeanConfigMetaTemplate
TITLE=Global GWiki Settings
NOINDEX=true
|
INI
|
(cl:in-package dasl_mocap-msg)
(cl:export '(LINEAR-VAL
LINEAR
ANGULAR-VAL
ANGULAR
))
|
Common Lisp
|
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.8"/>
<title>NonAdMD: src/harmonicbath.f90 File Reference</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<script type="text/javascript" src="navtree.js"></script>
<script type="text/javascript">
$(document).ready(initResizable);
$(window).load(resizeHeight);
</script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/javascript">
$(document).ready(function() { searchBox.OnSelectItem(0); });
</script>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js"],
jax: ["input/TeX","output/HTML-CSS"],
});
</script><script src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<td id="projectlogo"><img alt="Logo" src="Logo.gif"/></td>
<td style="padding-left: 0.5em;">
<div id="projectname">NonAdMD
 <span id="projectnumber">0.1.44</span>
</div>
<div id="projectbrief">alpha-stable</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.8 -->
<script type="text/javascript">
var searchBox = new SearchBox("searchBox", "search",false,'Search');
</script>
<div id="navrow1" class="tabs">
<ul class="tablist">
<li><a href="index.html"><span>Welcome</span></a></li>
<li><a href="annotated.html"><span>Data Types List</span></a></li>
<li class="current"><a href="files.html"><span>Files</span></a></li>
<li><a href="http://www.ucd.ie/physics"><span>UCD Physics</span></a></li>
<li>
<div id="MSearchBox" class="MSearchBoxInactive">
<span class="left">
<img id="MSearchSelect" src="search/mag_sel.png"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
alt=""/>
<input type="text" id="MSearchField" value="Search" accesskey="S"
onfocus="searchBox.OnSearchFieldFocus(true)"
onblur="searchBox.OnSearchFieldFocus(false)"
onkeyup="searchBox.OnSearchFieldChange(event)"/>
</span><span class="right">
<a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
</span>
</div>
</li>
</ul>
</div>
<div id="navrow2" class="tabs2">
<ul class="tablist">
<li><a href="files.html"><span>File List</span></a></li>
<li><a href="globals.html"><span>File Members</span></a></li>
</ul>
</div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
<div id="nav-tree">
<div id="nav-tree-contents">
<div id="nav-sync" class="sync"></div>
</div>
</div>
<div id="splitbar" style="-moz-user-select:none;"
class="ui-resizable-handle">
</div>
</div>
<script type="text/javascript">
$(document).ready(function(){initNavTree('harmonicbath_8f90.html','');});
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
<a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(0)"><span class="SelectionMark"> </span>All</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(1)"><span class="SelectionMark"> </span>Classes</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(2)"><span class="SelectionMark"> </span>Files</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(3)"><span class="SelectionMark"> </span>Functions</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(4)"><span class="SelectionMark"> </span>Variables</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(5)"><span class="SelectionMark"> </span>Pages</a></div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div class="header">
<div class="summary">
<a href="#nested-classes">Data Types</a> </div>
<div class="headertitle">
<div class="title">harmonicbath.f90 File Reference</div> </div>
</div><!--header-->
<div class="contents">
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="nested-classes"></a>
Data Types</h2></td></tr>
<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">module  </td><td class="memItemRight" valign="bottom"><a class="el" href="classharmonicbath__class.html">harmonicbath_class</a></td></tr>
<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">Harmonic bath(s) of classical non-interacting particles. <a href="classharmonicbath__class.html#details">More...</a><br /></td></tr>
<tr class="separator:"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">type  </td><td class="memItemRight" valign="bottom"><a class="el" href="structharmonicbath__class_1_1harmonicbath.html">harmonicbath_class::harmonicbath</a></td></tr>
<tr class="separator:"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceharmonicbath__class_1_1new.html">harmonicbath_class::new</a></td></tr>
<tr class="separator:"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceharmonicbath__class_1_1kill.html">harmonicbath_class::kill</a></td></tr>
<tr class="separator:"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceharmonicbath__class_1_1display.html">harmonicbath_class::display</a></td></tr>
<tr class="separator:"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceharmonicbath__class_1_1save.html">harmonicbath_class::save</a></td></tr>
<tr class="separator:"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceharmonicbath__class_1_1update.html">harmonicbath_class::update</a></td></tr>
<tr class="separator:"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceharmonicbath__class_1_1resample.html">harmonicbath_class::resample</a></td></tr>
<tr class="separator:"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceharmonicbath__class_1_1check.html">harmonicbath_class::check</a></td></tr>
<tr class="separator:"><td class="memSeparator" colspan="2"> </td></tr>
</table>
</div><!-- contents -->
</div><!-- doc-content -->
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
<li class="navelem"><a class="el" href="dir_68267d1309a1af8e8297ef4c3efbcdba.html">src</a></li><li class="navelem"><a class="el" href="harmonicbath_8f90.html">harmonicbath.f90</a></li>
<li class="footer">Generated on Thu Apr 20 2017 11:23:08 for NonAdMD by
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.8 </li>
</ul>
</div>
</body>
</html>
|
HTML
|
/** <module> Common utilities for the task-parallel scheduler
@author Dylan Meysmans <[email protected]>
@license MIT
@version 0.1.0
*/
:- module(utilities, [cores/1,
tasks/1,
depends_on/2,
schedule_for_task_core/3,
augmented_less/2]).
%! cores(+Cs:list) is semidet.
%! cores(-Cs:list) is det.
%
% Succeeds if Cs is the list of cores that the system can schedule tasks on.
% You need not instantiate Cs; if you do not, it is instantiated to the list of cores.
cores(Cs) :-
findall(C, user:core(C), Cs).
%! tasks(+Ts:list) is semidet.
%! tasks(-Ts:list) is det.
%
% Succeeds if Ts is the list of tasks that the system needs to schedule.
% You need not instantiate Ts; if you do not, it is instantiated to the list of tasks.
tasks(Ts) :-
findall(T, user:task(T), Ts).
%! depends_on(+T, +D) is semidet.
%
% Succeeds if T directly or indirectly depends on D.
% Represents the reflexive and transitive closure of the dependency relation.
depends_on(T, T).
depends_on(T, D) :-
T \== D,
user:depends_on(T, D, _).
depends_on(T, D) :-
T \== D,
not(user:depends_on(T, D, _)),
user:depends_on(T, V, _),
depends_on(V, D).
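% Example query, assuming hypothetical facts user:depends_on(t3, t2, _) and
% user:depends_on(t2, t1, _):
%
%   ?- depends_on(t3, t1).
%   true.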
%! schedule_for_task_core(+T, +Ss:list, -S:schedule) is semidet.
%
% Instantiates S to the schedule for the core on which T is scheduled in Ss.
schedule_for_task_core(T, Ss, schedule(C,Ts)) :-
member(schedule(C,Ts), Ss),
memberchk(T, Ts).
%! augmented_less(+M, +N) is semidet.
%
% Succeeds if </2 succeeds for M and N, unless M is the atom infinity.
augmented_less(infinity, _) :-
fail.
augmented_less(M, infinity) :-
M \== infinity.
augmented_less(M, N) :-
M \== infinity,
N \== infinity,
M < N.
|
Prolog
|